Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(22)

Side by Side Diff: Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py

Issue 1154343007: Add extra content_shell per worker for virtual tests, except on Android. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: merge Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright (C) 2011 Google Inc. All rights reserved. 1 # Copyright (C) 2011 Google Inc. All rights reserved.
2 # 2 #
3 # Redistribution and use in source and binary forms, with or without 3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are 4 # modification, are permitted provided that the following conditions are
5 # met: 5 # met:
6 # 6 #
7 # * Redistributions of source code must retain the above copyright 7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer. 8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above 9 # * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer 10 # copyright notice, this list of conditions and the following disclaimer
(...skipping 10 matching lines...) Expand all
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 import logging 29 import logging
30 import math 30 import math
31 import threading
32 import time 31 import time
33 32
34 from webkitpy.common import message_pool 33 from webkitpy.common import message_pool
35 from webkitpy.layout_tests.controllers import single_test_runner 34 from webkitpy.layout_tests.controllers import single_test_runner
36 from webkitpy.layout_tests.models.test_run_results import TestRunResults 35 from webkitpy.layout_tests.models.test_run_results import TestRunResults
37 from webkitpy.layout_tests.models import test_expectations 36 from webkitpy.layout_tests.models import test_expectations
38 from webkitpy.layout_tests.models import test_failures 37 from webkitpy.layout_tests.models import test_failures
39 from webkitpy.layout_tests.models import test_results 38 from webkitpy.layout_tests.models import test_results
40 from webkitpy.tool import grammar 39 from webkitpy.tool import grammar
41 40
(...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after
201 pass 200 pass
202 201
203 def _handle_finished_test(self, worker_name, result, log_messages=[]): 202 def _handle_finished_test(self, worker_name, result, log_messages=[]):
204 self._update_summary_with_result(self._current_run_results, result) 203 self._update_summary_with_result(self._current_run_results, result)
205 204
206 def _handle_device_failed(self, worker_name, list_name, remaining_tests): 205 def _handle_device_failed(self, worker_name, list_name, remaining_tests):
207 _log.warning("%s has failed" % worker_name) 206 _log.warning("%s has failed" % worker_name)
208 if remaining_tests: 207 if remaining_tests:
209 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) 208 self._shards_to_redo.append(TestShard(list_name, remaining_tests))
210 209
210
211 class Worker(object): 211 class Worker(object):
212 def __init__(self, caller, results_directory, options): 212 def __init__(self, caller, results_directory, options):
213 self._caller = caller 213 self._caller = caller
214 self._worker_number = caller.worker_number 214 self._worker_number = caller.worker_number
215 self._name = caller.name 215 self._name = caller.name
216 self._results_directory = results_directory 216 self._results_directory = results_directory
217 self._options = options 217 self._options = options
218 218
219 # The remaining fields are initialized in start() 219 # The remaining fields are initialized in start()
220 self._host = None 220 self._host = None
221 self._port = None 221 self._port = None
222 self._batch_size = None 222 self._batch_size = None
223 self._batch_count = None 223 self._batch_count = None
224 self._filesystem = None 224 self._filesystem = None
225 self._driver = None 225 self._primary_driver = None
226 self._secondary_driver = None
226 self._num_tests = 0 227 self._num_tests = 0
227 228
228 def __del__(self): 229 def __del__(self):
229 self.stop() 230 self.stop()
230 231
231 def start(self): 232 def start(self):
232 """This method is called when the object is starting to be used and it i s safe 233 """This method is called when the object is starting to be used and it i s safe
233 for the object to create state that does not need to be pickled (usually this means 234 for the object to create state that does not need to be pickled (usually this means
234 it is called in a child process).""" 235 it is called in a child process)."""
235 self._host = self._caller.host 236 self._host = self._caller.host
236 self._filesystem = self._host.filesystem 237 self._filesystem = self._host.filesystem
237 self._port = self._host.port_factory.get(self._options.platform, self._options) 238 self._port = self._host.port_factory.get(self._options.platform, self._options)
239 self._primary_driver = self._port.create_driver(self._worker_number)
240
241 if self._port.max_drivers_per_process() > 1:
242 self._secondary_driver = self._port.create_driver(self._worker_number)
238 243
239 self._batch_count = 0 244 self._batch_count = 0
240 self._batch_size = self._options.batch_size or 0 245 self._batch_size = self._options.batch_size or 0
241 246
242 def handle(self, name, source, test_list_name, test_inputs): 247 def handle(self, name, source, test_list_name, test_inputs):
243 assert name == 'test_list' 248 assert name == 'test_list'
244 for i, test_input in enumerate(test_inputs): 249 for i, test_input in enumerate(test_inputs):
245 device_failed = self._run_test(test_input, test_list_name) 250 device_failed = self._run_test(test_input, test_list_name)
246 if device_failed: 251 if device_failed:
247 self._caller.post('device_failed', test_list_name, test_inputs[i:]) 252 self._caller.post('device_failed', test_list_name, test_inputs[i:])
248 self._caller.stop_running() 253 self._caller.stop_running()
249 return 254 return
250 255
256 # Kill the secondary driver at the end of each test shard.
257 self._kill_driver(self._secondary_driver, 'secondary')
258
251 self._caller.post('finished_test_list', test_list_name) 259 self._caller.post('finished_test_list', test_list_name)
252 260
253 def _update_test_input(self, test_input): 261 def _update_test_input(self, test_input):
254 if test_input.reference_files is None: 262 if test_input.reference_files is None:
255 # Lazy initialization. 263 # Lazy initialization.
256 test_input.reference_files = self._port.reference_files(test_input.test_name) 264 test_input.reference_files = self._port.reference_files(test_input.test_name)
257 if test_input.reference_files: 265 if test_input.reference_files:
258 test_input.should_run_pixel_test = True 266 test_input.should_run_pixel_test = True
259 else: 267 else:
260 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input) 268 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
261 269
262 def _run_test(self, test_input, shard_name): 270 def _run_test(self, test_input, shard_name):
263 self._batch_count += 1 271 self._batch_count += 1
264 272
265 stop_when_done = False 273 stop_when_done = False
266 if self._batch_size > 0 and self._batch_count >= self._batch_size: 274 if self._batch_size > 0 and self._batch_count >= self._batch_size:
267 self._batch_count = 0 275 self._batch_count = 0
268 stop_when_done = True 276 stop_when_done = True
269 277
270 self._update_test_input(test_input) 278 self._update_test_input(test_input)
271 test_timeout_sec = self._timeout(test_input) 279 test_timeout_sec = self._timeout(test_input)
272 start = time.time() 280 start = time.time()
273 device_failed = False 281 device_failed = False
274 282
275 if self._driver and self._driver.has_crashed():
276 self._kill_driver()
277 if not self._driver:
278 self._driver = self._port.create_driver(self._worker_number)
279
280 if not self._driver:
281 # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create
282 # a new failure type?
283 device_failed = True
284 return device_failed
285
286 _log.debug("%s %s started" % (self._name, test_input.test_name)) 283 _log.debug("%s %s started" % (self._name, test_input.test_name))
287 self._caller.post('started_test', test_input, test_timeout_sec) 284 self._caller.post('started_test', test_input, test_timeout_sec)
288 result = single_test_runner.run_single_test(self._port, self._options, self._results_directory, 285 result = single_test_runner.run_single_test(
289 self._name, self._driver, test_input, stop_when_done) 286 self._port, self._options, self._results_directory, self._name,
287 self._primary_driver, self._secondary_driver, test_input,
288 stop_when_done)
290 289
291 result.shard_name = shard_name 290 result.shard_name = shard_name
292 result.worker_name = self._name 291 result.worker_name = self._name
293 result.total_run_time = time.time() - start 292 result.total_run_time = time.time() - start
294 result.test_number = self._num_tests 293 result.test_number = self._num_tests
295 self._num_tests += 1 294 self._num_tests += 1
296 self._caller.post('finished_test', result) 295 self._caller.post('finished_test', result)
297 self._clean_up_after_test(test_input, result) 296 self._clean_up_after_test(test_input, result)
298 return result.device_failed 297 return result.device_failed
299 298
300 def stop(self): 299 def stop(self):
301 _log.debug("%s cleaning up" % self._name) 300 _log.debug("%s cleaning up" % self._name)
302 self._kill_driver() 301 self._kill_driver(self._primary_driver, "primary")
302 self._kill_driver(self._secondary_driver, "secondary")
303 303
304 def _timeout(self, test_input): 304 def _timeout(self, test_input):
305 """Compute the appropriate timeout value for a test.""" 305 """Compute the appropriate timeout value for a test."""
306 # The driver watchdog uses 2.5x the timeout; we want to be 306 # The driver watchdog uses 2.5x the timeout; we want to be
307 # larger than that. We also add a little more padding if we're 307 # larger than that. We also add a little more padding if we're
308 # running tests in a separate thread. 308 # running tests in a separate thread.
309 # 309 #
310 # Note that we need to convert the test timeout from a 310 # Note that we need to convert the test timeout from a
311 # string value in milliseconds to a float for Python. 311 # string value in milliseconds to a float for Python.
312 312
313 # FIXME: Can we just return the test_input.timeout now? 313 # FIXME: Can we just return the test_input.timeout now?
314 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 314 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
315 315
316 def _kill_driver(self): 316 def _kill_driver(self, driver, label):
317 # Be careful about how and when we kill the driver; if driver.stop() 317 # Be careful about how and when we kill the driver; if driver.stop()
318 # raises an exception, this routine may get re-entered via __del__. 318 # raises an exception, this routine may get re-entered via __del__.
319 driver = self._driver
320 self._driver = None
321 if driver: 319 if driver:
322 _log.debug("%s killing driver" % self._name) 320 _log.debug("%s killing %s driver" % (self._name, label))
323 driver.stop() 321 driver.stop()
324 322
325
326 def _clean_up_after_test(self, test_input, result): 323 def _clean_up_after_test(self, test_input, result):
327 test_name = test_input.test_name 324 test_name = test_input.test_name
328 325
329 if result.failures: 326 if result.failures:
330 # Check and kill the driver if we need to. 327 # Check and kill the driver if we need to.
331 if any([f.driver_needs_restart() for f in result.failures]): 328 if any([f.driver_needs_restart() for f in result.failures]):
332 self._kill_driver() 329 # FIXME: Need more information in failure reporting so
330 # we know which driver needs to be restarted. For now
331 # we kill both drivers.
332 self._kill_driver(self._primary_driver, "primary")
333 self._kill_driver(self._secondary_driver, "secondary")
334
333 # Reset the batch count since the shell just bounced. 335 # Reset the batch count since the shell just bounced.
334 self._batch_count = 0 336 self._batch_count = 0
335 337
336 # Print the error message(s). 338 # Print the error message(s).
337 _log.debug("%s %s failed:" % (self._name, test_name)) 339 _log.debug("%s %s failed:" % (self._name, test_name))
338 for f in result.failures: 340 for f in result.failures:
339 _log.debug("%s %s" % (self._name, f.message())) 341 _log.debug("%s %s" % (self._name, f.message()))
340 elif result.type == test_expectations.SKIP: 342 elif result.type == test_expectations.SKIP:
341 _log.debug("%s %s skipped" % (self._name, test_name)) 343 _log.debug("%s %s skipped" % (self._name, test_name))
342 else: 344 else:
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
498 def split_at(seq, index): 500 def split_at(seq, index):
499 return (seq[:index], seq[index:]) 501 return (seq[:index], seq[index:])
500 502
501 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) 503 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
502 new_shards = [] 504 new_shards = []
503 remaining_shards = old_shards 505 remaining_shards = old_shards
504 while remaining_shards: 506 while remaining_shards:
505 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) 507 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
506 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) 508 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
507 return new_shards 509 return new_shards
OLDNEW
« no previous file with comments | « no previous file | Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698