Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2)

Side by Side Diff: Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py

Issue 1158323009: Revert of Add an additional content_shell per worker for running virtual tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@virtual_reference_flags
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright (C) 2011 Google Inc. All rights reserved. 1 # Copyright (C) 2011 Google Inc. All rights reserved.
2 # 2 #
3 # Redistribution and use in source and binary forms, with or without 3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are 4 # modification, are permitted provided that the following conditions are
5 # met: 5 # met:
6 # 6 #
7 # * Redistributions of source code must retain the above copyright 7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer. 8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above 9 # * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer 10 # copyright notice, this list of conditions and the following disclaimer
(...skipping 10 matching lines...) Expand all
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 import logging 29 import logging
30 import math 30 import math
31 import threading
31 import time 32 import time
32 33
33 from webkitpy.common import message_pool 34 from webkitpy.common import message_pool
34 from webkitpy.layout_tests.controllers import single_test_runner 35 from webkitpy.layout_tests.controllers import single_test_runner
35 from webkitpy.layout_tests.models.test_run_results import TestRunResults 36 from webkitpy.layout_tests.models.test_run_results import TestRunResults
36 from webkitpy.layout_tests.models import test_expectations 37 from webkitpy.layout_tests.models import test_expectations
37 from webkitpy.layout_tests.models import test_failures 38 from webkitpy.layout_tests.models import test_failures
38 from webkitpy.layout_tests.models import test_results 39 from webkitpy.layout_tests.models import test_results
39 from webkitpy.tool import grammar 40 from webkitpy.tool import grammar
40 41
(...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after
200 pass 201 pass
201 202
202 def _handle_finished_test(self, worker_name, result, log_messages=[]): 203 def _handle_finished_test(self, worker_name, result, log_messages=[]):
203 self._update_summary_with_result(self._current_run_results, result) 204 self._update_summary_with_result(self._current_run_results, result)
204 205
205 def _handle_device_failed(self, worker_name, list_name, remaining_tests): 206 def _handle_device_failed(self, worker_name, list_name, remaining_tests):
206 _log.warning("%s has failed" % worker_name) 207 _log.warning("%s has failed" % worker_name)
207 if remaining_tests: 208 if remaining_tests:
208 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) 209 self._shards_to_redo.append(TestShard(list_name, remaining_tests))
209 210
210
211 class Worker(object): 211 class Worker(object):
212 def __init__(self, caller, results_directory, options): 212 def __init__(self, caller, results_directory, options):
213 self._caller = caller 213 self._caller = caller
214 self._worker_number = caller.worker_number 214 self._worker_number = caller.worker_number
215 self._name = caller.name 215 self._name = caller.name
216 self._results_directory = results_directory 216 self._results_directory = results_directory
217 self._options = options 217 self._options = options
218 218
219 # The remaining fields are initialized in start() 219 # The remaining fields are initialized in start()
220 self._host = None 220 self._host = None
221 self._port = None 221 self._port = None
222 self._batch_size = None 222 self._batch_size = None
223 self._batch_count = None 223 self._batch_count = None
224 self._filesystem = None 224 self._filesystem = None
225 self._primary_driver = None 225 self._driver = None
226 self._secondary_driver = None
227 self._num_tests = 0 226 self._num_tests = 0
228 227
229 def __del__(self): 228 def __del__(self):
230 self.stop() 229 self.stop()
231 230
232 def start(self): 231 def start(self):
 233 """This method is called when the object is starting to be used and it is safe 232 """This method is called when the object is starting to be used and it is safe
234 for the object to create state that does not need to be pickled (usually this means 233 for the object to create state that does not need to be pickled (usually this means
235 it is called in a child process).""" 234 it is called in a child process)."""
236 self._host = self._caller.host 235 self._host = self._caller.host
237 self._filesystem = self._host.filesystem 236 self._filesystem = self._host.filesystem
 238 self._port = self._host.port_factory.get(self._options.platform, self._options) 237 self._port = self._host.port_factory.get(self._options.platform, self._options)
239 self._primary_driver = self._port.create_driver(self._worker_number)
240 self._secondary_driver = self._port.create_driver(self._worker_number)
241 238
242 self._batch_count = 0 239 self._batch_count = 0
243 self._batch_size = self._options.batch_size or 0 240 self._batch_size = self._options.batch_size or 0
244 241
245 def handle(self, name, source, test_list_name, test_inputs): 242 def handle(self, name, source, test_list_name, test_inputs):
246 assert name == 'test_list' 243 assert name == 'test_list'
247 for i, test_input in enumerate(test_inputs): 244 for i, test_input in enumerate(test_inputs):
248 device_failed = self._run_test(test_input, test_list_name) 245 device_failed = self._run_test(test_input, test_list_name)
249 if device_failed: 246 if device_failed:
 250 self._caller.post('device_failed', test_list_name, test_inputs[i:]) 247 self._caller.post('device_failed', test_list_name, test_inputs[i:])
251 self._caller.stop_running() 248 self._caller.stop_running()
252 return 249 return
253 250
254 # Kill the secondary driver at the end of each test shard.
255 self._kill_driver(self._secondary_driver, 'secondary')
256
257 self._caller.post('finished_test_list', test_list_name) 251 self._caller.post('finished_test_list', test_list_name)
258 252
259 def _update_test_input(self, test_input): 253 def _update_test_input(self, test_input):
260 if test_input.reference_files is None: 254 if test_input.reference_files is None:
261 # Lazy initialization. 255 # Lazy initialization.
 262 test_input.reference_files = self._port.reference_files(test_input.test_name) 256 test_input.reference_files = self._port.reference_files(test_input.test_name)
263 if test_input.reference_files: 257 if test_input.reference_files:
264 test_input.should_run_pixel_test = True 258 test_input.should_run_pixel_test = True
265 else: 259 else:
 266 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input) 260 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
267 261
268 def _run_test(self, test_input, shard_name): 262 def _run_test(self, test_input, shard_name):
269 self._batch_count += 1 263 self._batch_count += 1
270 264
271 stop_when_done = False 265 stop_when_done = False
272 if self._batch_size > 0 and self._batch_count >= self._batch_size: 266 if self._batch_size > 0 and self._batch_count >= self._batch_size:
273 self._batch_count = 0 267 self._batch_count = 0
274 stop_when_done = True 268 stop_when_done = True
275 269
276 self._update_test_input(test_input) 270 self._update_test_input(test_input)
277 test_timeout_sec = self._timeout(test_input) 271 test_timeout_sec = self._timeout(test_input)
278 start = time.time() 272 start = time.time()
279 device_failed = False 273 device_failed = False
280 274
275 if self._driver and self._driver.has_crashed():
276 self._kill_driver()
277 if not self._driver:
278 self._driver = self._port.create_driver(self._worker_number)
279
280 if not self._driver:
 281 # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create
282 # a new failure type?
283 device_failed = True
284 return device_failed
285
281 self._caller.post('started_test', test_input, test_timeout_sec) 286 self._caller.post('started_test', test_input, test_timeout_sec)
 282 result = single_test_runner.run_single_test( 287 result = single_test_runner.run_single_test(self._port, self._options, self._results_directory,
283 self._port, self._options, self._results_directory, self._name, 288 self._name, self._driver, test_input, stop_when_done)
284 self._primary_driver, self._secondary_driver, test_input,
285 stop_when_done)
286 289
287 result.shard_name = shard_name 290 result.shard_name = shard_name
288 result.worker_name = self._name 291 result.worker_name = self._name
289 result.total_run_time = time.time() - start 292 result.total_run_time = time.time() - start
290 result.test_number = self._num_tests 293 result.test_number = self._num_tests
291 self._num_tests += 1 294 self._num_tests += 1
292 self._caller.post('finished_test', result) 295 self._caller.post('finished_test', result)
293 self._clean_up_after_test(test_input, result) 296 self._clean_up_after_test(test_input, result)
294 return result.device_failed 297 return result.device_failed
295 298
296 def stop(self): 299 def stop(self):
297 _log.debug("%s cleaning up" % self._name) 300 _log.debug("%s cleaning up" % self._name)
298 self._kill_driver(self._primary_driver, "primary") 301 self._kill_driver()
299 self._kill_driver(self._secondary_driver, "secondary")
300 302
301 def _timeout(self, test_input): 303 def _timeout(self, test_input):
302 """Compute the appropriate timeout value for a test.""" 304 """Compute the appropriate timeout value for a test."""
303 # The driver watchdog uses 2.5x the timeout; we want to be 305 # The driver watchdog uses 2.5x the timeout; we want to be
304 # larger than that. We also add a little more padding if we're 306 # larger than that. We also add a little more padding if we're
305 # running tests in a separate thread. 307 # running tests in a separate thread.
306 # 308 #
307 # Note that we need to convert the test timeout from a 309 # Note that we need to convert the test timeout from a
308 # string value in milliseconds to a float for Python. 310 # string value in milliseconds to a float for Python.
309 311
310 # FIXME: Can we just return the test_input.timeout now? 312 # FIXME: Can we just return the test_input.timeout now?
311 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 313 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
312 314
313 def _kill_driver(self, driver, label): 315 def _kill_driver(self):
314 # Be careful about how and when we kill the driver; if driver.stop() 316 # Be careful about how and when we kill the driver; if driver.stop()
315 # raises an exception, this routine may get re-entered via __del__. 317 # raises an exception, this routine may get re-entered via __del__.
318 driver = self._driver
319 self._driver = None
316 if driver: 320 if driver:
317 _log.debug("%s killing %s driver" % (self._name, label)) 321 _log.debug("%s killing driver" % self._name)
318 driver.stop() 322 driver.stop()
319 323
324
320 def _clean_up_after_test(self, test_input, result): 325 def _clean_up_after_test(self, test_input, result):
321 test_name = test_input.test_name 326 test_name = test_input.test_name
322 327
323 if result.failures: 328 if result.failures:
324 # Check and kill the driver if we need to. 329 # Check and kill the driver if we need to.
325 if any([f.driver_needs_restart() for f in result.failures]): 330 if any([f.driver_needs_restart() for f in result.failures]):
326 # FIXME: Need more information in failure reporting so 331 self._kill_driver()
327 # we know which driver needs to be restarted. For now
328 # we kill both drivers.
329 self._kill_driver(self._primary_driver, "primary")
330 self._kill_driver(self._secondary_driver, "secondary")
331
332 # Reset the batch count since the shell just bounced. 332 # Reset the batch count since the shell just bounced.
333 self._batch_count = 0 333 self._batch_count = 0
334 334
335 # Print the error message(s). 335 # Print the error message(s).
336 _log.debug("%s %s failed:" % (self._name, test_name)) 336 _log.debug("%s %s failed:" % (self._name, test_name))
337 for f in result.failures: 337 for f in result.failures:
338 _log.debug("%s %s" % (self._name, f.message())) 338 _log.debug("%s %s" % (self._name, f.message()))
339 elif result.type == test_expectations.SKIP: 339 elif result.type == test_expectations.SKIP:
340 _log.debug("%s %s skipped" % (self._name, test_name)) 340 _log.debug("%s %s skipped" % (self._name, test_name))
341 else: 341 else:
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
497 def split_at(seq, index): 497 def split_at(seq, index):
498 return (seq[:index], seq[index:]) 498 return (seq[:index], seq[index:])
499 499
500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) 500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
501 new_shards = [] 501 new_shards = []
502 remaining_shards = old_shards 502 remaining_shards = old_shards
503 while remaining_shards: 503 while remaining_shards:
 504 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) 504 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
 505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) 505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
506 return new_shards 506 return new_shards
OLDNEW
« no previous file with comments | « no previous file | Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698