OLD | NEW |
1 # Copyright (C) 2011 Google Inc. All rights reserved. | 1 # Copyright (C) 2011 Google Inc. All rights reserved. |
2 # | 2 # |
3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
5 # met: | 5 # met: |
6 # | 6 # |
7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
(...skipping 19 matching lines...)
30 import logging | 30 import logging |
31 import re | 31 import re |
32 import time | 32 import time |
33 | 33 |
34 from webkitpy.layout_tests.controllers import repaint_overlay | 34 from webkitpy.layout_tests.controllers import repaint_overlay |
35 from webkitpy.layout_tests.controllers import test_result_writer | 35 from webkitpy.layout_tests.controllers import test_result_writer |
36 from webkitpy.layout_tests.port.driver import DeviceFailure, DriverInput, DriverOutput | 36 from webkitpy.layout_tests.port.driver import DeviceFailure, DriverInput, DriverOutput |
37 from webkitpy.layout_tests.models import test_expectations | 37 from webkitpy.layout_tests.models import test_expectations |
38 from webkitpy.layout_tests.models import test_failures | 38 from webkitpy.layout_tests.models import test_failures |
39 from webkitpy.layout_tests.models.test_results import TestResult | 39 from webkitpy.layout_tests.models.test_results import TestResult |
| 40 from webkitpy.layout_tests.models import testharness_results |
40 | 41 |
41 | 42 |
42 _log = logging.getLogger(__name__) | 43 _log = logging.getLogger(__name__) |
43 | 44 |
44 | 45 |
45 def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done): | 46 def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done): |
46 runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input, stop_when_done) | 47 runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input, stop_when_done) |
47 try: | 48 try: |
48 return runner.run() | 49 return runner.run() |
49 except DeviceFailure as e: | 50 except DeviceFailure as e: |
(...skipping 216 matching lines...)
266 return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), | 267 return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), |
267 pid=driver_output.pid, has_repaint_overlay=has_repaint_overlay) | 268 pid=driver_output.pid, has_repaint_overlay=has_repaint_overlay) |
268 | 269 |
269 def _compare_testharness_test(self, driver_output, expected_driver_output): | 270 def _compare_testharness_test(self, driver_output, expected_driver_output): |
270 if expected_driver_output.image or expected_driver_output.audio or expected_driver_output.text: | 271 if expected_driver_output.image or expected_driver_output.audio or expected_driver_output.text: |
271 return False, [] | 272 return False, [] |
272 | 273 |
273 if driver_output.image or driver_output.audio or self._is_render_tree(driver_output.text): | 274 if driver_output.image or driver_output.audio or self._is_render_tree(driver_output.text): |
274 return False, [] | 275 return False, [] |
275 | 276 |
276 failures = [] | |
277 found_a_pass = False | |
278 text = driver_output.text or '' | 277 text = driver_output.text or '' |
279 lines = text.strip().splitlines() | 278 |
280 lines = [line.strip() for line in lines] | 279 if not testharness_results.is_testharness_output(text): |
281 header = 'This is a testharness.js-based test.' | |
282 footer = 'Harness: the test ran to completion.' | |
283 if not lines or not header in lines: | |
284 return False, [] | 280 return False, [] |
285 if not footer in lines: | 281 if not testharness_results.is_testharness_output_passing(text): |
286 return True, [test_failures.FailureTestHarnessAssertion()] | |
287 | |
288 for line in lines: | |
289 if line == header or line == footer or line.startswith('PASS'): | |
290 continue | |
291 # CONSOLE output can happen during tests and shouldn't break them. | |
292 if line.startswith('CONSOLE'): | |
293 continue | |
294 | |
295 if line.startswith('FAIL') or line.startswith('TIMEOUT'): | |
296 return True, [test_failures.FailureTestHarnessAssertion()] | |
297 | |
298 # Fail the test if there is any unrecognized output. | |
299 return True, [test_failures.FailureTestHarnessAssertion()] | 282 return True, [test_failures.FailureTestHarnessAssertion()] |
300 return True, [] | 283 return True, [] |
301 | 284 |
302 def _is_render_tree(self, text): | 285 def _is_render_tree(self, text): |
303 return text and "layer at (0,0) size 800x600" in text | 286 return text and "layer at (0,0) size 800x600" in text |
304 | 287 |
305 def _compare_text(self, expected_text, actual_text): | 288 def _compare_text(self, expected_text, actual_text): |
306 failures = [] | 289 failures = [] |
307 if (expected_text and actual_text and | 290 if (expected_text and actual_text and |
308 # Assuming expected_text is already normalized. | 291 # Assuming expected_text is already normalized. |
(...skipping 119 matching lines...)
428 elif reference_driver_output.image_hash != actual_driver_output.image_hash: | 411 elif reference_driver_output.image_hash != actual_driver_output.image_hash: |
429 diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image) | 412 diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image) |
430 if diff: | 413 if diff: |
431 failures.append(test_failures.FailureReftestMismatch(reference_filename)) | 414 failures.append(test_failures.FailureReftestMismatch(reference_filename)) |
432 elif err_str: | 415 elif err_str: |
433 _log.error(err_str) | 416 _log.error(err_str) |
434 else: | 417 else: |
435 _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name) | 418 _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name) |
436 | 419 |
437 return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid) | 420 return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid) |
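Note on the _compare_testharness_test change above: the inline parsing removed at old lines 276-300 now sits behind two helpers in the newly imported webkitpy.layout_tests.models.testharness_results module, which is not part of this diff. The sketch below shows how those two helpers could carry the removed logic over unchanged; the function names come from the call sites at new lines 279 and 281, while the module-level constants and docstrings are assumptions for illustration only.

# Sketch only: reconstructed from the removed inline logic, not the actual
# contents of webkitpy/layout_tests/models/testharness_results.py.

_TESTHARNESS_HEADER = 'This is a testharness.js-based test.'
_TESTHARNESS_FOOTER = 'Harness: the test ran to completion.'


def is_testharness_output(text):
    """Return True if the text looks like testharness.js output."""
    lines = [line.strip() for line in (text or '').strip().splitlines()]
    return bool(lines) and _TESTHARNESS_HEADER in lines


def is_testharness_output_passing(text):
    """Return True if the harness completed and every assertion line passed."""
    lines = [line.strip() for line in (text or '').strip().splitlines()]
    if _TESTHARNESS_FOOTER not in lines:
        # The harness never reported completion; treat this as a failure.
        return False
    for line in lines:
        if line in (_TESTHARNESS_HEADER, _TESTHARNESS_FOOTER) or line.startswith('PASS'):
            continue
        # CONSOLE output can happen during tests and shouldn't break them.
        if line.startswith('CONSOLE'):
            continue
        # FAIL, TIMEOUT, or any unrecognized output fails the test.
        return False
    return True

With helpers along these lines, _compare_testharness_test only has to decide whether the output is testharness output at all and, if so, whether it passes, which is exactly what new lines 279-283 do.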