Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index 7d8d3f4da55c52c523685bd2ad96509d8960752a..2a2c1504e2f3145dc87ae90fcb8527179c49b6cd 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -26,7 +26,6 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 import logging
 import re
 import time
@@ -39,16 +38,12 @@ from webkitpy.layout_tests.models import test_failures
 from webkitpy.layout_tests.models.test_results import TestResult
 from webkitpy.layout_tests.models import testharness_results
 
-
 _log = logging.getLogger(__name__)
 
 
-def run_single_test(
-        port, options, results_directory, worker_name, primary_driver,
-        secondary_driver, test_input, stop_when_done):
-    runner = SingleTestRunner(
-        port, options, results_directory, worker_name, primary_driver,
-        secondary_driver, test_input, stop_when_done)
+def run_single_test(port, options, results_directory, worker_name, primary_driver, secondary_driver, test_input, stop_when_done):
+    runner = SingleTestRunner(port, options, results_directory, worker_name, primary_driver, secondary_driver, test_input,
+                              stop_when_done)
     try:
         return runner.run()
     except DeviceFailure as e:
@@ -59,8 +54,7 @@ def run_single_test(
 class SingleTestRunner(object):
     (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
 
-    def __init__(self, port, options, results_directory, worker_name,
-                 primary_driver, secondary_driver, test_input, stop_when_done):
+    def __init__(self, port, options, results_directory, worker_name, primary_driver, secondary_driver, test_input, stop_when_done):
         self._port = port
         self._filesystem = port.host.filesystem
         self._options = options
@@ -78,8 +72,7 @@ class SingleTestRunner(object):
         # If this is a virtual test that uses the default flags instead of the
         # virtual flags for it's references, run it on the secondary driver so
         # that the primary driver does not need to be restarted.
-        if (secondary_driver and
-                self._port.is_virtual_test(self._test_name) and
+        if (secondary_driver and self._port.is_virtual_test(self._test_name) and
                 not self._port.lookup_virtual_reference_args(self._test_name)):
             self._reference_driver = secondary_driver
 
@@ -91,14 +84,13 @@ class SingleTestRunner(object):
             for suffix in ('.txt', '.png', '.wav'):
                 expected_filename = self._port.expected_filename(self._test_name, suffix)
                 if self._filesystem.exists(expected_filename):
-                    _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
-                               self._test_name, expected_filename)
+                    _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.', self._test_name,
+                               expected_filename)
 
     def _expected_driver_output(self):
-        return DriverOutput(self._port.expected_text(self._test_name),
-                            self._port.expected_image(self._test_name),
-                            self._port.expected_checksum(self._test_name),
-                            self._port.expected_audio(self._test_name))
+        return DriverOutput(
+            self._port.expected_text(self._test_name), self._port.expected_image(self._test_name),
+            self._port.expected_checksum(self._test_name), self._port.expected_audio(self._test_name))
 
     def _should_fetch_expected_checksum(self):
         return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
@@ -148,12 +140,15 @@ class SingleTestRunner(object):
         driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         expected_driver_output = self._expected_driver_output()
         failures = self._handle_error(driver_output)
-        test_result = TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
+        test_result = TestResult(self._test_name,
+                                 failures,
+                                 driver_output.test_time,
+                                 driver_output.has_stderr(),
                                  pid=driver_output.pid)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output,
+                                             expected_driver_output, test_result.failures)
         return test_result
 
-
     def _run_compare_test(self):
         driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         expected_driver_output = self._expected_driver_output()
@@ -161,23 +156,25 @@ class SingleTestRunner(object):
         test_result = self._compare_output(expected_driver_output, driver_output)
         if self._should_add_missing_baselines:
             self._add_missing_baselines(test_result, driver_output)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output,
+                                             expected_driver_output, test_result.failures)
         return test_result
 
     def _run_rebaseline(self):
         driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         failures = self._handle_error(driver_output)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output,
+                                             None, failures)
         # FIXME: It the test crashed or timed out, it might be better to avoid
         # to write new baselines.
         self._overwrite_baselines(driver_output)
-        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
-                          pid=driver_output.pid)
+        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
 
     _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
 
     def _add_missing_baselines(self, test_result, driver_output):
-        missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
+        missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage,
+                                                              test_failures.FailureMissingImageHash)
         if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
             self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
         if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
@@ -245,17 +242,15 @@ class SingleTestRunner(object):
         testname = self._test_name
 
         if driver_output.crash:
-            failures.append(test_failures.FailureCrash(bool(reference_filename),
-                                                       driver_output.crashed_process_name,
-                                                       driver_output.crashed_pid,
-                                                       self._port.output_contains_sanitizer_messages(driver_output.crash_log)))
+            failures.append(test_failures.FailureCrash(
+                bool(reference_filename), driver_output.crashed_process_name, driver_output.crashed_pid,
+                self._port.output_contains_sanitizer_messages(driver_output.crash_log)))
             if driver_output.error:
                 _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
             else:
                 _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
         elif driver_output.leak:
-            failures.append(test_failures.FailureLeak(bool(reference_filename),
-                                                      driver_output.leak_log))
+            failures.append(test_failures.FailureLeak(bool(reference_filename), driver_output.leak_log))
             _log.debug("%s %s leaked" % (self._worker_name, testname))
         elif driver_output.error:
             _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
@@ -270,8 +265,7 @@ class SingleTestRunner(object):
         if driver_output.crash:
             # Don't continue any more if we already have a crash.
             # In case of timeouts, we continue since we still want to see the text and image output.
-            return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
-                              pid=driver_output.pid)
+            return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
 
         is_testharness_test, testharness_failures = self._compare_testharness_test(driver_output, expected_driver_output)
         if is_testharness_test:
@@ -283,8 +277,12 @@ class SingleTestRunner(object):
         failures.extend(self._compare_image(expected_driver_output, driver_output))
         has_repaint_overlay = (repaint_overlay.result_contains_repaint_rects(expected_driver_output.text) or
                                repaint_overlay.result_contains_repaint_rects(driver_output.text))
-        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
-                          pid=driver_output.pid, has_repaint_overlay=has_repaint_overlay)
+        return TestResult(self._test_name,
+                          failures,
+                          driver_output.test_time,
+                          driver_output.has_stderr(),
+                          pid=driver_output.pid,
+                          has_repaint_overlay=has_repaint_overlay)
 
     def _compare_testharness_test(self, driver_output, expected_driver_output):
         if expected_driver_output.image or expected_driver_output.audio or expected_driver_output.text:
@@ -307,8 +305,8 @@ class SingleTestRunner(object):
     def _compare_text(self, expected_text, actual_text):
         failures = []
         if (expected_text and actual_text and
-            # Assuming expected_text is already normalized.
-            self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
+                # Assuming expected_text is already normalized.
+                self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
             failures.append(test_failures.FailureTextMismatch())
         elif actual_text and not expected_text:
             failures.append(test_failures.FailureMissingResult())
@@ -316,8 +314,7 @@ class SingleTestRunner(object):
 
     def _compare_audio(self, expected_audio, actual_audio):
         failures = []
-        if (expected_audio and actual_audio and
-                self._port.do_audio_results_differ(expected_audio, actual_audio)):
+        if (expected_audio and actual_audio and self._port.do_audio_results_differ(expected_audio, actual_audio)):
             failures.append(test_failures.FailureAudioMismatch())
         elif actual_audio and not expected_audio:
             failures.append(test_failures.FailureMissingAudio())
@@ -371,7 +368,8 @@ class SingleTestRunner(object):
         test_result = self._compare_output(expected_driver_output, test_output)
 
         if test_output.crash:
-            test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, expected_driver_output, test_result.failures)
+            test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name,
+                                                 test_output, expected_driver_output, test_result.failures)
             return test_result
 
         # A reftest can have multiple match references and multiple mismatch references;
@@ -389,22 +387,32 @@ class SingleTestRunner(object):
                 args = self._port.lookup_physical_reference_args(self._test_name)
             reference_test_name = self._port.relative_test_filename(reference_filename)
             reference_test_names.append(reference_test_name)
-            driver_input = DriverInput(reference_test_name, self._timeout, image_hash=test_output.image_hash, should_run_pixel_test=True, args=args)
+            driver_input = DriverInput(reference_test_name,
+                                       self._timeout,
+                                       image_hash=test_output.image_hash,
+                                       should_run_pixel_test=True,
+                                       args=args)
             reference_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
             total_test_time += reference_output.test_time
-            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
+            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename,
+                                                              expectation == '!=')
 
             if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                 break
 
-        assert(reference_output)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
+        assert (reference_output)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output,
+                                             reference_output, test_result.failures)
 
         # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
         # and only really handle the first of the references in the result.
         reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
-        return TestResult(self._test_name, test_result.failures, total_test_time,
-                          test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid,
+        return TestResult(self._test_name,
+                          test_result.failures,
+                          total_test_time,
+                          test_result.has_stderr,
+                          reftest_type=reftest_type,
+                          pid=test_result.pid,
                           references=reference_test_names)
 
     # The returned TestResult always has 0 test_run_time. _run_reftest() calculates total_run_time from test outputs.
|