Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index c9da385d1839e445eac93d23c6a926e242048848..b0e979533a178e8ac07cd80fb93c1fa0b99f9ab1 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -85,9 +85,12 @@ class SingleTestRunner(object):
         if self._reference_files:
             # Detect and report a test which has a wrong combination of expectation files.
             # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
-            # 'foo-expected.txt', we should warn users. One test file must be used exclusively
-            # in either layout tests or reftests, but not in both.
-            for suffix in ('.txt', '.png', '.wav'):
+            # 'foo-expected.png', we should warn users. One test file must be used exclusively
+            # in either layout tests or reftests, but not in both. Text expectations are an
+            # exception.
+            for suffix in self._port.baseline_extensions():
+                if suffix == '.txt':
+                    continue
                 expected_filename = self._port.expected_filename(self._test_name, suffix)
                 if self._filesystem.exists(expected_filename):
                     _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
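In isolation, the check above now flags every baseline suffix the port knows about, except '.txt', whenever a test also has reference files. A minimal standalone sketch of that filtering, assuming baseline_extensions() returns the usual ('.txt', '.png', '.wav') tuple; the helper name below is hypothetical, only baseline_extensions()/expected_filename() come from the port API used above:

    # Suffixes whose expectation files conflict with reference files;
    # '.txt' is exempt because a reftest may now carry a text expectation.
    def conflicting_suffixes(baseline_extensions):
        return [suffix for suffix in baseline_extensions if suffix != '.txt']

    assert conflicting_suffixes(('.txt', '.png', '.wav')) == ['.png', '.wav']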
@@ -128,15 +131,18 @@ class SingleTestRunner(object):
     def run(self):
         if self._options.enable_sanitizer:
             return self._run_sanitized_test()
-        if self._reference_files:
-            if self._options.reset_results:
-                reftest_type = set([reference_file[0] for reference_file in self._reference_files])
-                result = TestResult(self._test_name, reftest_type=reftest_type)
-                result.type = test_expectations.SKIP
-                return result
-            return self._run_reftest()
         if self._options.reset_results:
+            if self._reference_files:
+                expected_txt_filename = self._port.expected_filename(self._test_name, '.txt')
+                if not self._filesystem.exists(expected_txt_filename):
+                    reftest_type = set([reference_file[0] for reference_file in self._reference_files])
+                    result = TestResult(self._test_name, reftest_type=reftest_type)
+                    result.type = test_expectations.SKIP
+                    return result
+                self._should_run_pixel_test = False
             return self._run_rebaseline()
+        if self._reference_files:
+            return self._run_reftest()
         return self._run_compare_test()
 
     def _run_sanitized_test(self):
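The reordered run() above changes what a reftest does under --reset-results: it is skipped only when it has no text expectation, and otherwise rebaselined with the pixel test disabled. A behavior sketch with plain booleans standing in for the self._options/self._filesystem checks; the function name and return values are illustrative:

    # Decision sketch for the reset-results branch of run().
    def reset_results_action(is_reftest, has_expected_txt):
        if is_reftest and not has_expected_txt:
            return 'skip'                   # pure reftest: nothing to rebaseline
        if is_reftest:
            return 'rebaseline-text-only'   # pixel test off, text baseline refreshed
        return 'rebaseline'                 # ordinary test: full rebaseline

    assert reset_results_action(is_reftest=True, has_expected_txt=False) == 'skip'
    assert reset_results_action(is_reftest=True, has_expected_txt=True) == 'rebaseline-text-only'
    assert reset_results_action(is_reftest=False, has_expected_txt=False) == 'rebaseline'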
@@ -363,18 +369,20 @@ class SingleTestRunner(object):
     def _run_reftest(self):
         test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         total_test_time = test_output.test_time
-        reference_output = None
+        expected_output = None
         test_result = None
+        expected_text = self._port.expected_text(self._test_name)
+        expected_text_output = DriverOutput(text=expected_text, image=None, image_hash=None, audio=None)
+
         # If the test crashed, or timed out, there's no point in running the reference at all.
         # This can save a lot of execution time if we have a lot of crashes or timeouts.
         if test_output.crash or test_output.timeout:
-            expected_driver_output = DriverOutput(text=None, image=None, image_hash=None, audio=None)
-            test_result = self._compare_output(expected_driver_output, test_output)
+            test_result = self._compare_output(expected_text_output, test_output)
             if test_output.crash:
                 test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
-                                                     self._test_name, test_output, expected_driver_output, test_result.failures)
+                                                     self._test_name, test_output, expected_text_output, test_result.failures)
             return test_result
 
         # A reftest can have multiple match references and multiple mismatch references;
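On the crash/timeout early exit above, the driver output is now compared against a DriverOutput carrying the test's expected text rather than an all-None placeholder. A reduced sketch of that construction, using a stub class that mirrors the (text, image, image_hash, audio) constructor shape used in the hunk and a hypothetical baseline string:

    class DriverOutput(object):
        # Stub with the same constructor shape as the real class.
        def __init__(self, text, image, image_hash, audio):
            self.text = text
            self.image = image
            self.image_hash = image_hash
            self.audio = audio

    expected_text = 'PASS expected text\n'  # hypothetical '-expected.txt' content
    expected_text_output = DriverOutput(text=expected_text, image=None, image_hash=None, audio=None)
    assert expected_text_output.text == expected_text
    assert expected_text_output.image is None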
@@ -394,17 +402,23 @@ class SingleTestRunner(object):
             reference_test_names.append(reference_test_name)
             driver_input = DriverInput(reference_test_name, self._timeout,
                                        image_hash=test_output.image_hash, should_run_pixel_test=True, args=args)
-            reference_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
-            total_test_time += reference_output.test_time
+            expected_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
+            total_test_time += expected_output.test_time
             test_result = self._compare_output_with_reference(
-                reference_output, test_output, reference_filename, expectation == '!=')
+                expected_output, test_output, reference_filename, expectation == '!=')
             if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                 break
-        assert(reference_output)
+        assert(expected_output)
+
+        if expected_text:
+            text_output = DriverOutput(text=test_output.text, image=None, image_hash=None, audio=None)
+            test_result.failures.extend(self._compare_output(expected_text_output, text_output).failures)
+            expected_output.text = expected_text_output.text
+
         test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
-                                             self._test_name, test_output, reference_output, test_result.failures)
+                                             self._test_name, test_output, expected_output, test_result.failures)
 
         # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
         # and only really handle the first of the references in the result.
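The `if expected_text:` block above folds text-comparison failures into the reftest's result, so a test can now fail on its reference match, its text expectation, or both. A small sketch of that merging, with plain lists standing in for the failure objects the real _compare_output() returns; all names are illustrative:

    def merge_failures(reftest_failures, text_failures):
        # Mirrors test_result.failures.extend(...): both lists contribute.
        merged = list(reftest_failures)
        merged.extend(text_failures)
        return merged

    assert merge_failures(['image-mismatch'], []) == ['image-mismatch']
    assert merge_failures([], ['text-mismatch']) == ['text-mismatch']
    assert merge_failures(['image-mismatch'], ['text-mismatch']) == ['image-mismatch', 'text-mismatch']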