Index: Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index 382bfccdb25e0af3258f33666096047fda71981b..a4207fe66262eebfcac0326ba18cf40c7bc84302 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -48,7 +48,7 @@ def run_single_test(port, options, results_directory, worker_name, driver, test_
     try:
         return runner.run()
     except DeviceFailure as e:
-        _log.error("device failed: %s", str(e))
+        _log.error('device failed: %s', str(e))
         return TestResult(test_input.test_name, device_failed=True)
 
 
@@ -78,13 +78,13 @@ class SingleTestRunner(object):
                 expected_filename = self._port.expected_filename(self._test_name, suffix)
                 if self._filesystem.exists(expected_filename):
                     _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
-                        self._test_name, expected_filename)
+                               self._test_name, expected_filename)
 
     def _expected_driver_output(self):
         return DriverOutput(self._port.expected_text(self._test_name),
-            self._port.expected_image(self._test_name),
-            self._port.expected_checksum(self._test_name),
-            self._port.expected_audio(self._test_name))
+                            self._port.expected_image(self._test_name),
+                            self._port.expected_checksum(self._test_name),
+                            self._port.expected_audio(self._test_name))
 
     def _should_fetch_expected_checksum(self):
         return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
@@ -143,13 +143,27 @@ class SingleTestRunner(object):
         test_result = self._compare_output(expected_driver_output, driver_output)
         if self._should_add_missing_baselines:
             self._add_missing_baselines(test_result, driver_output)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
+        test_result_writer.write_test_result(
+            self._filesystem,
+            self._port,
+            self._results_directory,
+            self._test_name,
+            driver_output,
+            expected_driver_output,
+            test_result.failures)
         return test_result
 
     def _run_rebaseline(self):
         driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
         failures = self._handle_error(driver_output)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
-        # FIXME: It the test crashed or timed out, it might be better to avoid
-        # to write new baselines.
+        test_result_writer.write_test_result(
+            self._filesystem,
+            self._port,
+            self._results_directory,
+            self._test_name,
+            driver_output,
+            None,
+            failures)
+        # FIXME: If the test crashed or timed out, it might be better to avoid
+        # writing new baselines.
         self._overwrite_baselines(driver_output)
@@ -159,7 +173,9 @@ class SingleTestRunner(object):
     _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
 
     def _add_missing_baselines(self, test_result, driver_output):
-        missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
+        missingImage = test_result.has_failure_matching_types(
+            test_failures.FailureMissingImage,
+            test_failures.FailureMissingImageHash)
         if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
             self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
         if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
@@ -202,7 +218,7 @@ class SingleTestRunner(object):
             raise AssertionError('unrecognized baseline location: %s' % location)
 
         fs.maybe_make_directory(output_dir)
-        output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
+        output_basename = fs.basename(fs.splitext(self._test_name)[0] + '-expected' + extension)
         output_path = fs.join(output_dir, output_basename)
         _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
         port.update_baseline(output_path, data)
@@ -231,17 +247,17 @@ class SingleTestRunner(object):
                                                        driver_output.crashed_process_name,
                                                        driver_output.crashed_pid))
             if driver_output.error:
-                _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
+                _log.debug('%s %s crashed, (stderr lines):' % (self._worker_name, testname))
             else:
-                _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
+                _log.debug('%s %s crashed, (no stderr)' % (self._worker_name, testname))
         elif driver_output.leak:
             failures.append(test_failures.FailureLeak(bool(reference_filename),
                                                       driver_output.leak_log))
-            _log.debug("%s %s leaked" % (self._worker_name, testname))
+            _log.debug('%s %s leaked' % (self._worker_name, testname))
         elif driver_output.error:
-            _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
+            _log.debug('%s %s output stderr lines:' % (self._worker_name, testname))
         for line in driver_output.error.splitlines():
-            _log.debug(" %s" % line)
+            _log.debug(' %s' % line)
         return failures
 
     def _compare_output(self, expected_driver_output, driver_output):
@@ -283,13 +299,13 @@ class SingleTestRunner(object):
         return True, []
 
     def _is_render_tree(self, text):
-        return text and "layer at (0,0) size 800x600" in text
+        return text and 'layer at (0,0) size 800x600' in text
 
     def _compare_text(self, expected_text, actual_text):
         failures = []
         if (expected_text and actual_text and
-            # Assuming expected_text is already normalized.
-            self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
+                # Assuming expected_text is already normalized.
+                self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
             failures.append(test_failures.FailureTextMismatch())
         elif actual_text and not expected_text:
             failures.append(test_failures.FailureMissingResult())
@@ -298,7 +314,7 @@ class SingleTestRunner(object):
     def _compare_audio(self, expected_audio, actual_audio):
         failures = []
         if (expected_audio and actual_audio and
-            self._port.do_audio_results_differ(expected_audio, actual_audio)):
+                self._port.do_audio_results_differ(expected_audio, actual_audio)):
             failures.append(test_failures.FailureAudioMismatch())
         elif actual_audio and not expected_audio:
             failures.append(test_failures.FailureMissingAudio())
@@ -311,7 +327,7 @@ class SingleTestRunner(object):
         # changed to "\r\n" by our system (Python/Cygwin), resulting in
         # "\r\r\n", when, in fact, we wanted to compare the text output with
         # the normalized text expectation files.
-        return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
+        return output.replace('\r\r\n', '\r\n').replace('\r\n', '\n')
 
     # FIXME: This function also creates the image diff. Maybe that work should
     # be handled elsewhere?
@@ -368,14 +384,25 @@ class SingleTestRunner(object):
             reference_test_names.append(reference_test_name)
             driver_input = DriverInput(reference_test_name, self._timeout, image_hash=None, should_run_pixel_test=True, args=args)
             reference_output = self._driver.run_test(driver_input, self._stop_when_done)
-            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
+            test_result = self._compare_output_with_reference(
+                reference_output,
+                test_output,
+                reference_filename,
+                expectation == '!=')
 
             if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                 break
             total_test_time += test_result.test_run_time
 
         assert(reference_output)
-        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
+        test_result_writer.write_test_result(
+            self._filesystem,
+            self._port,
+            self._results_directory,
+            self._test_name,
+            test_output,
+            reference_output,
+            test_result.failures)
 
         # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
         # and only really handle the first of the references in the result.
@@ -406,7 +433,7 @@ class SingleTestRunner(object):
             elif err_str:
                 _log.error(err_str)
             else:
-                _log.warning(" %s -> ref test hashes matched but diff failed" % self._test_name)
+                _log.warning(' %s -> ref test hashes matched but diff failed' % self._test_name)
 
         elif reference_driver_output.image_hash != actual_driver_output.image_hash:
             diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
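Note: every hunk in this patch is mechanical style cleanup (double-quoted string literals become single-quoted, continuation lines are re-indented, and overlong calls are wrapped to one argument per line), so runtime behavior is unchanged. A minimal before/after sketch of the wrapping convention, using a hypothetical call rather than a line from the file:

    # Before: the whole call sits on one long double-quoted line.
    writer.write_result("report.txt", port, results_dir, name, output, expected, failures)

    # After: single quotes, one argument per line, four-space hanging indent.
    writer.write_result(
        'report.txt',
        port,
        results_dir,
        name,
        output,
        expected,
        failures)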