OLD | NEW |
1 # Copyright (C) 2011 Google Inc. All rights reserved. | 1 # Copyright (C) 2011 Google Inc. All rights reserved. |
2 # | 2 # |
3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
5 # met: | 5 # met: |
6 # | 6 # |
7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
(...skipping 30 matching lines...) Expand all Loading... |
41 | 41 |
42 | 42 |
43 _log = logging.getLogger(__name__) | 43 _log = logging.getLogger(__name__) |
44 | 44 |
45 | 45 |
46 def run_single_test(port, options, results_directory, worker_name, driver, test_
input, stop_when_done): | 46 def run_single_test(port, options, results_directory, worker_name, driver, test_
input, stop_when_done): |
47 runner = SingleTestRunner(port, options, results_directory, worker_name, dri
ver, test_input, stop_when_done) | 47 runner = SingleTestRunner(port, options, results_directory, worker_name, dri
ver, test_input, stop_when_done) |
48 try: | 48 try: |
49 return runner.run() | 49 return runner.run() |
50 except DeviceFailure as e: | 50 except DeviceFailure as e: |
51 _log.error("device failed: %s", str(e)) | 51 _log.error('device failed: %s', str(e)) |
52 return TestResult(test_input.test_name, device_failed=True) | 52 return TestResult(test_input.test_name, device_failed=True) |
53 | 53 |
54 | 54 |
55 class SingleTestRunner(object): | 55 class SingleTestRunner(object): |
56 (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platfor
m', 'version', 'update') | 56 (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platfor
m', 'version', 'update') |
57 | 57 |
58 def __init__(self, port, options, results_directory, worker_name, driver, te
st_input, stop_when_done): | 58 def __init__(self, port, options, results_directory, worker_name, driver, te
st_input, stop_when_done): |
59 self._port = port | 59 self._port = port |
60 self._filesystem = port.host.filesystem | 60 self._filesystem = port.host.filesystem |
61 self._options = options | 61 self._options = options |
62 self._results_directory = results_directory | 62 self._results_directory = results_directory |
63 self._driver = driver | 63 self._driver = driver |
64 self._timeout = test_input.timeout | 64 self._timeout = test_input.timeout |
65 self._worker_name = worker_name | 65 self._worker_name = worker_name |
66 self._test_name = test_input.test_name | 66 self._test_name = test_input.test_name |
67 self._should_run_pixel_test = test_input.should_run_pixel_test | 67 self._should_run_pixel_test = test_input.should_run_pixel_test |
68 self._reference_files = test_input.reference_files | 68 self._reference_files = test_input.reference_files |
69 self._should_add_missing_baselines = test_input.should_add_missing_basel
ines | 69 self._should_add_missing_baselines = test_input.should_add_missing_basel
ines |
70 self._stop_when_done = stop_when_done | 70 self._stop_when_done = stop_when_done |
71 | 71 |
72 if self._reference_files: | 72 if self._reference_files: |
73 # Detect and report a test which has a wrong combination of expectat
ion files. | 73 # Detect and report a test which has a wrong combination of expectat
ion files. |
74 # For example, if 'foo.html' has two expectation files, 'foo-expecte
d.html' and | 74 # For example, if 'foo.html' has two expectation files, 'foo-expecte
d.html' and |
75 # 'foo-expected.txt', we should warn users. One test file must be us
ed exclusively | 75 # 'foo-expected.txt', we should warn users. One test file must be us
ed exclusively |
76 # in either layout tests or reftests, but not in both. | 76 # in either layout tests or reftests, but not in both. |
77 for suffix in ('.txt', '.png', '.wav'): | 77 for suffix in ('.txt', '.png', '.wav'): |
78 expected_filename = self._port.expected_filename(self._test_name
, suffix) | 78 expected_filename = self._port.expected_filename(self._test_name
, suffix) |
79 if self._filesystem.exists(expected_filename): | 79 if self._filesystem.exists(expected_filename): |
80 _log.error('%s is a reftest, but has an unused expectation f
ile. Please remove %s.', | 80 _log.error('%s is a reftest, but has an unused expectation f
ile. Please remove %s.', |
81 self._test_name, expected_filename) | 81 self._test_name, expected_filename) |
82 | 82 |
83 def _expected_driver_output(self): | 83 def _expected_driver_output(self): |
84 return DriverOutput(self._port.expected_text(self._test_name), | 84 return DriverOutput(self._port.expected_text(self._test_name), |
85 self._port.expected_image(self._test_name), | 85 self._port.expected_image(self._test_name), |
86 self._port.expected_checksum(self._test_name), | 86 self._port.expected_checksum(self._test_name), |
87 self._port.expected_audio(self._test_name)) | 87 self._port.expected_audio(self._test_name)) |
88 | 88 |
89 def _should_fetch_expected_checksum(self): | 89 def _should_fetch_expected_checksum(self): |
90 return self._should_run_pixel_test and not (self._options.new_baseline o
r self._options.reset_results) | 90 return self._should_run_pixel_test and not (self._options.new_baseline o
r self._options.reset_results) |
91 | 91 |
92 def _driver_input(self): | 92 def _driver_input(self): |
93 # The image hash is used to avoid doing an image dump if the | 93 # The image hash is used to avoid doing an image dump if the |
94 # checksums match, so it should be set to a blank value if we | 94 # checksums match, so it should be set to a blank value if we |
95 # are generating a new baseline. (Otherwise, an image from a | 95 # are generating a new baseline. (Otherwise, an image from a |
96 # previous run will be copied into the baseline.) | 96 # previous run will be copied into the baseline.) |
97 image_hash = None | 97 image_hash = None |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
136 return TestResult(self._test_name, failures, driver_output.test_time, dr
iver_output.has_stderr(), | 136 return TestResult(self._test_name, failures, driver_output.test_time, dr
iver_output.has_stderr(), |
137 pid=driver_output.pid) | 137 pid=driver_output.pid) |
138 | 138 |
139 def _run_compare_test(self): | 139 def _run_compare_test(self): |
140 driver_output = self._driver.run_test(self._driver_input(), self._stop_w
hen_done) | 140 driver_output = self._driver.run_test(self._driver_input(), self._stop_w
hen_done) |
141 expected_driver_output = self._expected_driver_output() | 141 expected_driver_output = self._expected_driver_output() |
142 | 142 |
143 test_result = self._compare_output(expected_driver_output, driver_output
) | 143 test_result = self._compare_output(expected_driver_output, driver_output
) |
144 if self._should_add_missing_baselines: | 144 if self._should_add_missing_baselines: |
145 self._add_missing_baselines(test_result, driver_output) | 145 self._add_missing_baselines(test_result, driver_output) |
146 test_result_writer.write_test_result(self._filesystem, self._port, self.
_results_directory, self._test_name, driver_output, expected_driver_output, test
_result.failures) | 146 test_result_writer.write_test_result( |
| 147 self._filesystem, |
| 148 self._port, |
| 149 self._results_directory, |
| 150 self._test_name, |
| 151 driver_output, |
| 152 expected_driver_output, |
| 153 test_result.failures) |
147 return test_result | 154 return test_result |
148 | 155 |
149 def _run_rebaseline(self): | 156 def _run_rebaseline(self): |
150 driver_output = self._driver.run_test(self._driver_input(), self._stop_w
hen_done) | 157 driver_output = self._driver.run_test(self._driver_input(), self._stop_w
hen_done) |
151 failures = self._handle_error(driver_output) | 158 failures = self._handle_error(driver_output) |
152 test_result_writer.write_test_result(self._filesystem, self._port, self.
_results_directory, self._test_name, driver_output, None, failures) | 159 test_result_writer.write_test_result( |
| 160 self._filesystem, |
| 161 self._port, |
| 162 self._results_directory, |
| 163 self._test_name, |
| 164 driver_output, |
| 165 None, |
| 166 failures) |
153 # FIXME: If the test crashed or timed out, it might be better to avoid | 167 # FIXME: If the test crashed or timed out, it might be better to avoid |
154 # writing new baselines. | 168 # writing new baselines. |
155 self._overwrite_baselines(driver_output) | 169 self._overwrite_baselines(driver_output) |
156 return TestResult(self._test_name, failures, driver_output.test_time, dr
iver_output.has_stderr(), | 170 return TestResult(self._test_name, failures, driver_output.test_time, dr
iver_output.has_stderr(), |
157 pid=driver_output.pid) | 171 pid=driver_output.pid) |
158 | 172 |
159 _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\
n") | 173 _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\
n") |
160 | 174 |
161 def _add_missing_baselines(self, test_result, driver_output): | 175 def _add_missing_baselines(self, test_result, driver_output): |
162 missingImage = test_result.has_failure_matching_types(test_failures.Fail
ureMissingImage, test_failures.FailureMissingImageHash) | 176 missingImage = test_result.has_failure_matching_types( |
| 177 test_failures.FailureMissingImage, |
| 178 test_failures.FailureMissingImageHash) |
163 if test_result.has_failure_matching_types(test_failures.FailureMissingRe
sult): | 179 if test_result.has_failure_matching_types(test_failures.FailureMissingRe
sult): |
164 self._save_baseline_data(driver_output.text, '.txt', self._location_
for_new_baseline(driver_output.text, '.txt')) | 180 self._save_baseline_data(driver_output.text, '.txt', self._location_
for_new_baseline(driver_output.text, '.txt')) |
165 if test_result.has_failure_matching_types(test_failures.FailureMissingAu
dio): | 181 if test_result.has_failure_matching_types(test_failures.FailureMissingAu
dio): |
166 self._save_baseline_data(driver_output.audio, '.wav', self._location
_for_new_baseline(driver_output.audio, '.wav')) | 182 self._save_baseline_data(driver_output.audio, '.wav', self._location
_for_new_baseline(driver_output.audio, '.wav')) |
167 if missingImage: | 183 if missingImage: |
168 self._save_baseline_data(driver_output.image, '.png', self._location
_for_new_baseline(driver_output.image, '.png')) | 184 self._save_baseline_data(driver_output.image, '.png', self._location
_for_new_baseline(driver_output.image, '.png')) |
169 | 185 |
170 def _location_for_new_baseline(self, data, extension): | 186 def _location_for_new_baseline(self, data, extension): |
171 if self._options.add_platform_exceptions: | 187 if self._options.add_platform_exceptions: |
172 return self.VERSION_DIR | 188 return self.VERSION_DIR |
(...skipping 22 matching lines...) Expand all Loading... |
195 elif location == self.VERSION_DIR: | 211 elif location == self.VERSION_DIR: |
196 output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._t
est_name)) | 212 output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._t
est_name)) |
197 elif location == self.PLATFORM_DIR: | 213 elif location == self.PLATFORM_DIR: |
198 output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._
test_name)) | 214 output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._
test_name)) |
199 elif location == self.UPDATE: | 215 elif location == self.UPDATE: |
200 output_dir = fs.dirname(port.expected_filename(self._test_name, exte
nsion)) | 216 output_dir = fs.dirname(port.expected_filename(self._test_name, exte
nsion)) |
201 else: | 217 else: |
202 raise AssertionError('unrecognized baseline location: %s' % location
) | 218 raise AssertionError('unrecognized baseline location: %s' % location
) |
203 | 219 |
204 fs.maybe_make_directory(output_dir) | 220 fs.maybe_make_directory(output_dir) |
205 output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expect
ed" + extension) | 221 output_basename = fs.basename(fs.splitext(self._test_name)[0] + '-expect
ed' + extension) |
206 output_path = fs.join(output_dir, output_basename) | 222 output_path = fs.join(output_dir, output_basename) |
207 _log.info('Writing new expected result "%s"' % port.relative_test_filena
me(output_path)) | 223 _log.info('Writing new expected result "%s"' % port.relative_test_filena
me(output_path)) |
208 port.update_baseline(output_path, data) | 224 port.update_baseline(output_path, data) |
209 | 225 |
210 def _handle_error(self, driver_output, reference_filename=None): | 226 def _handle_error(self, driver_output, reference_filename=None): |
211 """Returns test failures if some unusual errors happen in driver's run. | 227 """Returns test failures if some unusual errors happen in driver's run. |
212 | 228 |
213 Args: | 229 Args: |
214 driver_output: The output from the driver. | 230 driver_output: The output from the driver. |
215 reference_filename: The full path to the reference file which produced
the driver_output. | 231 reference_filename: The full path to the reference file which produced
the driver_output. |
216 This arg is optional and should be used only in reftests until we
have a better way to know | 232 This arg is optional and should be used only in reftests until we
have a better way to know |
217 which html file is used for producing the driver_output. | 233 which html file is used for producing the driver_output. |
218 """ | 234 """ |
219 failures = [] | 235 failures = [] |
220 fs = self._filesystem | 236 fs = self._filesystem |
221 if driver_output.timeout: | 237 if driver_output.timeout: |
222 failures.append(test_failures.FailureTimeout(bool(reference_filename
))) | 238 failures.append(test_failures.FailureTimeout(bool(reference_filename
))) |
223 | 239 |
224 if reference_filename: | 240 if reference_filename: |
225 testname = self._port.relative_test_filename(reference_filename) | 241 testname = self._port.relative_test_filename(reference_filename) |
226 else: | 242 else: |
227 testname = self._test_name | 243 testname = self._test_name |
228 | 244 |
229 if driver_output.crash: | 245 if driver_output.crash: |
230 failures.append(test_failures.FailureCrash(bool(reference_filename), | 246 failures.append(test_failures.FailureCrash(bool(reference_filename), |
231 driver_output.crashed_pro
cess_name, | 247 driver_output.crashed_pro
cess_name, |
232 driver_output.crashed_pid
)) | 248 driver_output.crashed_pid
)) |
233 if driver_output.error: | 249 if driver_output.error: |
234 _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name
, testname)) | 250 _log.debug('%s %s crashed, (stderr lines):' % (self._worker_name
, testname)) |
235 else: | 251 else: |
236 _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, te
stname)) | 252 _log.debug('%s %s crashed, (no stderr)' % (self._worker_name, te
stname)) |
237 elif driver_output.leak: | 253 elif driver_output.leak: |
238 failures.append(test_failures.FailureLeak(bool(reference_filename), | 254 failures.append(test_failures.FailureLeak(bool(reference_filename), |
239 driver_output.leak_log)) | 255 driver_output.leak_log)) |
240 _log.debug("%s %s leaked" % (self._worker_name, testname)) | 256 _log.debug('%s %s leaked' % (self._worker_name, testname)) |
241 elif driver_output.error: | 257 elif driver_output.error: |
242 _log.debug("%s %s output stderr lines:" % (self._worker_name, testna
me)) | 258 _log.debug('%s %s output stderr lines:' % (self._worker_name, testna
me)) |
243 for line in driver_output.error.splitlines(): | 259 for line in driver_output.error.splitlines(): |
244 _log.debug(" %s" % line) | 260 _log.debug(' %s' % line) |
245 return failures | 261 return failures |
246 | 262 |
247 def _compare_output(self, expected_driver_output, driver_output): | 263 def _compare_output(self, expected_driver_output, driver_output): |
248 failures = [] | 264 failures = [] |
249 failures.extend(self._handle_error(driver_output)) | 265 failures.extend(self._handle_error(driver_output)) |
250 | 266 |
251 if driver_output.crash: | 267 if driver_output.crash: |
252 # Don't continue any more if we already have a crash. | 268 # Don't continue any more if we already have a crash. |
253 # In case of timeouts, we continue since we still want to see the te
xt and image output. | 269 # In case of timeouts, we continue since we still want to see the te
xt and image output. |
254 return TestResult(self._test_name, failures, driver_output.test_time
, driver_output.has_stderr(), | 270 return TestResult(self._test_name, failures, driver_output.test_time
, driver_output.has_stderr(), |
(...skipping 21 matching lines...) Expand all Loading... |
276 | 292 |
277 text = driver_output.text or '' | 293 text = driver_output.text or '' |
278 | 294 |
279 if not testharness_results.is_testharness_output(text): | 295 if not testharness_results.is_testharness_output(text): |
280 return False, [] | 296 return False, [] |
281 if not testharness_results.is_testharness_output_passing(text): | 297 if not testharness_results.is_testharness_output_passing(text): |
282 return True, [test_failures.FailureTestHarnessAssertion()] | 298 return True, [test_failures.FailureTestHarnessAssertion()] |
283 return True, [] | 299 return True, [] |
284 | 300 |
285 def _is_render_tree(self, text): | 301 def _is_render_tree(self, text): |
286 return text and "layer at (0,0) size 800x600" in text | 302 return text and 'layer at (0,0) size 800x600' in text |
287 | 303 |
288 def _compare_text(self, expected_text, actual_text): | 304 def _compare_text(self, expected_text, actual_text): |
289 failures = [] | 305 failures = [] |
290 if (expected_text and actual_text and | 306 if (expected_text and actual_text and |
291 # Assuming expected_text is already normalized. | 307 # Assuming expected_text is already normalized. |
292 self._port.do_text_results_differ(expected_text, self._get_normalize
d_output_text(actual_text))): | 308 self._port.do_text_results_differ(expected_text, self._get_norma
lized_output_text(actual_text))): |
293 failures.append(test_failures.FailureTextMismatch()) | 309 failures.append(test_failures.FailureTextMismatch()) |
294 elif actual_text and not expected_text: | 310 elif actual_text and not expected_text: |
295 failures.append(test_failures.FailureMissingResult()) | 311 failures.append(test_failures.FailureMissingResult()) |
296 return failures | 312 return failures |
297 | 313 |
298 def _compare_audio(self, expected_audio, actual_audio): | 314 def _compare_audio(self, expected_audio, actual_audio): |
299 failures = [] | 315 failures = [] |
300 if (expected_audio and actual_audio and | 316 if (expected_audio and actual_audio and |
301 self._port.do_audio_results_differ(expected_audio, actual_audio)): | 317 self._port.do_audio_results_differ(expected_audio, actual_audio)
): |
302 failures.append(test_failures.FailureAudioMismatch()) | 318 failures.append(test_failures.FailureAudioMismatch()) |
303 elif actual_audio and not expected_audio: | 319 elif actual_audio and not expected_audio: |
304 failures.append(test_failures.FailureMissingAudio()) | 320 failures.append(test_failures.FailureMissingAudio()) |
305 return failures | 321 return failures |
306 | 322 |
307 def _get_normalized_output_text(self, output): | 323 def _get_normalized_output_text(self, output): |
308 """Returns the normalized text output, i.e. the output in which | 324 """Returns the normalized text output, i.e. the output in which |
309 the end-of-line characters are normalized to "\n".""" | 325 the end-of-line characters are normalized to "\n".""" |
310 # Running tests on Windows produces "\r\n". The "\n" part is helpfully | 326 # Running tests on Windows produces "\r\n". The "\n" part is helpfully |
311 # changed to "\r\n" by our system (Python/Cygwin), resulting in | 327 # changed to "\r\n" by our system (Python/Cygwin), resulting in |
312 # "\r\r\n", when, in fact, we wanted to compare the text output with | 328 # "\r\r\n", when, in fact, we wanted to compare the text output with |
313 # the normalized text expectation files. | 329 # the normalized text expectation files. |
314 return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n") | 330 return output.replace('\r\r\n', '\r\n').replace('\r\n', '\n') |
315 | 331 |
316 # FIXME: This function also creates the image diff. Maybe that work should | 332 # FIXME: This function also creates the image diff. Maybe that work should |
317 # be handled elsewhere? | 333 # be handled elsewhere? |
318 def _compare_image(self, expected_driver_output, driver_output): | 334 def _compare_image(self, expected_driver_output, driver_output): |
319 failures = [] | 335 failures = [] |
320 # If we didn't produce a hash file, this test must be text-only. | 336 # If we didn't produce a hash file, this test must be text-only. |
321 if driver_output.image_hash is None: | 337 if driver_output.image_hash is None: |
322 return failures | 338 return failures |
323 if not expected_driver_output.image: | 339 if not expected_driver_output.image: |
324 failures.append(test_failures.FailureMissingImage()) | 340 failures.append(test_failures.FailureMissingImage()) |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
361 reference_test_names = [] | 377 reference_test_names = [] |
362 for expectation, reference_filename in putAllMismatchBeforeMatch(self._r
eference_files): | 378 for expectation, reference_filename in putAllMismatchBeforeMatch(self._r
eference_files): |
363 if self._port.lookup_virtual_test_base(self._test_name): | 379 if self._port.lookup_virtual_test_base(self._test_name): |
364 args = self._port.lookup_virtual_test_args(self._test_name) | 380 args = self._port.lookup_virtual_test_args(self._test_name) |
365 else: | 381 else: |
366 args = self._port.lookup_physical_test_args(self._test_name) | 382 args = self._port.lookup_physical_test_args(self._test_name) |
367 reference_test_name = self._port.relative_test_filename(reference_fi
lename) | 383 reference_test_name = self._port.relative_test_filename(reference_fi
lename) |
368 reference_test_names.append(reference_test_name) | 384 reference_test_names.append(reference_test_name) |
369 driver_input = DriverInput(reference_test_name, self._timeout, image
_hash=None, should_run_pixel_test=True, args=args) | 385 driver_input = DriverInput(reference_test_name, self._timeout, image
_hash=None, should_run_pixel_test=True, args=args) |
370 reference_output = self._driver.run_test(driver_input, self._stop_wh
en_done) | 386 reference_output = self._driver.run_test(driver_input, self._stop_wh
en_done) |
371 test_result = self._compare_output_with_reference(reference_output,
test_output, reference_filename, expectation == '!=') | 387 test_result = self._compare_output_with_reference( |
| 388 reference_output, |
| 389 test_output, |
| 390 reference_filename, |
| 391 expectation == '!=') |
372 | 392 |
373 if (expectation == '!=' and test_result.failures) or (expectation ==
'==' and not test_result.failures): | 393 if (expectation == '!=' and test_result.failures) or (expectation ==
'==' and not test_result.failures): |
374 break | 394 break |
375 total_test_time += test_result.test_run_time | 395 total_test_time += test_result.test_run_time |
376 | 396 |
377 assert(reference_output) | 397 assert(reference_output) |
378 test_result_writer.write_test_result(self._filesystem, self._port, self.
_results_directory, self._test_name, test_output, reference_output, test_result.
failures) | 398 test_result_writer.write_test_result( |
| 399 self._filesystem, |
| 400 self._port, |
| 401 self._results_directory, |
| 402 self._test_name, |
| 403 test_output, |
| 404 reference_output, |
| 405 test_result.failures) |
379 | 406 |
380 # FIXME: We don't really deal with a mix of reftest types properly. We p
ass in a set() to reftest_type | 407 # FIXME: We don't really deal with a mix of reftest types properly. We p
ass in a set() to reftest_type |
381 # and only really handle the first of the references in the result. | 408 # and only really handle the first of the references in the result. |
382 reftest_type = list(set([reference_file[0] for reference_file in self._r
eference_files])) | 409 reftest_type = list(set([reference_file[0] for reference_file in self._r
eference_files])) |
383 return TestResult(self._test_name, test_result.failures, total_test_time
+ test_result.test_run_time, | 410 return TestResult(self._test_name, test_result.failures, total_test_time
+ test_result.test_run_time, |
384 test_result.has_stderr, reftest_type=reftest_type, pid
=test_result.pid, | 411 test_result.has_stderr, reftest_type=reftest_type, pid
=test_result.pid, |
385 references=reference_test_names) | 412 references=reference_test_names) |
386 | 413 |
387 def _compare_output_with_reference(self, reference_driver_output, actual_dri
ver_output, reference_filename, mismatch): | 414 def _compare_output_with_reference(self, reference_driver_output, actual_dri
ver_output, reference_filename, mismatch): |
388 total_test_time = reference_driver_output.test_time + actual_driver_outp
ut.test_time | 415 total_test_time = reference_driver_output.test_time + actual_driver_outp
ut.test_time |
(...skipping 10 matching lines...) Expand all Loading... |
399 if not reference_driver_output.image_hash and not actual_driver_output.i
mage_hash: | 426 if not reference_driver_output.image_hash and not actual_driver_output.i
mage_hash: |
400 failures.append(test_failures.FailureReftestNoImagesGenerated(refere
nce_filename)) | 427 failures.append(test_failures.FailureReftestNoImagesGenerated(refere
nce_filename)) |
401 elif mismatch: | 428 elif mismatch: |
402 if reference_driver_output.image_hash == actual_driver_output.image_
hash: | 429 if reference_driver_output.image_hash == actual_driver_output.image_
hash: |
403 diff, err_str = self._port.diff_image(reference_driver_output.im
age, actual_driver_output.image) | 430 diff, err_str = self._port.diff_image(reference_driver_output.im
age, actual_driver_output.image) |
404 if not diff: | 431 if not diff: |
405 failures.append(test_failures.FailureReftestMismatchDidNotOc
cur(reference_filename)) | 432 failures.append(test_failures.FailureReftestMismatchDidNotOc
cur(reference_filename)) |
406 elif err_str: | 433 elif err_str: |
407 _log.error(err_str) | 434 _log.error(err_str) |
408 else: | 435 else: |
409 _log.warning(" %s -> ref test hashes matched but diff faile
d" % self._test_name) | 436 _log.warning(' %s -> ref test hashes matched but diff faile
d' % self._test_name) |
410 | 437 |
411 elif reference_driver_output.image_hash != actual_driver_output.image_ha
sh: | 438 elif reference_driver_output.image_hash != actual_driver_output.image_ha
sh: |
412 diff, err_str = self._port.diff_image(reference_driver_output.image,
actual_driver_output.image) | 439 diff, err_str = self._port.diff_image(reference_driver_output.image,
actual_driver_output.image) |
413 if diff: | 440 if diff: |
414 failures.append(test_failures.FailureReftestMismatch(reference_f
ilename)) | 441 failures.append(test_failures.FailureReftestMismatch(reference_f
ilename)) |
415 elif err_str: | 442 elif err_str: |
416 _log.error(err_str) | 443 _log.error(err_str) |
417 else: | 444 else: |
418 _log.warning(" %s -> ref test hashes didn't match but diff pass
ed" % self._test_name) | 445 _log.warning(" %s -> ref test hashes didn't match but diff pass
ed" % self._test_name) |
419 | 446 |
420 return TestResult(self._test_name, failures, total_test_time, has_stderr
, pid=actual_driver_output.pid) | 447 return TestResult(self._test_name, failures, total_test_time, has_stderr
, pid=actual_driver_output.pid) |
OLD | NEW |