Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/test.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/test.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/test.py
index b82e96ff89f8370332cabf0a02feab25cb2a7835..58bc7da6896bbd3a0b921dfe8f15a0d030630cb4 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -109,6 +109,7 @@ TOTAL_CRASHES = 80
 UNEXPECTED_PASSES = 1
 UNEXPECTED_FAILURES = 26
 
+
 def unit_test_list():
     tests = TestList()
     tests.add('failures/expected/crash.html', crash=True)
@@ -126,25 +127,27 @@ def unit_test_list():
               actual_checksum='image_checksum_fail-checksum',
               actual_image='image_checksum_fail-png')
     tests.add('failures/expected/audio.html',
-              actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
-              actual_text=None, expected_text=None,
-              actual_image=None, expected_image=None,
+              actual_audio=base64.b64encode('audio_fail-wav'),
+              expected_audio='audio-wav',
+              actual_text=None,
+              expected_text=None,
+              actual_image=None,
+              expected_image=None,
               actual_checksum=None)
     tests.add('failures/expected/keyboard.html', keyboard=True)
-    tests.add('failures/expected/missing_check.html',
-              expected_image='missing_check-png')
+    tests.add('failures/expected/missing_check.html', expected_image='missing_check-png')
     tests.add('failures/expected/missing_image.html', expected_image=None)
-    tests.add('failures/expected/missing_audio.html', expected_audio=None,
-              actual_text=None, expected_text=None,
-              actual_image=None, expected_image=None,
+    tests.add('failures/expected/missing_audio.html',
+              expected_audio=None,
+              actual_text=None,
+              expected_text=None,
+              actual_image=None,
+              expected_image=None,
               actual_checksum=None)
     tests.add('failures/expected/missing_text.html', expected_text=None)
-    tests.add('failures/expected/newlines_leading.html',
-              expected_text="\nfoo\n", actual_text="foo\n")
-    tests.add('failures/expected/newlines_trailing.html',
-              expected_text="foo\n\n", actual_text="foo\n")
-    tests.add('failures/expected/newlines_with_excess_CR.html',
-              expected_text="foo\r\r\r\n", actual_text="foo\n")
+    tests.add('failures/expected/newlines_leading.html', expected_text="\nfoo\n", actual_text="foo\n")
+    tests.add('failures/expected/newlines_trailing.html', expected_text="foo\n\n", actual_text="foo\n")
+    tests.add('failures/expected/newlines_with_excess_CR.html', expected_text="foo\r\r\r\n", actual_text="foo\n")
     tests.add('failures/expected/text.html', actual_text='text_fail-png')
     tests.add('failures/expected/crash_then_text.html')
     tests.add('failures/expected/skip_text.html', actual_text='text diff')
@@ -152,19 +155,19 @@
     tests.add('failures/unexpected/missing_text.html', expected_text=None)
     tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
     tests.add('failures/unexpected/missing_image.html', expected_image=None)
-    tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
+    tests.add('failures/unexpected/missing_render_tree_dump.html',
+              actual_text="""layer at (0,0) size 800x600
   RenderView at (0,0) size 800x600
 layer at (0,0) size 800x34
   RenderBlock {HTML} at (0,0) size 800x34
     RenderBody {BODY} at (8,8) size 784x18
       RenderText {#text} at (0,0) size 133x18
         text run at (0,0) width 133: "This is an image test!"
-""", expected_text=None)
+""",
+              expected_text=None)
     tests.add('failures/unexpected/crash.html', crash=True)
-    tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
-              error="mock-std-error-output")
-    tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
-              error="mock-std-error-output")
+    tests.add('failures/unexpected/crash-with-stderr.html', crash=True, error="mock-std-error-output")
+    tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True, error="mock-std-error-output")
     tests.add('failures/unexpected/pass.html')
     tests.add('failures/unexpected/text-checksum.html',
               actual_text='text-checksum_fail-txt',
@@ -173,8 +176,7 @@ layer at (0,0) size 800x34
               actual_text='text-image-checksum_fail-txt',
               actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
               actual_checksum='text-image-checksum_fail-checksum')
-    tests.add('failures/unexpected/checksum-with-matching-image.html',
-              actual_checksum='text-image-checksum_fail-checksum')
+    tests.add('failures/unexpected/checksum-with-matching-image.html', actual_checksum='text-image-checksum_fail-checksum')
     tests.add('failures/unexpected/skip_pass.html')
     tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
     tests.add('failures/unexpected/text_then_crash.html')
@@ -187,13 +189,15 @@ layer at (0,0) size 800x34
     tests.add('passes/error.html', error='stuff going to stderr')
     tests.add('passes/image.html')
     tests.add('passes/audio.html',
-              actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
-              actual_text=None, expected_text=None,
-              actual_image=None, expected_image=None,
+              actual_audio=base64.b64encode('audio-wav'),
+              expected_audio='audio-wav',
+              actual_text=None,
+              expected_text=None,
+              actual_image=None,
+              expected_image=None,
               actual_checksum=None)
     tests.add('passes/platform_image.html')
-    tests.add('passes/checksum_in_image.html',
-              expected_image='tEXtchecksum\x00checksum_in_image-checksum')
+    tests.add('passes/checksum_in_image.html', expected_image='tEXtchecksum\x00checksum_in_image-checksum')
     tests.add('passes/skipped/skip.html')
 
     # Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
@@ -202,8 +206,7 @@ layer at (0,0) size 800x34
 
     # Text output files contain "\r\n" on Windows. This may be
     # helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
-    tests.add('passes/text.html',
-              expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
+    tests.add('passes/text.html', expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
 
     # For reftests.
     tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
@@ -217,7 +220,10 @@ layer at (0,0) size 800x34
     tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
     tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
     tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
-    tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
+    tests.add_reftest('failures/unexpected/crash-reftest.html',
+                      'failures/unexpected/crash-reftest-expected.html',
+                      same_image=True,
+                      crash=True)
     tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
     tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
     tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
@@ -253,11 +259,11 @@ layer at (0,0) size 800x34
 
     # For testing --pixel-test-directories.
     tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
-              actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
-              expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+              actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+              expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
     tests.add('failures/unexpected/image_not_in_pixeldir.html',
-              actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
-              expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+              actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+              expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
 
     # For testing that virtual test suites don't expand names containing themselves
     # See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
@@ -266,7 +272,6 @@ layer at (0,0) size 800x34
 
     return tests
 
-
 # Here we use a non-standard location for the layout tests, to ensure that
 # this works. The path contains a '.' in the name because we've seen bugs
 # related to this before.
@@ -353,7 +358,8 @@ Bug(test) passes/text.html [ Pass ]
         add_file(test, '-expected.txt', test.expected_text)
         add_file(test, '-expected.png', test.expected_image)
 
-    filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'virtual_passes', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
+    filesystem.write_text_file(
+        filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'virtual_passes', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
     # Clear the list of written files so that we can watch what happens during testing.
     filesystem.clear_written_files()
 
@@ -369,20 +375,17 @@ class TestPort(Port):
     # The list should be sorted so that a later platform will reuse
     # an earlier platform's baselines if they are the same (e.g.,
     # 'mac10.10' should precede 'mac10.9').
-    ALL_BASELINE_VARIANTS = (
-        'test-linux-trusty', 'test-linux-precise', 'test-linux-x86',
-        'test-mac-mac10.11', 'test-mac-mac10.10',
-        'test-win-win10', 'test-win-win7'
-    )
+    ALL_BASELINE_VARIANTS = ('test-linux-trusty', 'test-linux-precise', 'test-linux-x86', 'test-mac-mac10.11', 'test-mac-mac10.10',
+                             'test-win-win10', 'test-win-win7')
 
     FALLBACK_PATHS = {
-        'win7': ['test-win-win7', 'test-win-win10'],
-        'win10': ['test-win-win10'],
-        'mac10.10': ['test-mac-mac10.10', 'test-mac-mac10.11'],
-        'mac10.11': ['test-mac-mac10.11'],
-        'trusty': ['test-linux-trusty', 'test-win-win7'],
-        'precise': ['test-linux-precise', 'test-linux-trusty', 'test-win-win7'],
-        'linux32': ['test-linux-x86', 'test-linux-precise', 'test-linux-trusty', 'test-win-win7'],
+        'win7': ['test-win-win7', 'test-win-win10'],
+        'win10': ['test-win-win10'],
+        'mac10.10': ['test-mac-mac10.10', 'test-mac-mac10.11'],
+        'mac10.11': ['test-mac-mac10.11'],
+        'trusty': ['test-linux-trusty', 'test-win-win7'],
+        'precise': ['test-linux-precise', 'test-linux-trusty', 'test-win-win7'],
+        'linux32': ['test-linux-x86', 'test-linux-precise', 'test-linux-trusty', 'test-win-win7'],
     }
 
     @classmethod
@@ -471,9 +474,7 @@
         return '/test.checkout'
 
     def _skipped_tests_for_unsupported_features(self, test_list):
-        return set(['failures/expected/skip_text.html',
-                    'failures/unexpected/skip_pass.html',
-                    'virtual/skipped/failures/expected'])
+        return set(['failures/expected/skip_text.html', 'failures/unexpected/skip_pass.html', 'virtual/skipped/failures/expected'])
 
     def name(self):
         return self._name
@@ -530,38 +531,32 @@
         test_configurations = []
         for version, architecture in self._all_systems():
             for build_type in self._all_build_types():
-                test_configurations.append(TestConfiguration(
-                    version=version,
-                    architecture=architecture,
-                    build_type=build_type))
+                test_configurations.append(TestConfiguration(version=version, architecture=architecture, build_type=build_type))
         return test_configurations
 
     def _all_systems(self):
-        return (('mac10.10', 'x86'),
-                ('mac10.11', 'x86'),
-                ('win7', 'x86'),
-                ('win10', 'x86'),
-                ('linux32', 'x86'),
-                ('precise', 'x86_64'),
-                ('trusty', 'x86_64'))
+        return (('mac10.10', 'x86'), ('mac10.11', 'x86'), ('win7', 'x86'), ('win10', 'x86'), ('linux32', 'x86'),
+                ('precise', 'x86_64'), ('trusty', 'x86_64'))
 
     def _all_build_types(self):
         return ('debug', 'release')
 
     def configuration_specifier_macros(self):
         """To avoid surprises when introducing new macros, these are intentionally fixed in time."""
-        return {
-            'mac': ['mac10.10', 'mac10.11'],
-            'win': ['win7', 'win10'],
-            'linux': ['linux32', 'precise', 'trusty']
-        }
+        return {'mac': ['mac10.10', 'mac10.11'], 'win': ['win7', 'win10'], 'linux': ['linux32', 'precise', 'trusty']}
 
     def virtual_test_suites(self):
         return [
-            VirtualTestSuite(prefix='virtual_passes', base='passes', args=['--virtual-arg']),
-            VirtualTestSuite(prefix='skipped', base='failures/expected', args=['--virtual-arg2']),
-            VirtualTestSuite(prefix='references_use_default_args', base='passes/reftest.html',
-                             args=['--virtual-arg'], references_use_default_args=True),
+            VirtualTestSuite(prefix='virtual_passes',
+                             base='passes',
+                             args=['--virtual-arg']),
+            VirtualTestSuite(prefix='skipped',
+                             base='failures/expected',
+                             args=['--virtual-arg2']),
+            VirtualTestSuite(prefix='references_use_default_args',
+                             base='passes/reftest.html',
+                             args=['--virtual-arg'],
+                             references_use_default_args=True),
         ]
 
 
@@ -576,7 +571,8 @@ class TestDriver(Driver):
 
     def cmd_line(self, pixel_tests, per_test_args):
         pixel_tests_flag = '-p' if pixel_tests else ''
-        return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_driver_flag', []) + per_test_args
+        return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_driver_flag',
+                                                                                           []) + per_test_args
 
     def run_test(self, driver_input, stop_when_done):
         if not self.started:
@@ -654,11 +650,19 @@
             image = None
         else:
             image = test.actual_image
-        return DriverOutput(actual_text, image, test.actual_checksum, audio,
-                            crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
-                            crashed_pid=crashed_pid, crash_log=crash_log,
-                            test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid,
-                            leak=test.leak)
+        return DriverOutput(actual_text,
+                            image,
+                            test.actual_checksum,
+                            audio,
+                            crash=(crash or web_process_crash),
+                            crashed_process_name=crashed_process_name,
+                            crashed_pid=crashed_pid,
+                            crash_log=crash_log,
+                            test_time=time.time() - start_time,
+                            timeout=test.timeout,
+                            error=test.error,
+                            pid=self.pid,
+                            leak=test.leak)
 
     def stop(self):
         self.started = False