Index: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
index 9be70ec5698bd5db165b1b68220b1c62e762bada..8ba23ec137b40a0e74ebd2b02cf5334d4f951590 100644
--- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
+++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -70,10 +70,7 @@ def parse_args(extra_args=None, tests_included=False, new_results=False, print_n
args.extend(extra_args)
if not tests_included:
# We use the glob to test that globbing works.
- args.extend(['passes',
- 'http/tests',
- 'websocket/tests',
- 'failures/expected/*'])
+ args.extend(['passes', 'http/tests', 'websocket/tests', 'failures/expected/*'])
return run_webkit_tests.parse_args(args)
@@ -88,15 +85,15 @@ def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None,
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
- run_details = run_webkit_tests.run(port_obj, options, parsed_args,
- logging_stream=logging_stream, stdout=stdout)
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream, stdout=stdout)
return run_details.exit_code == 0
def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
options, parsed_args = parse_args(extra_args=extra_args,
tests_included=tests_included,
- print_nothing=False, new_results=new_results)
+ print_nothing=False,
+ new_results=new_results)
host = host or MockHost()
if not port_obj:
port_obj = host.port_factory.get(port_name=options.platform, options=options)
@@ -113,8 +110,7 @@ def run_and_capture(port_obj, options, parsed_args, shared_port=True):
oc.capture_output()
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
- run_details = run_webkit_tests.run(port_obj, options, parsed_args,
- logging_stream=logging_stream, stdout=stdout)
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream, stdout=stdout)
finally:
oc.restore_output()
return (run_details, logging_stream)
@@ -151,8 +147,7 @@ def get_test_results(args, host=None, port_obj=None):
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
try:
- run_details = run_webkit_tests.run(port_obj, options, parsed_args,
- logging_stream=logging_stream, stdout=stdout)
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream, stdout=stdout)
finally:
oc.restore_output()
@@ -208,15 +203,14 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(details.exit_code, test.UNEXPECTED_PASSES)
self.assertEqual(details.all_retry_results[0].total, test.UNEXPECTED_PASSES)
- expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name) - test.TOTAL_CRASHES
+ expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(
+ details.initial_results.unexpected_results_by_name) - test.TOTAL_CRASHES
expected_summary_str = ''
if details.initial_results.expected_failures > 0:
- expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
+ expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures,
+ details.initial_results.expected_failures)
one_line_summary = "%d tests ran as expected%s, %d didn't (%d didn't run):\n" % (
- expected_tests,
- expected_summary_str,
- len(details.initial_results.unexpected_results_by_name),
- test.TOTAL_CRASHES)
+ expected_tests, expected_summary_str, len(details.initial_results.unexpected_results_by_name), test.TOTAL_CRASHES)
self.assertTrue(one_line_summary in logging_stream.buflist)
# Ensure the results were summarized properly.
@@ -254,15 +248,15 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_child_processes_2(self):
if self.should_test_processes:
- _, regular_output, _ = logging_run(
- ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
+ _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
def test_child_processes_min(self):
if self.should_test_processes:
_, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/virtual_passes', 'passes'],
- tests_included=True, shared_port=False)
+ tests_included=True,
+ shared_port=False)
self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
def test_dryrun(self):
@@ -285,12 +279,18 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
# Exceptions raised in a separate process are re-packaged into
# WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can
# be printed, but don't display properly in the unit test exception handlers.
- self.assertRaises(BaseException, logging_run,
- ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
+ self.assertRaises(BaseException,
+ logging_run,
+ ['failures/expected/exception.html', '--child-processes', '1'],
+ tests_included=True)
if self.should_test_processes:
- self.assertRaises(BaseException, logging_run,
- ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
+ self.assertRaises(
+ BaseException,
+ logging_run,
+ ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'],
+ tests_included=True,
+ shared_port=False)
def test_device_failure(self):
# Test that we handle a device going offline during a test properly.
@@ -311,7 +311,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)
if self.should_test_processes:
- _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
+ _, regular_output, _ = logging_run(
+ ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'],
+ tests_included=True,
+ shared_port=False)
self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))
def test_no_tests_found(self):
@@ -330,9 +333,12 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertContains(err, 'No tests to run.\n')
def test_natural_order(self):
- tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html',
+ 'passes/args.html']
tests_run = get_tests_run(['--order=natural'] + tests_to_run)
- self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
+ self.assertEqual(
+ ['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'
+ ], tests_run)
def test_natural_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
@@ -340,12 +346,14 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
def test_random_order(self):
- tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html',
+ 'passes/args.html']
tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(sorted(tests_to_run), sorted(tests_run))
def test_random_daily_seed_order(self):
- tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html',
+ 'passes/args.html']
tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
self.assertEqual(sorted(tests_to_run), sorted(tests_run))
@@ -356,7 +364,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(tests_run.count('passes/args.html'), 2)
def test_no_order(self):
- tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html',
+ 'passes/args.html']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
@@ -368,7 +377,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_no_order_with_directory_entries_in_natural_order(self):
tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
- self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
+ self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html',
+ 'http/tests/passes/text.html'])
def test_repeat_each(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
@@ -387,8 +397,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
num_tests_run_by_default = len(tests_run)
# Check that nothing changes when we specify skipped=default.
- self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
- num_tests_run_by_default)
+ self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])), num_tests_run_by_default)
# Now check that we run one more test (the skipped one).
tests_run = get_tests_run(['--skipped=ignore', 'passes'])
@@ -412,7 +421,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
_, err, _ = logging_run(
['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
- tests_included=True, host=host)
+ tests_included=True,
+ host=host)
self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")
def test_run_chunk(self):
@@ -467,8 +477,9 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_reftest_crash_log_is_saved(self):
host = MockHost()
self.assertTrue(logging_run(['failures/unexpected/crash-reftest.html'], tests_included=True, host=host))
- self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-reftest-crash-log.txt'),
- 'reftest crash log')
+ self.assertEqual(
+ host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-reftest-crash-log.txt'),
+ 'reftest crash log')
def test_test_list(self):
host = MockHost()
@@ -523,16 +534,18 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
- details, err, _ = logging_run(['--no-show-results', '--retry-failures',
- 'failures/expected/missing_image.html',
- 'failures/unexpected/missing_text.html',
- 'failures/unexpected/text-image-checksum.html'],
- tests_included=True, host=host)
+ details, err, _ = logging_run(
+ ['--no-show-results', '--retry-failures', 'failures/expected/missing_image.html',
+ 'failures/unexpected/missing_text.html', 'failures/unexpected/text-image-checksum.html'],
+ tests_included=True,
+ host=host)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 2)
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
- self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
- self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
+ self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true')
+ != -1)
+ self.assertTrue(json_string.find(
+ '"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
self.assertTrue(json_string.find('"num_regressions":2') != -1)
self.assertTrue(json_string.find('"num_flaky":0') != -1)
@@ -557,11 +570,9 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_pixel_test_directories(self):
host = MockHost()
-
"""Both tests have failing checksum. We include only the first in pixel tests so only that should fail."""
args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
- 'failures/unexpected/pixeldir/image_in_pixeldir.html',
- 'failures/unexpected/image_not_in_pixeldir.html']
+ 'failures/unexpected/pixeldir/image_in_pixeldir.html', 'failures/unexpected/image_not_in_pixeldir.html']
details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
self.assertEqual(details.exit_code, 1)
@@ -572,18 +583,24 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_crash_with_stderr(self):
host = MockHost()
_, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
- self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
+ self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find(
+ '{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
def test_no_image_failure_with_image_diff(self):
host = MockHost()
- _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
- self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
+ _, regular_output, _ = logging_run(
+ ['failures/unexpected/checksum-with-matching-image.html'],
+ tests_included=True,
+ host=host)
+ self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') !=
+ -1)
def test_exit_after_n_failures_upload(self):
host = MockHost()
details, regular_output, user = logging_run(
- ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
- tests_included=True, host=host)
+ ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
+ tests_included=True,
+ host=host)
# By returning False, we know that the incremental results were generated and then deleted.
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
@@ -602,7 +619,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_exit_after_n_failures(self):
# Unexpected failures should result in tests stopping.
- tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
+ tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures',
+ '1'])
self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
# But we'll keep going for expected ones.
@@ -615,7 +633,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(['failures/unexpected/crash.html'], tests_run)
# Same with timeouts.
- tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
+ tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'
+ ])
self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
# But we'll keep going for expected ones.
@@ -650,7 +669,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_retrying_default_value(self):
host = MockHost()
- details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ details, err, _ = logging_run(
+ ['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'],
+ tests_included=True,
+ host=host)
self.assertEqual(details.exit_code, 1)
self.assertFalse('Retrying' in err.getvalue())
@@ -688,7 +710,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
# Now we test that --clobber-old-results does remove the old entries and the old retries,
# and that we don't retry again.
host = MockHost()
- details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
+ details, err, _ = logging_run(
+ ['--no-retry-failures', '--clobber-old-results', 'failures/flaky'],
+ tests_included=True,
+ host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Clobbering old results' in err.getvalue())
self.assertTrue('flaky/text.html' in err.getvalue())
@@ -712,32 +737,49 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_retrying_force_pixel_tests(self):
host = MockHost()
- details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ details, err, _ = logging_run(
+ ['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'],
+ tests_included=True,
+ host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Retrying' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.png'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.png'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.png'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.png'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.png'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.png'))
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
json = parse_full_results(json_string)
self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
- {"expected": "PASS", "actual": "TEXT IMAGE+TEXT IMAGE+TEXT IMAGE+TEXT", "is_unexpected": True})
+ {"expected": "PASS",
+ "actual": "TEXT IMAGE+TEXT IMAGE+TEXT IMAGE+TEXT",
+ "is_unexpected": True})
self.assertFalse(json["pixel_tests_enabled"])
self.assertEqual(details.enabled_pixel_tests_in_retry, True)
def test_retrying_uses_retry_directories(self):
host = MockHost()
- details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ details, err, _ = logging_run(
+ ['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'],
+ tests_included=True,
+ host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists(
+ '/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.txt'))
def test_run_order__inline(self):
# These next tests test that we run the tests in ascending alphabetical
@@ -750,8 +792,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(tests_run, sorted(tests_run))
def test_virtual(self):
- self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
- 'virtual/passes/text.html', 'virtual/passes/args.html']))
+ self.assertTrue(passing_run(['passes/text.html', 'passes/args.html', 'virtual/passes/text.html', 'virtual/passes/args.html'
+ ]))
def test_reftest_run(self):
tests_run = get_tests_run(['passes/reftest.html'])
@@ -790,8 +832,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_reftest_with_virtual_reference(self):
_, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())
- self.assertTrue(
- re.search('args: --virtual-arg\s*reference_args: --virtual-arg\s*ref:', err.getvalue()))
+ self.assertTrue(re.search('args: --virtual-arg\s*reference_args: --virtual-arg\s*ref:', err.getvalue()))
def test_reftest_virtual_references_use_default_args(self):
test_name = 'virtual/references_use_default_args/passes/reftest.html'
@@ -813,8 +854,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_additional_expectations(self):
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ Failure ]\n')
- self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
- tests_included=True, host=host))
+ self.assertTrue(passing_run(
+ ['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
+ tests_included=True,
+ host=host))
@staticmethod
def has_test_of_type(tests, type):
@@ -862,7 +905,9 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
port_name = 'mac-mac10.11'
out = StringIO.StringIO()
err = StringIO.StringIO()
- self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
+ self.assertEqual(
+ run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out,
+ err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
def test_verbose_in_child_processes(self):
# When we actually run multiple processes, we may have to reconfigure logging in the
@@ -874,13 +919,15 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
if not self.should_test_processes:
return
- options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
+ options, parsed_args = parse_args(
+ ['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'],
+ tests_included=True,
+ print_nothing=False)
host = MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
- run_webkit_tests.run(port_obj, options, parsed_args,
- logging_stream=logging_stream, stdout=stdout)
+ run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream, stdout=stdout)
self.assertTrue('text.html passed' in logging_stream.getvalue())
self.assertTrue('image.html passed' in logging_stream.getvalue())
@@ -889,7 +936,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
# get output or mack mock ports work again.
host = Host()
_, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
- tests_included=True, host=host)
+ tests_included=True,
+ host=host)
self.assertTrue('OUT:' in err.getvalue())
def test_write_full_results_to(self):
@@ -901,15 +949,13 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
def test_buildbot_results_are_printed_on_early_exit(self):
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
- res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
- 'failures/unexpected/missing_text.html',
- 'failures/unexpected/missing_image.html'],
- stdout, stderr)
+ res = run_webkit_tests.main(
+ ['--platform', 'test', '--exit-after-n-failures', '1', 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/missing_image.html'], stdout, stderr)
self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
- self.assertEqual(stdout.getvalue(),
- ('\n'
- 'Regressions: Unexpected missing results (1)\n'
- ' failures/unexpected/missing_image.html [ Missing ]\n\n'))
+ self.assertEqual(stdout.getvalue(), ('\n'
+ 'Regressions: Unexpected missing results (1)\n'
+ ' failures/unexpected/missing_image.html [ Missing ]\n\n'))
class EndToEndTest(unittest.TestCase):
@@ -926,17 +972,23 @@ class EndToEndTest(unittest.TestCase):
self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
- self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
- {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
- self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
- {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
- self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
- {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})
+ self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"], {"expected": "PASS",
+ "actual": "IMAGE",
+ "reftest_type": ["=="],
+ "is_unexpected": True})
+ self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"], {"expected": "PASS",
+ "actual": "IMAGE",
+ "reftest_type": ["!="],
+ "is_unexpected": True})
+ self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"], {"expected": "PASS",
+ "actual": "IMAGE",
+ "reftest_type": ["==", "!="],
+ "is_unexpected": True})
class RebaselineTest(unittest.TestCase, StreamTestingMixin):
def assertBaselines(self, file_list, file, extensions, err):
- "assert that the file_list contains the baselines."""
+ "assert that the file_list contains the baselines." ""
for ext in extensions:
baseline = file + "-expected" + ext
baseline_msg = 'Writing new expected result "%s"\n' % baseline
@@ -952,7 +1004,9 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
details, err, _ = logging_run(
['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
- tests_included=True, host=host, new_results=True)
+ tests_included=True,
+ host=host,
+ new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 9)
@@ -963,11 +1017,12 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
- details, err, _ = logging_run(['--no-show-results',
- 'failures/unexpected/missing_text.html',
- 'failures/unexpected/missing_image.html',
- 'failures/unexpected/missing_render_tree_dump.html'],
- tests_included=True, host=host, new_results=True)
+ details, err, _ = logging_run(
+ ['--no-show-results', 'failures/unexpected/missing_text.html', 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True,
+ host=host,
+ new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 3)
self.assertEqual(len(file_list), 11)
@@ -988,12 +1043,13 @@ Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
- details, err, _ = logging_run(['--no-show-results',
- 'failures/unexpected/missing_text.html',
- 'failures/unexpected/missing_image.html',
- 'failures/unexpected/missing_audio.html',
- 'failures/unexpected/missing_render_tree_dump.html'],
- tests_included=True, host=host, new_results=True, port_obj=port)
+ details, err, _ = logging_run(
+ ['--no-show-results', 'failures/unexpected/missing_text.html', 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_audio.html', 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True,
+ host=host,
+ new_results=True,
+ port_obj=port)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 8)
@@ -1014,12 +1070,13 @@ Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
- details, err, _ = logging_run(['--pixel-tests', '--reset-results',
- 'failures/unexpected/missing_text.html',
- 'failures/unexpected/missing_image.html',
- 'failures/unexpected/missing_audio.html',
- 'failures/unexpected/missing_render_tree_dump.html'],
- tests_included=True, host=host, new_results=True, port_obj=port)
+ details, err, _ = logging_run(
+ ['--pixel-tests', '--reset-results', 'failures/unexpected/missing_text.html', 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_audio.html', 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True,
+ host=host,
+ new_results=True,
+ port_obj=port)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 12)
@@ -1033,19 +1090,21 @@ Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
host = MockHost()
details, err, _ = logging_run(
['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
- tests_included=True, host=host, new_results=True)
+ tests_included=True,
+ host=host,
+ new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 9)
- self.assertBaselines(file_list,
- "platform/test-mac-mac10.10/passes/image", [".txt", ".png"], err)
- self.assertBaselines(file_list,
- "platform/test-mac-mac10.10/failures/expected/missing_image", [".txt", ".png"], err)
+ self.assertBaselines(file_list, "platform/test-mac-mac10.10/passes/image", [".txt", ".png"], err)
+ self.assertBaselines(file_list, "platform/test-mac-mac10.10/failures/expected/missing_image", [".txt", ".png"], err)
class PortTest(unittest.TestCase):
def assert_mock_port_works(self, port_name, args=[]):
- self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
+ self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'],
+ tests_included=True,
+ host=Host()))
def disabled_test_mac_lion(self):
self.assert_mock_port_works('mac-lion')
@@ -1060,7 +1119,6 @@ class MainTest(unittest.TestCase):
raise KeyboardInterrupt
def successful_run(port, options, args, printer):
-
class FakeRunDetails(object):
exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS |