| Index: Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
|
| diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
|
| index 7e0aa4da1c508e02c5b08d2fbf341e98850db15d..2afba1842b8e7762658adb494a325a822405897f 100644
|
| --- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
|
| +++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
|
| @@ -161,12 +161,13 @@ def get_test_results(args, host=None, port_obj=None):
|
|
|
|
|
| def parse_full_results(full_results_text):
|
| - json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
|
| + json_to_eval = full_results_text.replace('ADD_RESULTS(', '').replace(');', '')
|
| compressed_results = json.loads(json_to_eval)
|
| return compressed_results
|
|
|
|
|
| class StreamTestingMixin(object):
|
| +
|
| def assertContains(self, stream, string):
|
| self.assertTrue(string in stream.getvalue())
|
|
|
| @@ -178,6 +179,7 @@ class StreamTestingMixin(object):
|
|
|
|
|
| class RunTest(unittest.TestCase, StreamTestingMixin):
|
| +
|
| def setUp(self):
|
| # A real PlatformInfo object is used here instead of a
|
| # MockPlatformInfo because we need to actually check for
|
| @@ -202,10 +204,12 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
|
| self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)
|
|
|
| - expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
|
| + expected_tests = details.initial_results.total - details.initial_results.expected_skips - \
|
| + len(details.initial_results.unexpected_results_by_name)
|
| expected_summary_str = ''
|
| if details.initial_results.expected_failures > 0:
|
| - expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
|
| + expected_summary_str = " (%d passed, %d didn't)" % (
|
| + expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
|
| one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
|
| expected_tests,
|
| expected_summary_str,
|
| @@ -217,7 +221,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
|
|
| # Ensure the results were written out and displayed.
|
| failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
|
| - json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
|
| + json_to_eval = failing_results_text.replace('ADD_RESULTS(', '').replace(');', '')
|
| self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)
|
|
|
| full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
|
| @@ -235,15 +239,15 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| if not self.should_test_processes:
|
| return
|
| save_env_webkit_test_max_locked_shards = None
|
| - if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
|
| - save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
|
| - del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
|
| + if 'WEBKIT_TEST_MAX_LOCKED_SHARDS' in os.environ:
|
| + save_env_webkit_test_max_locked_shards = os.environ['WEBKIT_TEST_MAX_LOCKED_SHARDS']
|
| + del os.environ['WEBKIT_TEST_MAX_LOCKED_SHARDS']
|
| _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
|
| try:
|
| self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
|
| finally:
|
| if save_env_webkit_test_max_locked_shards:
|
| - os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards
|
| + os.environ['WEBKIT_TEST_MAX_LOCKED_SHARDS'] = save_env_webkit_test_max_locked_shards
|
|
|
| def test_child_processes_2(self):
|
| if self.should_test_processes:
|
| @@ -279,11 +283,11 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| # WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can
|
| # be printed, but don't display properly in the unit test exception handlers.
|
| self.assertRaises(BaseException, logging_run,
|
| - ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
|
| + ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
|
|
|
| if self.should_test_processes:
|
| self.assertRaises(BaseException, logging_run,
|
| - ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
|
| + ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
|
|
|
| def test_device_failure(self):
|
| # Test that we handle a device going offline during a test properly.
|
| @@ -304,7 +308,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)
|
|
|
| if self.should_test_processes:
|
| - _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
|
| + _, regular_output, _ = logging_run(
|
| + ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
|
| self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))
|
|
|
| def test_no_tests_found(self):
|
| @@ -323,9 +328,17 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertContains(err, 'No tests to run.\n')
|
|
|
| def test_natural_order(self):
|
| - tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
|
| + tests_to_run = [
|
| + 'passes/audio.html',
|
| + 'failures/expected/text.html',
|
| + 'failures/expected/missing_text.html',
|
| + 'passes/args.html']
|
| tests_run = get_tests_run(['--order=natural'] + tests_to_run)
|
| - self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
|
| + self.assertEqual(['failures/expected/missing_text.html',
|
| + 'failures/expected/text.html',
|
| + 'passes/args.html',
|
| + 'passes/audio.html'],
|
| + tests_run)
|
|
|
| def test_natural_order_test_specified_multiple_times(self):
|
| tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
|
| @@ -333,12 +346,20 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
|
|
|
| def test_random_order(self):
|
| - tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
|
| + tests_to_run = [
|
| + 'passes/audio.html',
|
| + 'failures/expected/text.html',
|
| + 'failures/expected/missing_text.html',
|
| + 'passes/args.html']
|
| tests_run = get_tests_run(['--order=random'] + tests_to_run)
|
| self.assertEqual(sorted(tests_to_run), sorted(tests_run))
|
|
|
| def test_random_daily_seed_order(self):
|
| - tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
|
| + tests_to_run = [
|
| + 'passes/audio.html',
|
| + 'failures/expected/text.html',
|
| + 'failures/expected/missing_text.html',
|
| + 'passes/args.html']
|
| tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
|
| self.assertEqual(sorted(tests_to_run), sorted(tests_run))
|
|
|
| @@ -349,7 +370,11 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertEqual(tests_run.count('passes/args.html'), 2)
|
|
|
| def test_no_order(self):
|
| - tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
|
| + tests_to_run = [
|
| + 'passes/audio.html',
|
| + 'failures/expected/text.html',
|
| + 'failures/expected/missing_text.html',
|
| + 'passes/args.html']
|
| tests_run = get_tests_run(['--order=none'] + tests_to_run)
|
| self.assertEqual(tests_to_run, tests_run)
|
|
|
| @@ -361,7 +386,11 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| def test_no_order_with_directory_entries_in_natural_order(self):
|
| tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
|
| tests_run = get_tests_run(['--order=none'] + tests_to_run)
|
| - self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
|
| + self.assertEqual(tests_run,
|
| + ['http/tests/ssl/text.html',
|
| + 'perf/foo/test.html',
|
| + 'http/tests/passes/image.html',
|
| + 'http/tests/passes/text.html'])
|
|
|
| def test_repeat_each(self):
|
| tests_to_run = ['passes/image.html', 'passes/text.html']
|
| @@ -381,7 +410,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
|
|
| # Check that nothing changes when we specify skipped=default.
|
| self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
|
| - num_tests_run_by_default)
|
| + num_tests_run_by_default)
|
|
|
| # Now check that we run one more test (the skipped one).
|
| tests_run = get_tests_run(['--skipped=ignore', 'passes'])
|
| @@ -466,7 +495,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| host = MockHost()
|
| self.assertTrue(passing_run(host=host))
|
| self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
|
| - 'stuff going to stderr')
|
| + 'stuff going to stderr')
|
|
|
| def test_test_list(self):
|
| host = MockHost()
|
| @@ -522,15 +551,19 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| # is missing, update the expected generic location.
|
| host = MockHost()
|
| details, err, _ = logging_run(['--no-show-results', '--retry-failures',
|
| - 'failures/expected/missing_image.html',
|
| - 'failures/unexpected/missing_text.html',
|
| - 'failures/unexpected/text-image-checksum.html'],
|
| - tests_included=True, host=host)
|
| + 'failures/expected/missing_image.html',
|
| + 'failures/unexpected/missing_text.html',
|
| + 'failures/unexpected/text-image-checksum.html'],
|
| + tests_included=True, host=host)
|
| file_list = host.filesystem.written_files.keys()
|
| self.assertEqual(details.exit_code, 2)
|
| json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
|
| - self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
|
| - self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
|
| + self.assertTrue(
|
| + json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -
|
| + 1)
|
| + self.assertTrue(
|
| + json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -
|
| + 1)
|
| self.assertTrue(json_string.find('"num_regressions":2') != -1)
|
| self.assertTrue(json_string.find('"num_flaky":0') != -1)
|
|
|
| @@ -570,18 +603,22 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| def test_crash_with_stderr(self):
|
| host = MockHost()
|
| _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
|
| - self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
|
| + self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find(
|
| + '{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
|
|
|
| def test_no_image_failure_with_image_diff(self):
|
| host = MockHost()
|
| - _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
|
| - self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
|
| + _, regular_output, _ = logging_run(
|
| + ['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
|
| + self.assertTrue(
|
| + host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0')
|
| + != -1)
|
|
|
| def test_exit_after_n_failures_upload(self):
|
| host = MockHost()
|
| details, regular_output, user = logging_run(
|
| - ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
|
| - tests_included=True, host=host)
|
| + ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
|
| + tests_included=True, host=host)
|
|
|
| # By returning False, we know that the incremental results were generated and then deleted.
|
| self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
|
| @@ -600,7 +637,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
|
|
| def test_exit_after_n_failures(self):
|
| # Unexpected failures should result in tests stopping.
|
| - tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
|
| + tests_run = get_tests_run(
|
| + ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
|
| self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
|
|
|
| # But we'll keep going for expected ones.
|
| @@ -613,7 +651,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertEqual(['failures/unexpected/crash.html'], tests_run)
|
|
|
| # Same with timeouts.
|
| - tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
|
| + tests_run = get_tests_run(['failures/unexpected/timeout.html',
|
| + 'passes/text.html',
|
| + '--exit-after-n-crashes-or-timeouts',
|
| + '1'])
|
| self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
|
|
|
| # But we'll keep going for expected ones.
|
| @@ -648,7 +689,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
|
|
| def test_retrying_default_value(self):
|
| host = MockHost()
|
| - details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
|
| + details, err, _ = logging_run(
|
| + ['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
|
| self.assertEqual(details.exit_code, 1)
|
| self.assertFalse('Retrying' in err.getvalue())
|
|
|
| @@ -684,7 +726,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| # Now we test that --clobber-old-results does remove the old entries and the old retries,
|
| # and that we don't retry again.
|
| host = MockHost()
|
| - details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
|
| + details, err, _ = logging_run(
|
| + ['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
|
| self.assertEqual(details.exit_code, 1)
|
| self.assertTrue('Clobbering old results' in err.getvalue())
|
| self.assertTrue('flaky/text.html' in err.getvalue())
|
| @@ -706,26 +749,31 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
|
|
| def test_retrying_force_pixel_tests(self):
|
| host = MockHost()
|
| - details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
|
| + details, err, _ = logging_run(
|
| + ['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
|
| self.assertEqual(details.exit_code, 1)
|
| self.assertTrue('Retrying' in err.getvalue())
|
| self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
|
| self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
|
| - self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
|
| - self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
|
| + self.assertTrue(
|
| + host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
|
| + self.assertTrue(
|
| + host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
|
| json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
|
| json = parse_full_results(json_string)
|
| - self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
|
| - {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
|
| - self.assertFalse(json["pixel_tests_enabled"])
|
| + self.assertEqual(json['tests']['failures']['unexpected']['text-image-checksum.html'],
|
| + {'expected': 'PASS', 'actual': 'TEXT IMAGE+TEXT', 'is_unexpected': True})
|
| + self.assertFalse(json['pixel_tests_enabled'])
|
| self.assertEqual(details.enabled_pixel_tests_in_retry, True)
|
|
|
| def test_retrying_uses_retries_directory(self):
|
| host = MockHost()
|
| - details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
|
| + details, err, _ = logging_run(
|
| + ['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
|
| self.assertEqual(details.exit_code, 1)
|
| self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
|
| - self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
|
| + self.assertTrue(
|
| + host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
|
|
|
| def test_run_order__inline(self):
|
| # These next tests test that we run the tests in ascending alphabetical
|
| @@ -766,9 +814,9 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
|
| results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
|
|
|
| - self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING"),
|
| - self.assertEqual(results["num_regressions"], 5)
|
| - self.assertEqual(results["num_flaky"], 0)
|
| + self.assertEqual(results['tests']['reftests']['foo']['unlistedtest.html']['actual'], 'MISSING')
|
| + self.assertEqual(results['num_regressions'], 5)
|
| + self.assertEqual(results['num_flaky'], 0)
|
|
|
| def test_reftest_crash(self):
|
| test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
|
| @@ -816,7 +864,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
|
|
|
| full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
|
| - full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
|
| + full_results = json.loads(full_results_text.replace('ADD_RESULTS(', '').replace(');', ''))
|
| self.assertEqual(full_results['has_wdiff'], False)
|
| self.assertEqual(full_results['has_pretty_patch'], False)
|
|
|
| @@ -837,7 +885,12 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| port_name = 'mac-lion'
|
| out = StringIO.StringIO()
|
| err = StringIO.StringIO()
|
| - self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
|
| + self.assertEqual(run_webkit_tests.main(['--platform',
|
| + port_name,
|
| + 'fast/harness/results.html'],
|
| + out,
|
| + err),
|
| + test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
|
|
|
| def test_verbose_in_child_processes(self):
|
| # When we actually run multiple processes, we may have to reconfigure logging in the
|
| @@ -849,7 +902,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
| if not self.should_test_processes:
|
| return
|
|
|
| - options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
|
| + options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes',
|
| + '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
|
| host = MockHost()
|
| port_obj = host.port_factory.get(port_name=options.platform, options=options)
|
| logging_stream = StringIO.StringIO()
|
| @@ -873,6 +927,7 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
|
|
|
|
|
| class EndToEndTest(unittest.TestCase):
|
| +
|
| def test_reftest_with_two_notrefs(self):
|
| # Test that we update expectations in place. If the expectation
|
| # is missing, update the expected generic location.
|
| @@ -882,23 +937,24 @@ class EndToEndTest(unittest.TestCase):
|
|
|
| json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
|
| json = parse_full_results(json_string)
|
| - self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
|
| - self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
|
| - self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
|
| + self.assertTrue('multiple-match-success.html' not in json['tests']['reftests']['foo'])
|
| + self.assertTrue('multiple-mismatch-success.html' not in json['tests']['reftests']['foo'])
|
| + self.assertTrue('multiple-both-success.html' not in json['tests']['reftests']['foo'])
|
|
|
| - self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
|
| - {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
|
| - self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
|
| - {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
|
| - self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
|
| - {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})
|
| + self.assertEqual(json['tests']['reftests']['foo']['multiple-match-failure.html'],
|
| + {'expected': 'PASS', 'actual': 'IMAGE', 'reftest_type': ['=='], 'is_unexpected': True})
|
| + self.assertEqual(json['tests']['reftests']['foo']['multiple-mismatch-failure.html'],
|
| + {'expected': 'PASS', 'actual': 'IMAGE', 'reftest_type': ['!='], 'is_unexpected': True})
|
| + self.assertEqual(json['tests']['reftests']['foo']['multiple-both-failure.html'],
|
| + {'expected': 'PASS', 'actual': 'IMAGE', 'reftest_type': ['==', '!='], 'is_unexpected': True})
|
|
|
|
|
| class RebaselineTest(unittest.TestCase, StreamTestingMixin):
|
| +
|
| def assertBaselines(self, file_list, file, extensions, err):
|
| - "assert that the file_list contains the baselines."""
|
| + '''Assert that the file_list contains the baselines.'''
|
| for ext in extensions:
|
| - baseline = file + "-expected" + ext
|
| + baseline = file + '-expected' + ext
|
| baseline_msg = 'Writing new expected result "%s"\n' % baseline
|
| self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
|
| self.assertContains(err, baseline_msg)
|
| @@ -916,24 +972,24 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
|
| file_list = host.filesystem.written_files.keys()
|
| self.assertEqual(details.exit_code, 0)
|
| self.assertEqual(len(file_list), 8)
|
| - self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
|
| - self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
|
| + self.assertBaselines(file_list, 'passes/image', ['.txt', '.png'], err)
|
| + self.assertBaselines(file_list, 'failures/expected/missing_image', ['.txt', '.png'], err)
|
|
|
| def test_missing_results(self):
|
| # Test that we update expectations in place. If the expectation
|
| # is missing, update the expected generic location.
|
| host = MockHost()
|
| details, err, _ = logging_run(['--no-show-results',
|
| - 'failures/unexpected/missing_text.html',
|
| - 'failures/unexpected/missing_image.html',
|
| - 'failures/unexpected/missing_render_tree_dump.html'],
|
| - tests_included=True, host=host, new_results=True)
|
| + 'failures/unexpected/missing_text.html',
|
| + 'failures/unexpected/missing_image.html',
|
| + 'failures/unexpected/missing_render_tree_dump.html'],
|
| + tests_included=True, host=host, new_results=True)
|
| file_list = host.filesystem.written_files.keys()
|
| self.assertEqual(details.exit_code, 3)
|
| self.assertEqual(len(file_list), 10)
|
| - self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
|
| - self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
|
| - self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
|
| + self.assertBaselines(file_list, 'failures/unexpected/missing_text', ['.txt'], err)
|
| + self.assertBaselines(file_list, 'platform/test/failures/unexpected/missing_image', ['.png'], err)
|
| + self.assertBaselines(file_list, 'platform/test/failures/unexpected/missing_render_tree_dump', ['.txt'], err)
|
|
|
| def test_missing_results_not_added_if_expected_missing(self):
|
| # Test that we update expectations in place. If the expectation
|
| @@ -949,11 +1005,11 @@ Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
|
| Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
|
| """)
|
| details, err, _ = logging_run(['--no-show-results',
|
| - 'failures/unexpected/missing_text.html',
|
| - 'failures/unexpected/missing_image.html',
|
| - 'failures/unexpected/missing_audio.html',
|
| - 'failures/unexpected/missing_render_tree_dump.html'],
|
| - tests_included=True, host=host, new_results=True, port_obj=port)
|
| + 'failures/unexpected/missing_text.html',
|
| + 'failures/unexpected/missing_image.html',
|
| + 'failures/unexpected/missing_audio.html',
|
| + 'failures/unexpected/missing_render_tree_dump.html'],
|
| + tests_included=True, host=host, new_results=True, port_obj=port)
|
| file_list = host.filesystem.written_files.keys()
|
| self.assertEqual(details.exit_code, 0)
|
| self.assertEqual(len(file_list), 7)
|
| @@ -975,17 +1031,17 @@ Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
|
| Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
|
| """)
|
| details, err, _ = logging_run(['--pixel-tests', '--reset-results',
|
| - 'failures/unexpected/missing_text.html',
|
| - 'failures/unexpected/missing_image.html',
|
| - 'failures/unexpected/missing_audio.html',
|
| - 'failures/unexpected/missing_render_tree_dump.html'],
|
| - tests_included=True, host=host, new_results=True, port_obj=port)
|
| + 'failures/unexpected/missing_text.html',
|
| + 'failures/unexpected/missing_image.html',
|
| + 'failures/unexpected/missing_audio.html',
|
| + 'failures/unexpected/missing_render_tree_dump.html'],
|
| + tests_included=True, host=host, new_results=True, port_obj=port)
|
| file_list = host.filesystem.written_files.keys()
|
| self.assertEqual(details.exit_code, 0)
|
| self.assertEqual(len(file_list), 11)
|
| - self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
|
| - self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
|
| - self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)
|
| + self.assertBaselines(file_list, 'failures/unexpected/missing_text', ['.txt'], err)
|
| + self.assertBaselines(file_list, 'failures/unexpected/missing_image', ['.png'], err)
|
| + self.assertBaselines(file_list, 'failures/unexpected/missing_render_tree_dump', ['.txt'], err)
|
|
|
| def test_new_baseline(self):
|
| # Test that we update the platform expectations in the version-specific directories
|
| @@ -998,20 +1054,26 @@ Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
|
| self.assertEqual(details.exit_code, 0)
|
| self.assertEqual(len(file_list), 8)
|
| self.assertBaselines(file_list,
|
| - "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
|
| + 'platform/test-mac-leopard/passes/image', ['.txt', '.png'], err)
|
| self.assertBaselines(file_list,
|
| - "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
|
| + 'platform/test-mac-leopard/failures/expected/missing_image', ['.txt', '.png'], err)
|
|
|
|
|
| class PortTest(unittest.TestCase):
|
| +
|
| def assert_mock_port_works(self, port_name, args=[]):
|
| - self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
|
| + self.assertTrue(passing_run(args + ['--platform',
|
| + 'mock-' + port_name,
|
| + 'fast/harness/results.html'],
|
| + tests_included=True,
|
| + host=Host()))
|
|
|
| def disabled_test_mac_lion(self):
|
| self.assert_mock_port_works('mac-lion')
|
|
|
|
|
| class MainTest(unittest.TestCase):
|
| +
|
| def test_exception_handling(self):
|
| orig_run_fn = run_webkit_tests.run
|
|
|
| @@ -1056,6 +1118,6 @@ class MainTest(unittest.TestCase):
|
| stdout, stderr)
|
| self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
|
| self.assertEqual(stdout.getvalue(),
|
| - ('\n'
|
| - 'Regressions: Unexpected missing results (1)\n'
|
| - ' failures/unexpected/missing_image.html [ Missing ]\n\n'))
|
| + ('\n'
|
| + 'Regressions: Unexpected missing results (1)\n'
|
| + ' failures/unexpected/missing_image.html [ Missing ]\n\n'))
|
|
|
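For reference, the parse_full_results() helper touched at the top of this patch strips the JSONP-style ADD_RESULTS(...) wrapper before decoding, and the same pattern recurs inline in several tests. A minimal standalone sketch of that round trip follows; the function body is taken from the patch, while the sample payload and its field values are illustrative only (the field names mirror those asserted on in the tests above):

    import json

    def parse_full_results(full_results_text):
        # Drop the JSONP wrapper that the results writer emits around the JSON body.
        json_to_eval = full_results_text.replace('ADD_RESULTS(', '').replace(');', '')
        return json.loads(json_to_eval)

    # Illustrative payload, not taken from the patch.
    sample = 'ADD_RESULTS({"num_regressions": 2, "num_flaky": 0});'
    assert parse_full_results(sample) == {'num_regressions': 2, 'num_flaky': 0}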