Chromium Code Reviews

Side by Side Diff: Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py

Issue 454223003: Remove the concept of 'integration tests' from test-webkitpy. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: merge forward, remove dead / unneeded code Created 6 years, 4 months ago
1 # Copyright (C) 2010 Google Inc. All rights reserved.
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
3 # Copyright (C) 2011 Apple Inc. All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met:
8 #
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above
12 # copyright notice, this list of conditions and the following disclaimer
13 # in the documentation and/or other materials provided with the
14 # distribution.
15 # * Neither the name of Google Inc. nor the names of its
16 # contributors may be used to endorse or promote products derived from
17 # this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import codecs
32 import json
33 import logging
34 import os
35 import platform
36 import Queue
37 import re
38 import StringIO
39 import sys
40 import thread
41 import time
42 import threading
43 import webkitpy.thirdparty.unittest2 as unittest
44
45 from webkitpy.common.system import outputcapture, path
46 from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
47 from webkitpy.common.system.systemhost import SystemHost
48 from webkitpy.common.host import Host
49 from webkitpy.common.host_mock import MockHost
50
51 from webkitpy.layout_tests import port
52 from webkitpy.layout_tests import run_webkit_tests
53 from webkitpy.layout_tests.models import test_run_results
54 from webkitpy.layout_tests.port import Port
55 from webkitpy.layout_tests.port import test
56 from webkitpy.test.skip import skip_if
57 from webkitpy.tool import grammar
58 from webkitpy.tool.mocktool import MockOptions
59
60
61 def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
62 extra_args = extra_args or []
63 args = []
64 if not '--platform' in extra_args:
65 args.extend(['--platform', 'test'])
66 if not new_results:
67 args.append('--no-new-test-results')
68
69 if not '--child-processes' in extra_args:
70 args.extend(['--child-processes', 1])
71 args.extend(extra_args)
72 if not tests_included:
73 # We use the glob to test that globbing works.
74 args.extend(['passes',
75 'http/tests',
76 'websocket/tests',
77 'failures/expected/*'])
78 return run_webkit_tests.parse_args(args)
79
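# NOTE (illustrative sketch, not part of the patch): with no arguments and
# tests_included=False, the helper above is equivalent to calling
#     run_webkit_tests.parse_args(['--platform', 'test', '--no-new-test-results',
#                                  '--child-processes', 1,
#                                  'passes', 'http/tests', 'websocket/tests',
#                                  'failures/expected/*'])
# The trailing glob is deliberate; it is there to check that globbing works.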
80
81 def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
82 options, parsed_args = parse_args(extra_args, tests_included)
83 if not port_obj:
84 host = host or MockHost()
85 port_obj = host.port_factory.get(port_name=options.platform, options=options)
86
87 if shared_port:
88 port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
89
90 logging_stream = StringIO.StringIO()
91 run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
92 return run_details.exit_code == 0
93
94
95 def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
96 options, parsed_args = parse_args(extra_args=extra_args,
97 tests_included=tests_included,
98 print_nothing=False, new_results=new_results)
99 host = host or MockHost()
100 if not port_obj:
101 port_obj = host.port_factory.get(port_name=options.platform, options=options)
102
103 run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
104 return (run_details, output, host.user)
105
106
107 def run_and_capture(port_obj, options, parsed_args, shared_port=True):
108 if shared_port:
109 port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
110 oc = outputcapture.OutputCapture()
111 try:
112 oc.capture_output()
113 logging_stream = StringIO.StringIO()
114 run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
115 finally:
116 oc.restore_output()
117 return (run_details, logging_stream)
118
119
120 def get_tests_run(args, host=None, port_obj=None):
121 results = get_test_results(args, host=host, port_obj=port_obj)
122 return [result.test_name for result in results]
123
124
125 def get_test_batches(args, host=None):
126 results = get_test_results(args, host)
127 batches = []
128 batch = []
129 current_pid = None
130 for result in results:
131 if batch and result.pid != current_pid:
132 batches.append(batch)
133 batch = []
134 batch.append(result.test_name)
135 if batch:
136 batches.append(batch)
137 return batches
138
139
140 def get_test_results(args, host=None, port_obj=None):
141 options, parsed_args = parse_args(args, tests_included=True)
142
143 host = host or MockHost()
144 port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)
145
146 oc = outputcapture.OutputCapture()
147 oc.capture_output()
148 logging_stream = StringIO.StringIO()
149 try:
150 run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
151 finally:
152 oc.restore_output()
153
154 all_results = []
155 if run_details.initial_results:
156 all_results.extend(run_details.initial_results.all_results)
157
158 if run_details.retry_results:
159 all_results.extend(run_details.retry_results.all_results)
160 return all_results
161
162
163 def parse_full_results(full_results_text):
164 json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
165 compressed_results = json.loads(json_to_eval)
166 return compressed_results
167
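# NOTE (illustrative, not part of the patch): parse_full_results() strips the
# JSONP-style wrapper the results files use, e.g.
#     parse_full_results('ADD_RESULTS({"num_regressions": 2});')
# returns the plain dict {"num_regressions": 2}; input without the wrapper is
# passed through to json.loads() unchanged, since the replace() calls are no-ops.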
168
169 class StreamTestingMixin(object):
170 def assertContains(self, stream, string):
171 self.assertTrue(string in stream.getvalue())
172
173 def assertEmpty(self, stream):
174 self.assertFalse(stream.getvalue())
175
176 def assertNotEmpty(self, stream):
177 self.assertTrue(stream.getvalue())
178
179
180 class RunTest(unittest.TestCase, StreamTestingMixin):
181 def setUp(self):
182 # A real PlatformInfo object is used here instead of a
183 # MockPlatformInfo because we need to actually check for
184 # Windows and Mac to skip some tests.
185 self._platform = SystemHost().platform
186
187 # FIXME: Remove this when we fix test-webkitpy to work
188 # properly on cygwin (bug 63846).
189 self.should_test_processes = not self._platform.is_win()
190
191 def test_basic(self):
192 options, args = parse_args(tests_included=True)
193 logging_stream = StringIO.StringIO()
194 host = MockHost()
195 port_obj = host.port_factory.get(options.platform, options)
196 details = run_webkit_tests.run(port_obj, options, args, logging_stream)
197
198 # These numbers will need to be updated whenever we add new tests.
199 self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
200 self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
201 self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
202 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
203 self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)
204
205 expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
206 expected_summary_str = ''
207 if details.initial_results.expected_failures > 0:
208 expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
209 one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
210 expected_tests,
211 expected_summary_str,
212 len(details.initial_results.unexpected_results_by_name))
213 self.assertTrue(one_line_summary in logging_stream.buflist)
214
215 # Ensure the results were summarized properly.
216 self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)
217
218 # Ensure the results were written out and displayed.
219 failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
220 json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
221 self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)
222
223 full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
224 self.assertEqual(json.loads(full_results_text), details.summarized_full_results)
225
226 self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
227
228
229 def test_batch_size(self):
230 batch_tests_run = get_test_batches(['--batch-size', '2'])
231 for batch in batch_tests_run:
232 self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
233
234 def test_max_locked_shards(self):
235 # Tests for the default of using one locked shard even in the case of more than one child process.
236 if not self.should_test_processes:
237 return
238 save_env_webkit_test_max_locked_shards = None
239 if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
240 save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
241 del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
242 _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
243 try:
244 self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
245 finally:
246 if save_env_webkit_test_max_locked_shards:
247 os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_te st_max_locked_shards
248
249 def test_child_processes_2(self):
250 if self.should_test_processes:
251 _, regular_output, _ = logging_run(
252 ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
253 self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
254
255 def test_child_processes_min(self):
256 if self.should_test_processes:
257 _, regular_output, _ = logging_run(
258 ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
259 tests_included=True, shared_port=False)
260 self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
261
262 def test_dryrun(self):
263 tests_run = get_tests_run(['--dry-run'])
264 self.assertEqual(tests_run, [])
265
266 tests_run = get_tests_run(['-n'])
267 self.assertEqual(tests_run, [])
268
269 def test_enable_sanitizer(self):
270 self.assertTrue(passing_run(['--enable-sanitizer', 'failures/expected/text.html']))
271
272 def test_exception_raised(self):
273 # Exceptions raised by a worker are treated differently depending on
274 # whether they are in-process or out. Inline exceptions work as normal,
275 # which allows us to get the full stack trace and traceback from the
276 # worker. The downside to this is that it could be any error, but this
277 # is actually useful in testing.
278 #
279 # Exceptions raised in a separate process are re-packaged into
280 # WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can
281 # be printed, but don't display properly in the unit test exception handlers.
282 self.assertRaises(BaseException, logging_run,
283 ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
284
285 if self.should_test_processes:
286 self.assertRaises(BaseException, logging_run,
287 ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
288
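# NOTE (illustrative, not part of the patch): per the comment above, the same
# failing test surfaces differently depending on the worker model --
#     --child-processes 1  ->  the worker's original exception propagates inline
#     --child-processes 2  ->  it is re-packaged as a WorkerException
# Both derive from BaseException, which is why assertRaises(BaseException, ...)
# is the common denominator used by this test.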
289 def test_device_failure(self):
290 # Test that we handle a device going offline during a test properly.
291 details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
292 self.assertEqual(details.exit_code, 0)
293 self.assertTrue('worker/0 has failed' in regular_output.getvalue())
294
295 def test_full_results_html(self):
296 host = MockHost()
297 details, _, _ = logging_run(['--full-results-html'], host=host)
298 self.assertEqual(details.exit_code, 0)
299 self.assertEqual(len(host.user.opened_urls), 1)
300
301 def test_keyboard_interrupt(self):
302 # Note that this also tests running a test marked as SKIP if
303 # you specify it explicitly.
304 details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
305 self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)
306
307 if self.should_test_processes:
308 _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
309 self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))
310
311 def test_no_tests_found(self):
312 details, err, _ = logging_run(['resources'], tests_included=True)
313 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
314 self.assertContains(err, 'No tests to run.\n')
315
316 def test_no_tests_found_2(self):
317 details, err, _ = logging_run(['foo'], tests_included=True)
318 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
319 self.assertContains(err, 'No tests to run.\n')
320
321 def test_no_tests_found_3(self):
322 details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
323 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
324 self.assertContains(err, 'No tests to run.\n')
325
326 def test_natural_order(self):
327 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
328 tests_run = get_tests_run(['--order=natural'] + tests_to_run)
329 self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
330
331 def test_natural_order_test_specified_multiple_times(self):
332 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
333 tests_run = get_tests_run(['--order=natural'] + tests_to_run)
334 self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
335
336 def test_random_order(self):
337 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
338 tests_run = get_tests_run(['--order=random'] + tests_to_run)
339 self.assertEqual(sorted(tests_to_run), sorted(tests_run))
340
341 def test_random_daily_seed_order(self):
342 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
343 tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
344 self.assertEqual(sorted(tests_to_run), sorted(tests_run))
345
346 def test_random_order_test_specified_multiple_times(self):
347 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
348 tests_run = get_tests_run(['--order=random'] + tests_to_run)
349 self.assertEqual(tests_run.count('passes/audio.html'), 2)
350 self.assertEqual(tests_run.count('passes/args.html'), 2)
351
352 def test_no_order(self):
353 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
354 tests_run = get_tests_run(['--order=none'] + tests_to_run)
355 self.assertEqual(tests_to_run, tests_run)
356
357 def test_no_order_test_specified_multiple_times(self):
358 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
359 tests_run = get_tests_run(['--order=none'] + tests_to_run)
360 self.assertEqual(tests_to_run, tests_run)
361
362 def test_no_order_with_directory_entries_in_natural_order(self):
363 tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
364 tests_run = get_tests_run(['--order=none'] + tests_to_run)
365 self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
366
367 def test_repeat_each(self):
368 tests_to_run = ['passes/image.html', 'passes/text.html']
369 tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
370 self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
371
372 def test_ignore_flag(self):
373 # Note that passes/image.html is expected to be run since we specified it directly.
374 tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
375 self.assertFalse('passes/text.html' in tests_run)
376 self.assertTrue('passes/image.html' in tests_run)
377
378 def test_skipped_flag(self):
379 tests_run = get_tests_run(['passes'])
380 self.assertFalse('passes/skipped/skip.html' in tests_run)
381 num_tests_run_by_default = len(tests_run)
382
383 # Check that nothing changes when we specify skipped=default.
384 self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
385 num_tests_run_by_default)
386
387 # Now check that we run one more test (the skipped one).
388 tests_run = get_tests_run(['--skipped=ignore', 'passes'])
389 self.assertTrue('passes/skipped/skip.html' in tests_run)
390 self.assertEqual(len(tests_run), num_tests_run_by_default + 1)
391
392 # Now check that we only run the skipped test.
393 self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])
394
395 # Now check that we don't run anything.
396 self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])
397
398 def test_iterations(self):
399 tests_to_run = ['passes/image.html', 'passes/text.html']
400 tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
401 self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])
402
403 def test_repeat_each_iterations_num_tests(self):
404 # The total number of tests should be: number_of_tests *
405 # repeat_each * iterations
406 host = MockHost()
407 _, err, _ = logging_run(
408 ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
409 tests_included=True, host=host)
410 self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")
411
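# NOTE (worked example, not part of the patch): the command line above runs
# 2 tests x --repeat-each 4 x --iterations 2 = 16 test runs; the 8 runs of
# passes/text.html pass and the 8 runs of failures/expected/text.html fail as
# expected, matching the "All 16 tests ran as expected (8 passed, 8 didn't)"
# message asserted above.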
412 def test_run_chunk(self):
413 # Test that we actually select the right chunk
414 all_tests_run = get_tests_run(['passes', 'failures'])
415 chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
416 self.assertEqual(all_tests_run[4:8], chunk_tests_run)
417
418 # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
419 tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
420 chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
421 self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
422
423 def test_run_part(self):
424 # Test that we actually select the right part
425 tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
426 tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
427 self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)
428
429 # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
430 # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
431 # last part repeats the first two tests).
432 chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
433 self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)
434
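# NOTE (worked example, not part of the patch): with 4 tests split into 3 parts,
# each part holds 2 tests, so part 3 starts past the end of the list and wraps
# around to the first two tests -- hence the repeated
# ['passes/error.html', 'passes/image.html'] asserted above.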
435 def test_run_singly(self):
436 batch_tests_run = get_test_batches(['--run-singly'])
437 for batch in batch_tests_run:
438 self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))
439
440 def test_skip_failing_tests(self):
441 # This tests that we skip both known failing and known flaky tests. Because there are
442 # no known flaky tests in the default test_expectations, we add additional expectations.
443 host = MockHost()
444 host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
445
446 batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
447 has_passes_text = False
448 for batch in batches:
449 self.assertFalse('failures/expected/text.html' in batch)
450 self.assertFalse('passes/image.html' in batch)
451 has_passes_text = has_passes_text or ('passes/text.html' in batch)
452 self.assertTrue(has_passes_text)
453
454 def test_single_file(self):
455 tests_run = get_tests_run(['passes/text.html'])
456 self.assertEqual(tests_run, ['passes/text.html'])
457
458 def test_single_file_with_prefix(self):
459 tests_run = get_tests_run(['LayoutTests/passes/text.html'])
460 self.assertEqual(['passes/text.html'], tests_run)
461
462 def test_single_skipped_file(self):
463 tests_run = get_tests_run(['failures/expected/keybaord.html'])
464 self.assertEqual([], tests_run)
465
466 def test_stderr_is_saved(self):
467 host = MockHost()
468 self.assertTrue(passing_run(host=host))
469 self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
470 'stuff going to stderr')
471
472 def test_test_list(self):
473 host = MockHost()
474 filename = '/tmp/foo.txt'
475 host.filesystem.write_text_file(filename, 'passes/text.html')
476 tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
477 self.assertEqual(['passes/text.html'], tests_run)
478 host.filesystem.remove(filename)
479 details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
480 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
481 self.assertNotEmpty(err)
482
483 def test_test_list_with_prefix(self):
484 host = MockHost()
485 filename = '/tmp/foo.txt'
486 host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
487 tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
488 self.assertEqual(['passes/text.html'], tests_run)
489
490 def test_smoke_test(self):
491 host = MockHost()
492 smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
493 host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')
494
495 # Test the default smoke testing.
496 tests_run = get_tests_run(['--smoke'], host=host)
497 self.assertEqual(['passes/text.html'], tests_run)
498
499 # Test running the smoke tests plus some manually-specified tests.
500 tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
501 self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)
502
503 # Test that --no-smoke runs only the manually-specified tests.
504 tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
505 self.assertEqual(['passes/image.html'], tests_run)
506
507 # Test that we don't run just the smoke tests by default on a normal test port.
508 tests_run = get_tests_run([], host=host)
509 self.assertNotEqual(['passes/text.html'], tests_run)
510
511 # Create a port that does run only the smoke tests by default, and verify that works as expected.
512 port_obj = host.port_factory.get('test')
513 port_obj.default_smoke_test_only = lambda: True
514 tests_run = get_tests_run([], host=host, port_obj=port_obj)
515 self.assertEqual(['passes/text.html'], tests_run)
516
517 # Verify that --no-smoke continues to work on a smoke-by-default port.
518 tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
519 self.assertNotEqual(['passes/text.html'], tests_run)
520
521 def test_missing_and_unexpected_results(self):
522 # Test that we update expectations in place. If the expectation
523 # is missing, update the expected generic location.
524 host = MockHost()
525 details, err, _ = logging_run(['--no-show-results', '--retry-failures',
526 'failures/expected/missing_image.html',
527 'failures/unexpected/missing_text.html',
528 'failures/unexpected/text-image-checksum.html'],
529 tests_included=True, host=host)
530 file_list = host.filesystem.written_files.keys()
531 self.assertEqual(details.exit_code, 2)
532 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
533 self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
534 self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
535 self.assertTrue(json_string.find('"num_regressions":2') != -1)
536 self.assertTrue(json_string.find('"num_flaky":0') != -1)
537
538 def test_different_failure_on_retry(self):
539 # This tests that if a test fails two different ways -- both unexpected
540 # -- we treat it as a failure rather than a flaky result. We use the
541 # initial failure for simplicity and consistency w/ the flakiness
542 # dashboard, even if the second failure is worse.
543
544 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
545 self.assertEqual(details.exit_code, 1)
546 self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
547 'TEXT CRASH')
548
549 # If we get a test that fails two different ways -- but the second one is expected --
550 # we should treat it as a flaky result and report the initial unexpected failure type
551 # to the dashboard. However, the test should be considered passing.
552 details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
553 self.assertEqual(details.exit_code, 0)
554 self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
555 'CRASH FAIL')
556
557 def test_pixel_test_directories(self):
558 host = MockHost()
559
560 """Both tests have failing checksum. We include only the first in pixel tests so only that should fail."""
561 args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
562 'failures/unexpected/pixeldir/image_in_pixeldir.html',
563 'failures/unexpected/image_not_in_pixeldir.html']
564 details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
565
566 self.assertEqual(details.exit_code, 1)
567 expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
568 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
569 self.assertTrue(json_string.find(expected_token) != -1)
570
571 def test_crash_with_stderr(self):
572 host = MockHost()
573 _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
574 self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
575
576 def test_no_image_failure_with_image_diff(self):
577 host = MockHost()
578 _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
579 self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
580
581 def test_exit_after_n_failures_upload(self):
582 host = MockHost()
583 details, regular_output, user = logging_run(
584 ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
585 tests_included=True, host=host)
586
587 # By returning False, we know that the incremental results were generated and then deleted.
588 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
589
590 self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)
591
592 # This checks that passes/text.html is considered SKIPped.
593 self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/la yout-test-results/full_results.json'))
594
595 # This checks that we told the user we bailed out.
596 self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
597
598 # This checks that neither test ran as expected.
599 # FIXME: This log message is confusing; tests that were skipped should be called out separately.
600 self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
601
602 def test_exit_after_n_failures(self):
603 # Unexpected failures should result in tests stopping.
604 tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
605 self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
606
607 # But we'll keep going for expected ones.
608 tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
609 self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)
610
611 def test_exit_after_n_crashes(self):
612 # Unexpected crashes should result in tests stopping.
613 tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
614 self.assertEqual(['failures/unexpected/crash.html'], tests_run)
615
616 # Same with timeouts.
617 tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
618 self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
619
620 # But we'll keep going for expected ones.
621 tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
622 self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)
623
624 def test_results_directory_absolute(self):
625 # We run a configuration that should fail, to generate output, then
626 # look for what the output results url was.
627
628 host = MockHost()
629 with host.filesystem.mkdtemp() as tmpdir:
630 _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
631 self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])
632
633 def test_results_directory_default(self):
634 # We run a configuration that should fail, to generate output, then
635 # look for what the output results url was.
636
637 # This is the default location.
638 _, _, user = logging_run(tests_included=True)
639 self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
640
641 def test_results_directory_relative(self):
642 # We run a configuration that should fail, to generate output, then
643 # look for what the output results url was.
644 host = MockHost()
645 host.filesystem.maybe_make_directory('/tmp/cwd')
646 host.filesystem.chdir('/tmp/cwd')
647 _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
648 self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
649
650 def test_retrying_default_value(self):
651 host = MockHost()
652 details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
653 self.assertEqual(details.exit_code, 1)
654 self.assertFalse('Retrying' in err.getvalue())
655
656 host = MockHost()
657 details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
658 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7) # FIXME: This should be a constant in test.py.
659 self.assertTrue('Retrying' in err.getvalue())
660
661 def test_retrying_default_value_test_list(self):
662 host = MockHost()
663 filename = '/tmp/foo.txt'
664 host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
665 details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
666 self.assertEqual(details.exit_code, 2)
667 self.assertFalse('Retrying' in err.getvalue())
668
669 host = MockHost()
670 filename = '/tmp/foo.txt'
671 host.filesystem.write_text_file(filename, 'failures')
672 details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
673 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
674 self.assertTrue('Retrying' in err.getvalue())
675
676 def test_retrying_and_flaky_tests(self):
677 host = MockHost()
678 details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
679 self.assertEqual(details.exit_code, 0)
680 self.assertTrue('Retrying' in err.getvalue())
681 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
682 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
683 self.assertEqual(len(host.user.opened_urls), 0)
684
685 # Now we test that --clobber-old-results does remove the old entries and the old retries,
686 # and that we don't retry again.
687 host = MockHost()
688 details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
689 self.assertEqual(details.exit_code, 1)
690 self.assertTrue('Clobbering old results' in err.getvalue())
691 self.assertTrue('flaky/text.html' in err.getvalue())
692 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
693 self.assertFalse(host.filesystem.exists('retries'))
694 self.assertEqual(len(host.user.opened_urls), 1)
695
696 def test_retrying_crashed_tests(self):
697 host = MockHost()
698 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
699 self.assertEqual(details.exit_code, 1)
700 self.assertTrue('Retrying' in err.getvalue())
701
702 def test_retrying_leak_tests(self):
703 host = MockHost()
704 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
705 self.assertEqual(details.exit_code, 1)
706 self.assertTrue('Retrying' in err.getvalue())
707
708 def test_retrying_force_pixel_tests(self):
709 host = MockHost()
710 details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
711 self.assertEqual(details.exit_code, 1)
712 self.assertTrue('Retrying' in err.getvalue())
713 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
714 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
715 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
716 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
717 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
718 json = parse_full_results(json_string)
719 self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-che cksum.html"],
720 {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": T rue})
721 self.assertFalse(json["pixel_tests_enabled"])
722 self.assertEqual(details.enabled_pixel_tests_in_retry, True)
723
724 def test_retrying_uses_retries_directory(self):
725 host = MockHost()
726 details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
727 self.assertEqual(details.exit_code, 1)
728 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
729 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
730
731 def test_run_order__inline(self):
732 # These next tests test that we run the tests in ascending alphabetical
733 # order per directory. HTTP tests are sharded separately from other tests,
734 # so we have to test both.
735 tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
736 self.assertEqual(tests_run, sorted(tests_run))
737
738 tests_run = get_tests_run(['http/tests/passes'])
739 self.assertEqual(tests_run, sorted(tests_run))
740
741 def test_virtual(self):
742 self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
743 'virtual/passes/text.html', 'virtual/passes/args.html']))
744
745 def test_reftest_run(self):
746 tests_run = get_tests_run(['passes/reftest.html'])
747 self.assertEqual(['passes/reftest.html'], tests_run)
748
749 def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
750 tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
751 self.assertEqual(['passes/reftest.html'], tests_run)
752
753 def test_reftest_expected_html_should_be_ignored(self):
754 tests_run = get_tests_run(['passes/reftest-expected.html'])
755 self.assertEqual([], tests_run)
756
757 def test_reftest_driver_should_run_expected_html(self):
758 tests_run = get_test_results(['passes/reftest.html'])
759 self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])
760
761 def test_reftest_driver_should_run_expected_mismatch_html(self):
762 tests_run = get_test_results(['passes/mismatch.html'])
763 self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])
764
765 def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
766 host = MockHost()
767 _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
768 results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
769
770 self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html" ]["actual"], "MISSING"),
771 self.assertEqual(results["num_regressions"], 5)
772 self.assertEqual(results["num_flaky"], 0)
773
774 def test_reftest_crash(self):
775 test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
776 # The list of references should be empty since the test crashed and we didn't run any references.
777 self.assertEqual(test_results[0].references, [])
778
779 def test_reftest_with_virtual_reference(self):
780 _, err, _ = logging_run(['--details', 'virtual/passes/reftest.html'], tests_included=True)
781 self.assertTrue('ref: virtual/passes/reftest-expected.html' in err.getvalue())
782
783 def test_additional_platform_directory(self):
784 self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
785 self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
786 self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
787 self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))
788
789 def test_additional_expectations(self):
790 host = MockHost()
791 host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
792 self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
793 tests_included=True, host=host))
794
795 @staticmethod
796 def has_test_of_type(tests, type):
797 return [test for test in tests if type in test]
798
799 def test_platform_directories_ignored_when_searching_for_tests(self):
800 tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
801 self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
802 self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
803
804 def test_platform_directories_not_searched_for_additional_tests(self):
805 tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
806 self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
807 self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
808
809 def test_output_diffs(self):
810 # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
811 # aren't available.
812 host = MockHost()
813 _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
814 written_files = host.filesystem.written_files
815 self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
816 self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
817 self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
818
819 full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
820 full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
821 self.assertEqual(full_results['has_wdiff'], False)
822 self.assertEqual(full_results['has_pretty_patch'], False)
823
824 def test_unsupported_platform(self):
825 stdout = StringIO.StringIO()
826 stderr = StringIO.StringIO()
827 res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
828
829 self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
830 self.assertEqual(stdout.getvalue(), '')
831 self.assertTrue('unsupported platform' in stderr.getvalue())
832
833 def test_build_check(self):
834 # By using a port_name for a different platform than the one we're running on, the build check should always fail.
835 if sys.platform == 'darwin':
836 port_name = 'linux-x86'
837 else:
838 port_name = 'mac-lion'
839 out = StringIO.StringIO()
840 err = StringIO.StringIO()
841 self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
842
843 def test_verbose_in_child_processes(self):
844 # When we actually run multiple processes, we may have to reconfigure logging in the
845 # child process (e.g., on win32) and we need to make sure that works and we still
846 # see the verbose log output. However, we can't use logging_run() because using
847 # outputcapture to capture stdout and stderr later results in a nonpicklable host.
848
849 # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
850 if not self.should_test_processes:
851 return
852
853 options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
854 host = MockHost()
855 port_obj = host.port_factory.get(port_name=options.platform, options=options)
856 logging_stream = StringIO.StringIO()
857 run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
858 self.assertTrue('text.html passed' in logging_stream.getvalue())
859 self.assertTrue('image.html passed' in logging_stream.getvalue())
860
861 def disabled_test_driver_logging(self):
862 # FIXME: Figure out how to either use a mock-test port to
863 # get output or make mock ports work again.
864 host = Host()
865 _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
866 tests_included=True, host=host)
867 self.assertTrue('OUT:' in err.getvalue())
868
869 def test_write_full_results_to(self):
870 host = MockHost()
871 details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
872 self.assertEqual(details.exit_code, 0)
873 self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))
874
875
876 class EndToEndTest(unittest.TestCase):
877 def test_reftest_with_two_notrefs(self):
878 # Test that we update expectations in place. If the expectation
879 # is missing, update the expected generic location.
880 host = MockHost()
881 _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
882 file_list = host.filesystem.written_files.keys()
883
884 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
885 json = parse_full_results(json_string)
886 self.assertTrue("multiple-match-success.html" not in json["tests"]["reft ests"]["foo"])
887 self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["r eftests"]["foo"])
888 self.assertTrue("multiple-both-success.html" not in json["tests"]["refte sts"]["foo"])
889
890 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failur e.html"],
891 {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_ unexpected": True})
892 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-fai lure.html"],
893 {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_ unexpected": True})
894 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure .html"],
895 {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="] , "is_unexpected": True})
896
897
898 class RebaselineTest(unittest.TestCase, StreamTestingMixin):
899 def assertBaselines(self, file_list, file, extensions, err):
900 "assert that the file_list contains the baselines."""
901 for ext in extensions:
902 baseline = file + "-expected" + ext
903 baseline_msg = 'Writing new expected result "%s"\n' % baseline
904 self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
905 self.assertContains(err, baseline_msg)
906
907 # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
908 # supposed to be.
909
910 def test_reset_results(self):
911 # Test that we update expectations in place. If the expectation
912 # is missing, update the expected generic location.
913 host = MockHost()
914 details, err, _ = logging_run(
915 ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
916 tests_included=True, host=host, new_results=True)
917 file_list = host.filesystem.written_files.keys()
918 self.assertEqual(details.exit_code, 0)
919 self.assertEqual(len(file_list), 8)
920 self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
921 self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
922
923 def test_missing_results(self):
924 # Test that we update expectations in place. If the expectation
925 # is missing, update the expected generic location.
926 host = MockHost()
927 details, err, _ = logging_run(['--no-show-results',
928 'failures/unexpected/missing_text.html',
929 'failures/unexpected/missing_image.html',
930 'failures/unexpected/missing_render_tree_dump.html'],
931 tests_included=True, host=host, new_results=True)
932 file_list = host.filesystem.written_files.keys()
933 self.assertEqual(details.exit_code, 3)
934 self.assertEqual(len(file_list), 10)
935 self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
936 self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
937 self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
938
939 def test_missing_results_not_added_if_expected_missing(self):
940 # Test that we update expectations in place. If the expectation
941 # is missing, update the expected generic location.
942 host = MockHost()
943 options, parsed_args = run_webkit_tests.parse_args([])
944
945 port = test.TestPort(host, options=options)
946 host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
947 Bug(foo) failures/unexpected/missing_text.html [ Missing ]
948 Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
949 Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
950 Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
951 """)
952 details, err, _ = logging_run(['--no-show-results',
953 'failures/unexpected/missing_text.html',
954 'failures/unexpected/missing_image.html',
955 'failures/unexpected/missing_audio.html',
956 'failures/unexpected/missing_render_tree_dump.html'],
957 tests_included=True, host=host, new_results=True, port_obj=port)
958 file_list = host.filesystem.written_files.keys()
959 self.assertEqual(details.exit_code, 0)
960 self.assertEqual(len(file_list), 7)
961 self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
962 self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
963 self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))
964
965 def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
966 # Test that we update expectations in place. If the expectation
967 # is missing, update the expected generic location.
968 host = MockHost()
969 options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])
970
971 port = test.TestPort(host, options=options)
972 host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
973 Bug(foo) failures/unexpected/missing_text.html [ Missing ]
974 Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
975 Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
976 Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
977 """)
978 details, err, _ = logging_run(['--pixel-tests', '--reset-results',
979 'failures/unexpected/missing_text.html',
980 'failures/unexpected/missing_image.html',
981 'failures/unexpected/missing_audio.html',
982 'failures/unexpected/missing_render_tree_dump.html'],
983 tests_included=True, host=host, new_results=True, port_obj=port)
984 file_list = host.filesystem.written_files.keys()
985 self.assertEqual(details.exit_code, 0)
986 self.assertEqual(len(file_list), 11)
987 self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
988 self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
989 self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)
990
991 def test_new_baseline(self):
992 # Test that we update the platform expectations in the version-specific directories
993 # for both existing and new baselines.
994 host = MockHost()
995 details, err, _ = logging_run(
996 ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
997 tests_included=True, host=host, new_results=True)
998 file_list = host.filesystem.written_files.keys()
999 self.assertEqual(details.exit_code, 0)
1000 self.assertEqual(len(file_list), 8)
1001 self.assertBaselines(file_list,
1002 "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
1003 self.assertBaselines(file_list,
1004 "platform/test-mac-leopard/failures/expected/missing_image", [".txt" , ".png"], err)
1005
1006
1007 class PortTest(unittest.TestCase):
1008 def assert_mock_port_works(self, port_name, args=[]):
1009 self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
1010
1011 def disabled_test_mac_lion(self):
1012 self.assert_mock_port_works('mac-lion')
1013
1014
1015 class MainTest(unittest.TestCase):
1016 def test_exception_handling(self):
1017 orig_run_fn = run_webkit_tests.run
1018
1019 # unused args pylint: disable=W0613
1020 def interrupting_run(port, options, args, stderr):
1021 raise KeyboardInterrupt
1022
1023 def successful_run(port, options, args, stderr):
1024
1025 class FakeRunDetails(object):
1026 exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
1027
1028 return FakeRunDetails()
1029
1030 def exception_raising_run(port, options, args, stderr):
1031 assert False
1032
1033 stdout = StringIO.StringIO()
1034 stderr = StringIO.StringIO()
1035 try:
1036 run_webkit_tests.run = interrupting_run
1037 res = run_webkit_tests.main([], stdout, stderr)
1038 self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)
1039
1040 run_webkit_tests.run = successful_run
1041 res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
1042 self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
1043
1044 run_webkit_tests.run = exception_raising_run
1045 res = run_webkit_tests.main([], stdout, stderr)
1046 self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
1047 finally:
1048 run_webkit_tests.run = orig_run_fn
1049
1050 def test_buildbot_results_are_printed_on_early_exit(self):
1051 # unused args pylint: disable=W0613
1052 stdout = StringIO.StringIO()
1053 stderr = StringIO.StringIO()
1054 res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
1055 'failures/unexpected/missing_text.html',
1056 'failures/unexpected/missing_image.html'],
1057 stdout, stderr)
1058 self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
1059 self.assertEqual(stdout.getvalue(),
1060 ('\n'
1061 'Regressions: Unexpected missing results (1)\n'
1062 ' failures/unexpected/missing_image.html [ Missing ]\n\n'))