Chromium Code Reviews

Side by Side Diff: Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py

Issue 546133003: Reformat webkitpy.layout_tests w/ format-webkitpy. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 3 months ago
OLD | NEW
1 # Copyright (C) 2010 Google Inc. All rights reserved. 1 # Copyright (C) 2010 Google Inc. All rights reserved.
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
3 # Copyright (C) 2011 Apple Inc. All rights reserved. 3 # Copyright (C) 2011 Apple Inc. All rights reserved.
4 # 4 #
5 # Redistribution and use in source and binary forms, with or without 5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are 6 # modification, are permitted provided that the following conditions are
7 # met: 7 # met:
8 # 8 #
9 # * Redistributions of source code must retain the above copyright 9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer. 10 # notice, this list of conditions and the following disclaimer.
(...skipping 143 matching lines...)
154 all_results = [] 154 all_results = []
155 if run_details.initial_results: 155 if run_details.initial_results:
156 all_results.extend(run_details.initial_results.all_results) 156 all_results.extend(run_details.initial_results.all_results)
157 157
158 if run_details.retry_results: 158 if run_details.retry_results:
159 all_results.extend(run_details.retry_results.all_results) 159 all_results.extend(run_details.retry_results.all_results)
160 return all_results 160 return all_results
161 161
162 162
163 def parse_full_results(full_results_text): 163 def parse_full_results(full_results_text):
164 json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "") 164 json_to_eval = full_results_text.replace('ADD_RESULTS(', '').replace(');', '')
165 compressed_results = json.loads(json_to_eval) 165 compressed_results = json.loads(json_to_eval)
166 return compressed_results 166 return compressed_results
167 167
168 168
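The parse_full_results() helper above strips the JSONP-style ADD_RESULTS(...); wrapper that the test runner writes around full_results.json before handing the payload to json.loads(). A minimal standalone sketch of the same idea, using a hypothetical sample payload for illustration only:

    import json

    def parse_full_results(full_results_text):
        # Remove the JSONP wrapper so the remaining text is plain JSON.
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        return json.loads(json_to_eval)

    # Hypothetical payload; real files are written by the layout test runner.
    sample = 'ADD_RESULTS({"num_regressions": 2, "num_flaky": 0});'
    assert parse_full_results(sample)["num_regressions"] == 2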
169 class StreamTestingMixin(object): 169 class StreamTestingMixin(object):
170
170 def assertContains(self, stream, string): 171 def assertContains(self, stream, string):
171 self.assertTrue(string in stream.getvalue()) 172 self.assertTrue(string in stream.getvalue())
172 173
173 def assertEmpty(self, stream): 174 def assertEmpty(self, stream):
174 self.assertFalse(stream.getvalue()) 175 self.assertFalse(stream.getvalue())
175 176
176 def assertNotEmpty(self, stream): 177 def assertNotEmpty(self, stream):
177 self.assertTrue(stream.getvalue()) 178 self.assertTrue(stream.getvalue())
178 179
179 180
180 class RunTest(unittest.TestCase, StreamTestingMixin): 181 class RunTest(unittest.TestCase, StreamTestingMixin):
182
181 def setUp(self): 183 def setUp(self):
182 # A real PlatformInfo object is used here instead of a 184 # A real PlatformInfo object is used here instead of a
183 # MockPlatformInfo because we need to actually check for 185 # MockPlatformInfo because we need to actually check for
184 # Windows and Mac to skip some tests. 186 # Windows and Mac to skip some tests.
185 self._platform = SystemHost().platform 187 self._platform = SystemHost().platform
186 188
187 # FIXME: Remove this when we fix test-webkitpy to work 189 # FIXME: Remove this when we fix test-webkitpy to work
188 # properly on cygwin (bug 63846). 190 # properly on cygwin (bug 63846).
189 self.should_test_processes = not self._platform.is_win() 191 self.should_test_processes = not self._platform.is_win()
190 192
191 def test_basic(self): 193 def test_basic(self):
192 options, args = parse_args(tests_included=True) 194 options, args = parse_args(tests_included=True)
193 logging_stream = StringIO.StringIO() 195 logging_stream = StringIO.StringIO()
194 host = MockHost() 196 host = MockHost()
195 port_obj = host.port_factory.get(options.platform, options) 197 port_obj = host.port_factory.get(options.platform, options)
196 details = run_webkit_tests.run(port_obj, options, args, logging_stream) 198 details = run_webkit_tests.run(port_obj, options, args, logging_stream)
197 199
198 # These numbers will need to be updated whenever we add new tests. 200 # These numbers will need to be updated whenever we add new tests.
199 self.assertEqual(details.initial_results.total, test.TOTAL_TESTS) 201 self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
200 self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS) 202 self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
201 self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES) 203 self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
202 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES) 204 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
203 self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES) 205 self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)
204 206
205 expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name) 207 expected_tests = details.initial_results.total - details.initial_results.expected_skips - \
208 len(details.initial_results.unexpected_results_by_name)
206 expected_summary_str = '' 209 expected_summary_str = ''
207 if details.initial_results.expected_failures > 0: 210 if details.initial_results.expected_failures > 0:
208 expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures) 211 expected_summary_str = " (%d passed, %d didn't)" % (
212 expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
209 one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % ( 213 one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
210 expected_tests, 214 expected_tests,
211 expected_summary_str, 215 expected_summary_str,
212 len(details.initial_results.unexpected_results_by_name)) 216 len(details.initial_results.unexpected_results_by_name))
213 self.assertTrue(one_line_summary in logging_stream.buflist) 217 self.assertTrue(one_line_summary in logging_stream.buflist)
214 218
215 # Ensure the results were summarized properly. 219 # Ensure the results were summarized properly.
216 self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code) 220 self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)
217 221
218 # Ensure the results were written out and displayed. 222 # Ensure the results were written out and displayed.
219 failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json') 223 failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
220 json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "") 224 json_to_eval = failing_results_text.replace('ADD_RESULTS(', '').replace(');', '')
221 self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results) 225 self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)
222 226
223 full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json') 227 full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
224 self.assertEqual(json.loads(full_results_text), details.summarized_full_results) 228 self.assertEqual(json.loads(full_results_text), details.summarized_full_results)
225 229
226 self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')]) 230 self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
227 231
228 def test_batch_size(self): 232 def test_batch_size(self):
229 batch_tests_run = get_test_batches(['--batch-size', '2']) 233 batch_tests_run = get_test_batches(['--batch-size', '2'])
230 for batch in batch_tests_run: 234 for batch in batch_tests_run:
231 self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch)) 235 self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
232 236
233 def test_max_locked_shards(self): 237 def test_max_locked_shards(self):
234 # Tests for the default of using one locked shard even in the case of more than one child process. 238 # Tests for the default of using one locked shard even in the case of more than one child process.
235 if not self.should_test_processes: 239 if not self.should_test_processes:
236 return 240 return
237 save_env_webkit_test_max_locked_shards = None 241 save_env_webkit_test_max_locked_shards = None
238 if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ: 242 if 'WEBKIT_TEST_MAX_LOCKED_SHARDS' in os.environ:
239 save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] 243 save_env_webkit_test_max_locked_shards = os.environ['WEBKIT_TEST_MAX_LOCKED_SHARDS']
240 del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] 244 del os.environ['WEBKIT_TEST_MAX_LOCKED_SHARDS']
241 _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False) 245 _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
242 try: 246 try:
243 self.assertTrue(any(['1 locked' in line for line in regular_output.buflist])) 247 self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
244 finally: 248 finally:
245 if save_env_webkit_test_max_locked_shards: 249 if save_env_webkit_test_max_locked_shards:
246 os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards 250 os.environ['WEBKIT_TEST_MAX_LOCKED_SHARDS'] = save_env_webkit_test_max_locked_shards
247 251
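The save/delete/restore dance around WEBKIT_TEST_MAX_LOCKED_SHARDS in the test above is a common pattern for isolating a test from the caller's environment. A hedged sketch of the same idea wrapped in a context manager, illustrative only and not used by the test itself:

    import contextlib
    import os

    @contextlib.contextmanager
    def env_var_removed(name):
        # Temporarily remove an environment variable, restoring it on exit.
        saved = os.environ.pop(name, None)
        try:
            yield
        finally:
            if saved is not None:
                os.environ[name] = saved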
248 def test_child_processes_2(self): 252 def test_child_processes_2(self):
249 if self.should_test_processes: 253 if self.should_test_processes:
250 _, regular_output, _ = logging_run( 254 _, regular_output, _ = logging_run(
251 ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False) 255 ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
252 self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist])) 256 self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
253 257
254 def test_child_processes_min(self): 258 def test_child_processes_min(self):
255 if self.should_test_processes: 259 if self.should_test_processes:
256 _, regular_output, _ = logging_run( 260 _, regular_output, _ = logging_run(
(...skipping 15 matching lines...)
272 # Exceptions raised by a worker are treated differently depending on 276 # Exceptions raised by a worker are treated differently depending on
273 # whether they are in-process or out. inline exceptions work as normal, 277 # whether they are in-process or out. inline exceptions work as normal,
274 # which allows us to get the full stack trace and traceback from the 278 # which allows us to get the full stack trace and traceback from the
275 # worker. The downside to this is that it could be any error, but this 279 # worker. The downside to this is that it could be any error, but this
276 # is actually useful in testing. 280 # is actually useful in testing.
277 # 281 #
278 # Exceptions raised in a separate process are re-packaged into 282 # Exceptions raised in a separate process are re-packaged into
279 # WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can 283 # WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can
280 # be printed, but don't display properly in the unit test exception handlers. 284 # be printed, but don't display properly in the unit test exception handlers.
281 self.assertRaises(BaseException, logging_run, 285 self.assertRaises(BaseException, logging_run,
282 ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True) 286 ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
283 287
284 if self.should_test_processes: 288 if self.should_test_processes:
285 self.assertRaises(BaseException, logging_run, 289 self.assertRaises(BaseException, logging_run,
286 ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False) 290 ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
287 291
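The comment above describes how exceptions raised in a separate worker process are re-packaged with a string capture of the stack. A rough sketch of that pattern, not webkitpy's actual implementation (the class and helper names here are illustrative):

    import traceback

    class WorkerException(BaseException):
        # Carries a pre-formatted traceback string across the process boundary.
        def __init__(self, message, stack_str):
            super(WorkerException, self).__init__(message)
            self.stack_str = stack_str

    def repackage_current_exception(exc):
        # Assumed to be called from inside an `except` block in the worker,
        # where traceback.format_exc() still refers to the active exception.
        return WorkerException(str(exc), traceback.format_exc())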
288 def test_device_failure(self): 292 def test_device_failure(self):
289 # Test that we handle a device going offline during a test properly. 293 # Test that we handle a device going offline during a test properly.
290 details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True) 294 details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
291 self.assertEqual(details.exit_code, 0) 295 self.assertEqual(details.exit_code, 0)
292 self.assertTrue('worker/0 has failed' in regular_output.getvalue()) 296 self.assertTrue('worker/0 has failed' in regular_output.getvalue())
293 297
294 def test_full_results_html(self): 298 def test_full_results_html(self):
295 host = MockHost() 299 host = MockHost()
296 details, _, _ = logging_run(['--full-results-html'], host=host) 300 details, _, _ = logging_run(['--full-results-html'], host=host)
297 self.assertEqual(details.exit_code, 0) 301 self.assertEqual(details.exit_code, 0)
298 self.assertEqual(len(host.user.opened_urls), 1) 302 self.assertEqual(len(host.user.opened_urls), 1)
299 303
300 def test_keyboard_interrupt(self): 304 def test_keyboard_interrupt(self):
301 # Note that this also tests running a test marked as SKIP if 305 # Note that this also tests running a test marked as SKIP if
302 # you specify it explicitly. 306 # you specify it explicitly.
303 details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True) 307 details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
304 self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS) 308 self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)
305 309
306 if self.should_test_processes: 310 if self.should_test_processes:
307 _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False) 311 _, regular_output, _ = logging_run(
312 ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
308 self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist])) 313 self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))
309 314
310 def test_no_tests_found(self): 315 def test_no_tests_found(self):
311 details, err, _ = logging_run(['resources'], tests_included=True) 316 details, err, _ = logging_run(['resources'], tests_included=True)
312 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS) 317 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
313 self.assertContains(err, 'No tests to run.\n') 318 self.assertContains(err, 'No tests to run.\n')
314 319
315 def test_no_tests_found_2(self): 320 def test_no_tests_found_2(self):
316 details, err, _ = logging_run(['foo'], tests_included=True) 321 details, err, _ = logging_run(['foo'], tests_included=True)
317 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS) 322 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
318 self.assertContains(err, 'No tests to run.\n') 323 self.assertContains(err, 'No tests to run.\n')
319 324
320 def test_no_tests_found_3(self): 325 def test_no_tests_found_3(self):
321 details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True) 326 details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
322 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS) 327 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
323 self.assertContains(err, 'No tests to run.\n') 328 self.assertContains(err, 'No tests to run.\n')
324 329
325 def test_natural_order(self): 330 def test_natural_order(self):
326 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html'] 331 tests_to_run = [
332 'passes/audio.html',
333 'failures/expected/text.html',
334 'failures/expected/missing_text.html',
335 'passes/args.html']
327 tests_run = get_tests_run(['--order=natural'] + tests_to_run) 336 tests_run = get_tests_run(['--order=natural'] + tests_to_run)
328 self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run) 337 self.assertEqual(['failures/expected/missing_text.html',
338 'failures/expected/text.html',
339 'passes/args.html',
340 'passes/audio.html'],
341 tests_run)
329 342
330 def test_natural_order_test_specified_multiple_times(self): 343 def test_natural_order_test_specified_multiple_times(self):
331 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html'] 344 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
332 tests_run = get_tests_run(['--order=natural'] + tests_to_run) 345 tests_run = get_tests_run(['--order=natural'] + tests_to_run)
333 self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run) 346 self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
334 347
335 def test_random_order(self): 348 def test_random_order(self):
336 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html'] 349 tests_to_run = [
350 'passes/audio.html',
351 'failures/expected/text.html',
352 'failures/expected/missing_text.html',
353 'passes/args.html']
337 tests_run = get_tests_run(['--order=random'] + tests_to_run) 354 tests_run = get_tests_run(['--order=random'] + tests_to_run)
338 self.assertEqual(sorted(tests_to_run), sorted(tests_run)) 355 self.assertEqual(sorted(tests_to_run), sorted(tests_run))
339 356
340 def test_random_daily_seed_order(self): 357 def test_random_daily_seed_order(self):
341 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html'] 358 tests_to_run = [
359 'passes/audio.html',
360 'failures/expected/text.html',
361 'failures/expected/missing_text.html',
362 'passes/args.html']
342 tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run) 363 tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
343 self.assertEqual(sorted(tests_to_run), sorted(tests_run)) 364 self.assertEqual(sorted(tests_to_run), sorted(tests_run))
344 365
345 def test_random_order_test_specified_multiple_times(self): 366 def test_random_order_test_specified_multiple_times(self):
346 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html'] 367 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
347 tests_run = get_tests_run(['--order=random'] + tests_to_run) 368 tests_run = get_tests_run(['--order=random'] + tests_to_run)
348 self.assertEqual(tests_run.count('passes/audio.html'), 2) 369 self.assertEqual(tests_run.count('passes/audio.html'), 2)
349 self.assertEqual(tests_run.count('passes/args.html'), 2) 370 self.assertEqual(tests_run.count('passes/args.html'), 2)
350 371
351 def test_no_order(self): 372 def test_no_order(self):
352 tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html'] 373 tests_to_run = [
374 'passes/audio.html',
375 'failures/expected/text.html',
376 'failures/expected/missing_text.html',
377 'passes/args.html']
353 tests_run = get_tests_run(['--order=none'] + tests_to_run) 378 tests_run = get_tests_run(['--order=none'] + tests_to_run)
354 self.assertEqual(tests_to_run, tests_run) 379 self.assertEqual(tests_to_run, tests_run)
355 380
356 def test_no_order_test_specified_multiple_times(self): 381 def test_no_order_test_specified_multiple_times(self):
357 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html'] 382 tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
358 tests_run = get_tests_run(['--order=none'] + tests_to_run) 383 tests_run = get_tests_run(['--order=none'] + tests_to_run)
359 self.assertEqual(tests_to_run, tests_run) 384 self.assertEqual(tests_to_run, tests_run)
360 385
361 def test_no_order_with_directory_entries_in_natural_order(self): 386 def test_no_order_with_directory_entries_in_natural_order(self):
362 tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes'] 387 tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
363 tests_run = get_tests_run(['--order=none'] + tests_to_run) 388 tests_run = get_tests_run(['--order=none'] + tests_to_run)
364 self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html']) 389 self.assertEqual(tests_run,
390 ['http/tests/ssl/text.html',
391 'perf/foo/test.html',
392 'http/tests/passes/image.html',
393 'http/tests/passes/text.html'])
365 394
366 def test_repeat_each(self): 395 def test_repeat_each(self):
367 tests_to_run = ['passes/image.html', 'passes/text.html'] 396 tests_to_run = ['passes/image.html', 'passes/text.html']
368 tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run) 397 tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
369 self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html']) 398 self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
370 399
371 def test_ignore_flag(self): 400 def test_ignore_flag(self):
372 # Note that passes/image.html is expected to be run since we specified it directly. 401 # Note that passes/image.html is expected to be run since we specified it directly.
373 tests_run = get_tests_run(['-i', 'passes', 'passes/image.html']) 402 tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
374 self.assertFalse('passes/text.html' in tests_run) 403 self.assertFalse('passes/text.html' in tests_run)
375 self.assertTrue('passes/image.html' in tests_run) 404 self.assertTrue('passes/image.html' in tests_run)
376 405
377 def test_skipped_flag(self): 406 def test_skipped_flag(self):
378 tests_run = get_tests_run(['passes']) 407 tests_run = get_tests_run(['passes'])
379 self.assertFalse('passes/skipped/skip.html' in tests_run) 408 self.assertFalse('passes/skipped/skip.html' in tests_run)
380 num_tests_run_by_default = len(tests_run) 409 num_tests_run_by_default = len(tests_run)
381 410
382 # Check that nothing changes when we specify skipped=default. 411 # Check that nothing changes when we specify skipped=default.
383 self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])), 412 self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
384 num_tests_run_by_default) 413 num_tests_run_by_default)
385 414
386 # Now check that we run one more test (the skipped one). 415 # Now check that we run one more test (the skipped one).
387 tests_run = get_tests_run(['--skipped=ignore', 'passes']) 416 tests_run = get_tests_run(['--skipped=ignore', 'passes'])
388 self.assertTrue('passes/skipped/skip.html' in tests_run) 417 self.assertTrue('passes/skipped/skip.html' in tests_run)
389 self.assertEqual(len(tests_run), num_tests_run_by_default + 1) 418 self.assertEqual(len(tests_run), num_tests_run_by_default + 1)
390 419
391 # Now check that we only run the skipped test. 420 # Now check that we only run the skipped test.
392 self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html']) 421 self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])
393 422
394 # Now check that we don't run anything. 423 # Now check that we don't run anything.
(...skipping 64 matching lines...)
459 self.assertEqual(['passes/text.html'], tests_run) 488 self.assertEqual(['passes/text.html'], tests_run)
460 489
461 def test_single_skipped_file(self): 490 def test_single_skipped_file(self):
462 tests_run = get_tests_run(['failures/expected/keybaord.html']) 491 tests_run = get_tests_run(['failures/expected/keybaord.html'])
463 self.assertEqual([], tests_run) 492 self.assertEqual([], tests_run)
464 493
465 def test_stderr_is_saved(self): 494 def test_stderr_is_saved(self):
466 host = MockHost() 495 host = MockHost()
467 self.assertTrue(passing_run(host=host)) 496 self.assertTrue(passing_run(host=host))
468 self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'), 497 self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
469 'stuff going to stderr') 498 'stuff going to stderr')
470 499
471 def test_test_list(self): 500 def test_test_list(self):
472 host = MockHost() 501 host = MockHost()
473 filename = '/tmp/foo.txt' 502 filename = '/tmp/foo.txt'
474 host.filesystem.write_text_file(filename, 'passes/text.html') 503 host.filesystem.write_text_file(filename, 'passes/text.html')
475 tests_run = get_tests_run(['--test-list=%s' % filename], host=host) 504 tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
476 self.assertEqual(['passes/text.html'], tests_run) 505 self.assertEqual(['passes/text.html'], tests_run)
477 host.filesystem.remove(filename) 506 host.filesystem.remove(filename)
478 details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host) 507 details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
479 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATU S) 508 self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATU S)
(...skipping 35 matching lines...)
515 544
516 # Verify that --no-smoke continues to work on a smoke-by-default port. 545 # Verify that --no-smoke continues to work on a smoke-by-default port.
517 tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj) 546 tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
518 self.assertNotEqual(['passes/text.html'], tests_run) 547 self.assertNotEqual(['passes/text.html'], tests_run)
519 548
520 def test_missing_and_unexpected_results(self): 549 def test_missing_and_unexpected_results(self):
521 # Test that we update expectations in place. If the expectation 550 # Test that we update expectations in place. If the expectation
522 # is missing, update the expected generic location. 551 # is missing, update the expected generic location.
523 host = MockHost() 552 host = MockHost()
524 details, err, _ = logging_run(['--no-show-results', '--retry-failures', 553 details, err, _ = logging_run(['--no-show-results', '--retry-failures',
525 'failures/expected/missing_image.html', 554 'failures/expected/missing_image.html',
526 'failures/unexpected/missing_text.html', 555 'failures/unexpected/missing_text.html',
527 'failures/unexpected/text-image-checksum.html'], 556 'failures/unexpected/text-image-checksum.html'],
528 tests_included=True, host=host) 557 tests_included=True, host=host)
529 file_list = host.filesystem.written_files.keys() 558 file_list = host.filesystem.written_files.keys()
530 self.assertEqual(details.exit_code, 2) 559 self.assertEqual(details.exit_code, 2)
531 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json') 560 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
532 self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1) 561 self.assertTrue(
533 self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1) 562 json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -
563 1)
564 self.assertTrue(
565 json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -
566 1)
534 self.assertTrue(json_string.find('"num_regressions":2') != -1) 567 self.assertTrue(json_string.find('"num_regressions":2') != -1)
535 self.assertTrue(json_string.find('"num_flaky":0') != -1) 568 self.assertTrue(json_string.find('"num_flaky":0') != -1)
536 569
537 def test_different_failure_on_retry(self): 570 def test_different_failure_on_retry(self):
538 # This tests that if a test fails two different ways -- both unexpected 571 # This tests that if a test fails two different ways -- both unexpected
539 # -- we treat it as a failure rather than a flaky result. We use the 572 # -- we treat it as a failure rather than a flaky result. We use the
540 # initial failure for simplicity and consistency w/ the flakiness 573 # initial failure for simplicity and consistency w/ the flakiness
541 # dashboard, even if the second failure is worse. 574 # dashboard, even if the second failure is worse.
542 575
543 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True) 576 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
(...skipping 19 matching lines...)
563 details, err, _ = logging_run(extra_args=args, host=host, tests_included=True) 596 details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
564 597
565 self.assertEqual(details.exit_code, 1) 598 self.assertEqual(details.exit_code, 1)
566 expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true' 599 expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
567 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json') 600 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
568 self.assertTrue(json_string.find(expected_token) != -1) 601 self.assertTrue(json_string.find(expected_token) != -1)
569 602
570 def test_crash_with_stderr(self): 603 def test_crash_with_stderr(self):
571 host = MockHost() 604 host = MockHost()
572 _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host) 605 _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
573 self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1) 606 self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find(
607 '{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
574 608
575 def test_no_image_failure_with_image_diff(self): 609 def test_no_image_failure_with_image_diff(self):
576 host = MockHost() 610 host = MockHost()
577 _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host) 611 _, regular_output, _ = logging_run(
578 self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1) 612 ['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
613 self.assertTrue(
614 host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -
615 1)
579 616
580 def test_exit_after_n_failures_upload(self): 617 def test_exit_after_n_failures_upload(self):
581 host = MockHost() 618 host = MockHost()
582 details, regular_output, user = logging_run( 619 details, regular_output, user = logging_run(
583 ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'], 620 ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
584 tests_included=True, host=host) 621 tests_included=True, host=host)
585 622
586 # By returning False, we know that the incremental results were generate d and then deleted. 623 # By returning False, we know that the incremental results were generate d and then deleted.
587 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json')) 624 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
588 625
589 self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS) 626 self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)
590 627
591 # This checks that passes/text.html is considered SKIPped. 628 # This checks that passes/text.html is considered SKIPped.
592 self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')) 629 self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
593 630
594 # This checks that we told the user we bailed out. 631 # This checks that we told the user we bailed out.
595 self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue()) 632 self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
596 633
597 # This checks that neither test ran as expected. 634 # This checks that neither test ran as expected.
598 # FIXME: This log message is confusing; tests that were skipped should be called out separately. 635 # FIXME: This log message is confusing; tests that were skipped should be called out separately.
599 self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue()) 636 self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
600 637
601 def test_exit_after_n_failures(self): 638 def test_exit_after_n_failures(self):
602 # Unexpected failures should result in tests stopping. 639 # Unexpected failures should result in tests stopping.
603 tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1']) 640 tests_run = get_tests_run(
641 ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
604 self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run) 642 self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
605 643
606 # But we'll keep going for expected ones. 644 # But we'll keep going for expected ones.
607 tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1']) 645 tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
608 self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run) 646 self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)
609 647
610 def test_exit_after_n_crashes(self): 648 def test_exit_after_n_crashes(self):
611 # Unexpected crashes should result in tests stopping. 649 # Unexpected crashes should result in tests stopping.
612 tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1']) 650 tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
613 self.assertEqual(['failures/unexpected/crash.html'], tests_run) 651 self.assertEqual(['failures/unexpected/crash.html'], tests_run)
614 652
615 # Same with timeouts. 653 # Same with timeouts.
616 tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1']) 654 tests_run = get_tests_run(['failures/unexpected/timeout.html',
655 'passes/text.html',
656 '--exit-after-n-crashes-or-timeouts',
657 '1'])
617 self.assertEqual(['failures/unexpected/timeout.html'], tests_run) 658 self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
618 659
619 # But we'll keep going for expected ones. 660 # But we'll keep going for expected ones.
620 tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1']) 661 tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
621 self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run) 662 self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)
622 663
623 def test_results_directory_absolute(self): 664 def test_results_directory_absolute(self):
624 # We run a configuration that should fail, to generate output, then 665 # We run a configuration that should fail, to generate output, then
625 # look for what the output results url was. 666 # look for what the output results url was.
626 667
(...skipping 14 matching lines...)
641 # We run a configuration that should fail, to generate output, then 682 # We run a configuration that should fail, to generate output, then
642 # look for what the output results url was. 683 # look for what the output results url was.
643 host = MockHost() 684 host = MockHost()
644 host.filesystem.maybe_make_directory('/tmp/cwd') 685 host.filesystem.maybe_make_directory('/tmp/cwd')
645 host.filesystem.chdir('/tmp/cwd') 686 host.filesystem.chdir('/tmp/cwd')
646 _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host) 687 _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
647 self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')]) 688 self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
648 689
649 def test_retrying_default_value(self): 690 def test_retrying_default_value(self):
650 host = MockHost() 691 host = MockHost()
651 details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host) 692 details, err, _ = logging_run(
693 ['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
652 self.assertEqual(details.exit_code, 1) 694 self.assertEqual(details.exit_code, 1)
653 self.assertFalse('Retrying' in err.getvalue()) 695 self.assertFalse('Retrying' in err.getvalue())
654 696
655 host = MockHost() 697 host = MockHost()
656 details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host) 698 details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
657 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py. 699 self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py.
658 self.assertTrue('Retrying' in err.getvalue()) 700 self.assertTrue('Retrying' in err.getvalue())
659 701
660 def test_retrying_default_value_test_list(self): 702 def test_retrying_default_value_test_list(self):
661 host = MockHost() 703 host = MockHost()
(...skipping 15 matching lines...)
677 details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host) 719 details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
678 self.assertEqual(details.exit_code, 0) 720 self.assertEqual(details.exit_code, 0)
679 self.assertTrue('Retrying' in err.getvalue()) 721 self.assertTrue('Retrying' in err.getvalue())
680 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt')) 722 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
681 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt')) 723 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
682 self.assertEqual(len(host.user.opened_urls), 0) 724 self.assertEqual(len(host.user.opened_urls), 0)
683 725
684 # Now we test that --clobber-old-results does remove the old entries and the old retries, 726 # Now we test that --clobber-old-results does remove the old entries and the old retries,
685 # and that we don't retry again. 727 # and that we don't retry again.
686 host = MockHost() 728 host = MockHost()
687 details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host) 729 details, err, _ = logging_run(
730 ['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
688 self.assertEqual(details.exit_code, 1) 731 self.assertEqual(details.exit_code, 1)
689 self.assertTrue('Clobbering old results' in err.getvalue()) 732 self.assertTrue('Clobbering old results' in err.getvalue())
690 self.assertTrue('flaky/text.html' in err.getvalue()) 733 self.assertTrue('flaky/text.html' in err.getvalue())
691 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt')) 734 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
692 self.assertFalse(host.filesystem.exists('retries')) 735 self.assertFalse(host.filesystem.exists('retries'))
693 self.assertEqual(len(host.user.opened_urls), 1) 736 self.assertEqual(len(host.user.opened_urls), 1)
694 737
695 def test_retrying_crashed_tests(self): 738 def test_retrying_crashed_tests(self):
696 host = MockHost() 739 host = MockHost()
697 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host) 740 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
698 self.assertEqual(details.exit_code, 1) 741 self.assertEqual(details.exit_code, 1)
699 self.assertTrue('Retrying' in err.getvalue()) 742 self.assertTrue('Retrying' in err.getvalue())
700 743
701 def test_retrying_leak_tests(self): 744 def test_retrying_leak_tests(self):
702 host = MockHost() 745 host = MockHost()
703 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host) 746 details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
704 self.assertEqual(details.exit_code, 1) 747 self.assertEqual(details.exit_code, 1)
705 self.assertTrue('Retrying' in err.getvalue()) 748 self.assertTrue('Retrying' in err.getvalue())
706 749
707 def test_retrying_force_pixel_tests(self): 750 def test_retrying_force_pixel_tests(self):
708 host = MockHost() 751 host = MockHost()
709 details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host) 752 details, err, _ = logging_run(
753 ['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
710 self.assertEqual(details.exit_code, 1) 754 self.assertEqual(details.exit_code, 1)
711 self.assertTrue('Retrying' in err.getvalue()) 755 self.assertTrue('Retrying' in err.getvalue())
712 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt')) 756 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
713 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png')) 757 self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
714 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt')) 758 self.assertTrue(
715 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png')) 759 host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
760 self.assertTrue(
761 host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
716 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json') 762 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
717 json = parse_full_results(json_string) 763 json = parse_full_results(json_string)
718 self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"], 764 self.assertEqual(json['tests']['failures']['unexpected']['text-image-checksum.html'],
719 {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True}) 765 {'expected': 'PASS', 'actual': 'TEXT IMAGE+TEXT', 'is_unexpected': True})
720 self.assertFalse(json["pixel_tests_enabled"]) 766 self.assertFalse(json['pixel_tests_enabled'])
721 self.assertEqual(details.enabled_pixel_tests_in_retry, True) 767 self.assertEqual(details.enabled_pixel_tests_in_retry, True)
722 768
723 def test_retrying_uses_retries_directory(self): 769 def test_retrying_uses_retries_directory(self):
724 host = MockHost() 770 host = MockHost()
725 details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host) 771 details, err, _ = logging_run(
772 ['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
726 self.assertEqual(details.exit_code, 1) 773 self.assertEqual(details.exit_code, 1)
727 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt')) 774 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
728 self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt')) 775 self.assertTrue(
776 host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
729 777
730 def test_run_order__inline(self): 778 def test_run_order__inline(self):
731 # These next tests test that we run the tests in ascending alphabetical 779 # These next tests test that we run the tests in ascending alphabetical
732 # order per directory. HTTP tests are sharded separately from other tests, 780 # order per directory. HTTP tests are sharded separately from other tests,
733 # so we have to test both. 781 # so we have to test both.
734 tests_run = get_tests_run(['-i', 'passes/passes', 'passes']) 782 tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
735 self.assertEqual(tests_run, sorted(tests_run)) 783 self.assertEqual(tests_run, sorted(tests_run))
736 784
737 tests_run = get_tests_run(['http/tests/passes']) 785 tests_run = get_tests_run(['http/tests/passes'])
738 self.assertEqual(tests_run, sorted(tests_run)) 786 self.assertEqual(tests_run, sorted(tests_run))
(...skipping 20 matching lines...)
759 807
760 def test_reftest_driver_should_run_expected_mismatch_html(self): 808 def test_reftest_driver_should_run_expected_mismatch_html(self):
761 tests_run = get_test_results(['passes/mismatch.html']) 809 tests_run = get_test_results(['passes/mismatch.html'])
762 self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html']) 810 self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])
763 811
764 def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self): 812 def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
765 host = MockHost() 813 host = MockHost()
766 _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host) 814 _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
767 results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')) 815 results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
768 816
769 self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING"), 817 self.assertEqual(results['tests']['reftests']['foo']['unlistedtest.html']['actual'], 'MISSING'),
770 self.assertEqual(results["num_regressions"], 5) 818 self.assertEqual(results['num_regressions'], 5)
771 self.assertEqual(results["num_flaky"], 0) 819 self.assertEqual(results['num_flaky'], 0)
772 820
773 def test_reftest_crash(self): 821 def test_reftest_crash(self):
774 test_results = get_test_results(['failures/unexpected/crash-reftest.html']) 822 test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
775 # The list of references should be empty since the test crashed and we didn't run any references. 823 # The list of references should be empty since the test crashed and we didn't run any references.
776 self.assertEqual(test_results[0].references, []) 824 self.assertEqual(test_results[0].references, [])
777 825
778 def test_reftest_with_virtual_reference(self): 826 def test_reftest_with_virtual_reference(self):
779 _, err, _ = logging_run(['--details', 'virtual/passes/reftest.html'], tests_included=True) 827 _, err, _ = logging_run(['--details', 'virtual/passes/reftest.html'], tests_included=True)
780 self.assertTrue('ref: virtual/passes/reftest-expected.html' in err.getvalue()) 828 self.assertTrue('ref: virtual/passes/reftest-expected.html' in err.getvalue())
781 829
(...skipping 27 matching lines...)
809 # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch 857 # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
810 # aren't available. 858 # aren't available.
811 host = MockHost() 859 host = MockHost()
812 _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host) 860 _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
813 written_files = host.filesystem.written_files 861 written_files = host.filesystem.written_files
814 self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys())) 862 self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
815 self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys())) 863 self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
816 self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys())) 864 self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
817 865
818 full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json') 866 full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
819 full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", "")) 867 full_results = json.loads(full_results_text.replace('ADD_RESULTS(', '').replace(');', ''))
820 self.assertEqual(full_results['has_wdiff'], False) 868 self.assertEqual(full_results['has_wdiff'], False)
821 self.assertEqual(full_results['has_pretty_patch'], False) 869 self.assertEqual(full_results['has_pretty_patch'], False)
822 870
823 def test_unsupported_platform(self): 871 def test_unsupported_platform(self):
824 stdout = StringIO.StringIO() 872 stdout = StringIO.StringIO()
825 stderr = StringIO.StringIO() 873 stderr = StringIO.StringIO()
826 res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr) 874 res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
827 875
828 self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS) 876 self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
829 self.assertEqual(stdout.getvalue(), '') 877 self.assertEqual(stdout.getvalue(), '')
830 self.assertTrue('unsupported platform' in stderr.getvalue()) 878 self.assertTrue('unsupported platform' in stderr.getvalue())
831 879
832 def test_build_check(self): 880 def test_build_check(self):
833 # By using a port_name for a different platform than the one we're running on, the build check should always fail. 881 # By using a port_name for a different platform than the one we're running on, the build check should always fail.
834 if sys.platform == 'darwin': 882 if sys.platform == 'darwin':
835 port_name = 'linux-x86' 883 port_name = 'linux-x86'
836 else: 884 else:
837 port_name = 'mac-lion' 885 port_name = 'mac-lion'
838 out = StringIO.StringIO() 886 out = StringIO.StringIO()
839 err = StringIO.StringIO() 887 err = StringIO.StringIO()
840 self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS) 888 self.assertEqual(run_webkit_tests.main(['--platform',
889 port_name,
890 'fast/harness/results.html'],
891 out,
892 err),
893 test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
841 894
842 def test_verbose_in_child_processes(self): 895 def test_verbose_in_child_processes(self):
843 # When we actually run multiple processes, we may have to reconfigure logging in the 896 # When we actually run multiple processes, we may have to reconfigure logging in the
844 # child process (e.g., on win32) and we need to make sure that works and we still 897 # child process (e.g., on win32) and we need to make sure that works and we still
845 # see the verbose log output. However, we can't use logging_run() because using 898 # see the verbose log output. However, we can't use logging_run() because using
846 # outputcapture to capture stdout and stderr latter results in a nonpicklable host. 899 # outputcapture to capture stdout and stderr latter results in a nonpicklable host.
847 900
848 # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559 901 # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
849 if not self.should_test_processes: 902 if not self.should_test_processes:
850 return 903 return
851 904
852 options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False) 905 options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes',
906 '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
853 host = MockHost() 907 host = MockHost()
854 port_obj = host.port_factory.get(port_name=options.platform, options=options) 908 port_obj = host.port_factory.get(port_name=options.platform, options=options)
855 logging_stream = StringIO.StringIO() 909 logging_stream = StringIO.StringIO()
856 run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream) 910 run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
857 self.assertTrue('text.html passed' in logging_stream.getvalue()) 911 self.assertTrue('text.html passed' in logging_stream.getvalue())
858 self.assertTrue('image.html passed' in logging_stream.getvalue()) 912 self.assertTrue('image.html passed' in logging_stream.getvalue())
859 913
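A minimal sketch of the capture pattern exercised by test_verbose_in_child_processes above, reusing helpers that already appear in this file (parse_args, MockHost, run_webkit_tests.run); the helper name run_and_capture_log is hypothetical and only illustrates why an in-memory logging_stream keeps the host picklable for worker processes:

    import StringIO

    def run_and_capture_log(argv):
        # Parse options the same way the test does, then run with an in-memory
        # logging stream; nothing unpicklable (such as an outputcapture object)
        # is attached to the host, so child processes can still be spawned.
        options, parsed_args = parse_args(argv, tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        return logging_stream.getvalue()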
860 def disabled_test_driver_logging(self): 914 def disabled_test_driver_logging(self):
861 # FIXME: Figure out how to either use a mock-test port to 915 # FIXME: Figure out how to either use a mock-test port to
862 # get output or make mock ports work again. 916 # get output or make mock ports work again.
863 host = Host() 917 host = Host()
864 _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'], 918 _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
865 tests_included=True, host=host) 919 tests_included=True, host=host)
866 self.assertTrue('OUT:' in err.getvalue()) 920 self.assertTrue('OUT:' in err.getvalue())
867 921
868 def test_write_full_results_to(self): 922 def test_write_full_results_to(self):
869 host = MockHost() 923 host = MockHost()
870 details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host) 924 details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
871 self.assertEqual(details.exit_code, 0) 925 self.assertEqual(details.exit_code, 0)
872 self.assertTrue(host.filesystem.exists('/tmp/full_results.json')) 926 self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))
873 927
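A hedged follow-up sketch for test_write_full_results_to above: the written file can be read back through the same MockHost in-memory filesystem. That the file is plain JSON (directly parsable with json.loads) is an assumption not shown in this diff:

    results = json.loads(host.filesystem.read_text_file('/tmp/full_results.json'))
    # Keys such as 'has_wdiff' and 'has_pretty_patch', asserted earlier in this
    # file, are then available directly on the parsed dict.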
874 928
875 class EndToEndTest(unittest.TestCase): 929 class EndToEndTest(unittest.TestCase):
930
876 def test_reftest_with_two_notrefs(self): 931 def test_reftest_with_two_notrefs(self):
877 # Test that we update expectations in place. If the expectation 932 # Test that we update expectations in place. If the expectation
878 # is missing, update the expected generic location. 933 # is missing, update the expected generic location.
879 host = MockHost() 934 host = MockHost()
880 _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host) 935 _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
881 file_list = host.filesystem.written_files.keys() 936 file_list = host.filesystem.written_files.keys()
882 937
883 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json') 938 json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
884 json = parse_full_results(json_string) 939 json = parse_full_results(json_string)
885 self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"]) 940 self.assertTrue('multiple-match-success.html' not in json['tests']['reftests']['foo'])
886 self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"]) 941 self.assertTrue('multiple-mismatch-success.html' not in json['tests']['reftests']['foo'])
887 self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"]) 942 self.assertTrue('multiple-both-success.html' not in json['tests']['reftests']['foo'])
888 943
889 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"], 944 self.assertEqual(json['tests']['reftests']['foo']['multiple-match-failure.html'],
890 {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True}) 945 {'expected': 'PASS', 'actual': 'IMAGE', 'reftest_type': ['=='], 'is_unexpected': True})
891 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"], 946 self.assertEqual(json['tests']['reftests']['foo']['multiple-mismatch-failure.html'],
892 {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True}) 947 {'expected': 'PASS', 'actual': 'IMAGE', 'reftest_type': ['!='], 'is_unexpected': True})
893 self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"], 948 self.assertEqual(json['tests']['reftests']['foo']['multiple-both-failure.html'],
894 {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True}) 949 {'expected': 'PASS', 'actual': 'IMAGE', 'reftest_type': ['==', '!='], 'is_unexpected': True})
895 950
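The assertions in test_reftest_with_two_notrefs above imply that the parsed results form a nested dict keyed first by directory components and then by test file name. A small illustrative helper, assuming only that shape (the helper itself is hypothetical):

    def result_for(results, test_path):
        # Walk the nested 'tests' tree: one level per directory component,
        # ending at the per-test result dict.
        node = results['tests']
        for part in test_path.split('/'):
            node = node[part]
        return node

    # e.g. result_for(json, 'reftests/foo/multiple-match-failure.html')['actual']
    # would be 'IMAGE' for the run above.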
896 951
897 class RebaselineTest(unittest.TestCase, StreamTestingMixin): 952 class RebaselineTest(unittest.TestCase, StreamTestingMixin):
953
898 def assertBaselines(self, file_list, file, extensions, err): 954 def assertBaselines(self, file_list, file, extensions, err):
899 "assert that the file_list contains the baselines.""" 955 'assert that the file_list contains the baselines.'''
900 for ext in extensions: 956 for ext in extensions:
901 baseline = file + "-expected" + ext 957 baseline = file + '-expected' + ext
902 baseline_msg = 'Writing new expected result "%s"\n' % baseline 958 baseline_msg = 'Writing new expected result "%s"\n' % baseline
903 self.assertTrue(any(f.find(baseline) != -1 for f in file_list)) 959 self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
904 self.assertContains(err, baseline_msg) 960 self.assertContains(err, baseline_msg)
905 961
906 # FIXME: Add tests to ensure that we're *not* writing baselines when we're not 962 # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
907 # supposed to be. 963 # supposed to be.
908 964
909 def test_reset_results(self): 965 def test_reset_results(self):
910 # Test that we update expectations in place. If the expectation 966 # Test that we update expectations in place. If the expectation
911 # is missing, update the expected generic location. 967 # is missing, update the expected generic location.
912 host = MockHost() 968 host = MockHost()
913 details, err, _ = logging_run( 969 details, err, _ = logging_run(
914 ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'], 970 ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
915 tests_included=True, host=host, new_results=True) 971 tests_included=True, host=host, new_results=True)
916 file_list = host.filesystem.written_files.keys() 972 file_list = host.filesystem.written_files.keys()
917 self.assertEqual(details.exit_code, 0) 973 self.assertEqual(details.exit_code, 0)
918 self.assertEqual(len(file_list), 8) 974 self.assertEqual(len(file_list), 8)
919 self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err) 975 self.assertBaselines(file_list, 'passes/image', ['.txt', '.png'], err)
920 self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err) 976 self.assertBaselines(file_list, 'failures/expected/missing_image', ['.txt', '.png'], err)
921 977
922 def test_missing_results(self): 978 def test_missing_results(self):
923 # Test that we update expectations in place. If the expectation 979 # Test that we update expectations in place. If the expectation
924 # is missing, update the expected generic location. 980 # is missing, update the expected generic location.
925 host = MockHost() 981 host = MockHost()
926 details, err, _ = logging_run(['--no-show-results', 982 details, err, _ = logging_run(['--no-show-results',
927 'failures/unexpected/missing_text.html', 983 'failures/unexpected/missing_text.html',
928 'failures/unexpected/missing_image.html', 984 'failures/unexpected/missing_image.html',
929 'failures/unexpected/missing_render_tree_dump.html'], 985 'failures/unexpected/missing_render_tree_dump.html'],
930 tests_included=True, host=host, new_results=True) 986 tests_included=True, host=host, new_results=True)
931 file_list = host.filesystem.written_files.keys() 987 file_list = host.filesystem.written_files.keys()
932 self.assertEqual(details.exit_code, 3) 988 self.assertEqual(details.exit_code, 3)
933 self.assertEqual(len(file_list), 10) 989 self.assertEqual(len(file_list), 10)
934 self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err) 990 self.assertBaselines(file_list, 'failures/unexpected/missing_text', ['.txt'], err)
935 self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err) 991 self.assertBaselines(file_list, 'platform/test/failures/unexpected/missing_image', ['.png'], err)
936 self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err) 992 self.assertBaselines(file_list, 'platform/test/failures/unexpected/missing_render_tree_dump', ['.txt'], err)
937 993
938 def test_missing_results_not_added_if_expected_missing(self): 994 def test_missing_results_not_added_if_expected_missing(self):
939 # Test that we update expectations in place. If the expectation 995 # Test that we update expectations in place. If the expectation
940 # is missing, update the expected generic location. 996 # is missing, update the expected generic location.
941 host = MockHost() 997 host = MockHost()
942 options, parsed_args = run_webkit_tests.parse_args([]) 998 options, parsed_args = run_webkit_tests.parse_args([])
943 999
944 port = test.TestPort(host, options=options) 1000 port = test.TestPort(host, options=options)
945 host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """ 1001 host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
946 Bug(foo) failures/unexpected/missing_text.html [ Missing ] 1002 Bug(foo) failures/unexpected/missing_text.html [ Missing ]
947 Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ] 1003 Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
948 Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ] 1004 Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
949 Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ] 1005 Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
950 """) 1006 """)
951 details, err, _ = logging_run(['--no-show-results', 1007 details, err, _ = logging_run(['--no-show-results',
952 'failures/unexpected/missing_text.html', 1008 'failures/unexpected/missing_text.html',
953 'failures/unexpected/missing_image.html', 1009 'failures/unexpected/missing_image.html',
954 'failures/unexpected/missing_audio.html', 1010 'failures/unexpected/missing_audio.html',
955 'failures/unexpected/missing_render_tree_dump.html'], 1011 'failures/unexpected/missing_render_tree_dump.html'],
956 tests_included=True, host=host, new_results=True, port_obj=port) 1012 tests_included=True, host=host, new_results=True, port_obj=port)
957 file_list = host.filesystem.written_files.keys() 1013 file_list = host.filesystem.written_files.keys()
958 self.assertEqual(details.exit_code, 0) 1014 self.assertEqual(details.exit_code, 0)
959 self.assertEqual(len(file_list), 7) 1015 self.assertEqual(len(file_list), 7)
960 self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list)) 1016 self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
961 self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list)) 1017 self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
962 self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list)) 1018 self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))
963 1019
964 def test_missing_results_not_added_if_expected_missing_and_reset_results(self): 1020 def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
965 # Test that we update expectations in place. If the expectation 1021 # Test that we update expectations in place. If the expectation
966 # is missing, update the expected generic location. 1022 # is missing, update the expected generic location.
967 host = MockHost() 1023 host = MockHost()
968 options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results']) 1024 options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])
969 1025
970 port = test.TestPort(host, options=options) 1026 port = test.TestPort(host, options=options)
971 host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """ 1027 host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
972 Bug(foo) failures/unexpected/missing_text.html [ Missing ] 1028 Bug(foo) failures/unexpected/missing_text.html [ Missing ]
973 Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ] 1029 Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
974 Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ] 1030 Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
975 Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ] 1031 Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
976 """) 1032 """)
977 details, err, _ = logging_run(['--pixel-tests', '--reset-results', 1033 details, err, _ = logging_run(['--pixel-tests', '--reset-results',
978 'failures/unexpected/missing_text.html', 1034 'failures/unexpected/missing_text.html',
979 'failures/unexpected/missing_image.html', 1035 'failures/unexpected/missing_image.html',
980 'failures/unexpected/missing_audio.html', 1036 'failures/unexpected/missing_audio.html',
981 'failures/unexpected/missing_render_tree_dump.html'], 1037 'failures/unexpected/missing_render_tree_dump.html'],
982 tests_included=True, host=host, new_results=True, port_obj=port) 1038 tests_included=True, host=host, new_results=True, port_obj=port)
983 file_list = host.filesystem.written_files.keys() 1039 file_list = host.filesystem.written_files.keys()
984 self.assertEqual(details.exit_code, 0) 1040 self.assertEqual(details.exit_code, 0)
985 self.assertEqual(len(file_list), 11) 1041 self.assertEqual(len(file_list), 11)
986 self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err) 1042 self.assertBaselines(file_list, 'failures/unexpected/missing_text', ['.txt'], err)
987 self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err) 1043 self.assertBaselines(file_list, 'failures/unexpected/missing_image', ['.png'], err)
988 self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err) 1044 self.assertBaselines(file_list, 'failures/unexpected/missing_render_tree_dump', ['.txt'], err)
989 1045
990 def test_new_baseline(self): 1046 def test_new_baseline(self):
991 # Test that we update the platform expectations in the version-specific directories 1047 # Test that we update the platform expectations in the version-specific directories
992 # for both existing and new baselines. 1048 # for both existing and new baselines.
993 host = MockHost() 1049 host = MockHost()
994 details, err, _ = logging_run( 1050 details, err, _ = logging_run(
995 ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'], 1051 ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
996 tests_included=True, host=host, new_results=True) 1052 tests_included=True, host=host, new_results=True)
997 file_list = host.filesystem.written_files.keys() 1053 file_list = host.filesystem.written_files.keys()
998 self.assertEqual(details.exit_code, 0) 1054 self.assertEqual(details.exit_code, 0)
999 self.assertEqual(len(file_list), 8) 1055 self.assertEqual(len(file_list), 8)
1000 self.assertBaselines(file_list, 1056 self.assertBaselines(file_list,
1001 "platform/test-mac-leopard/passes/image", [".txt", ".png"], err) 1057 'platform/test-mac-leopard/passes/image', ['.txt', '.png'], err)
1002 self.assertBaselines(file_list, 1058 self.assertBaselines(file_list,
1003 "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err) 1059 'platform/test-mac-leopard/failures/expected/missing_image', ['.txt', '.png'], err)
1004 1060
1005 1061
1006 class PortTest(unittest.TestCase): 1062 class PortTest(unittest.TestCase):
1063
1007 def assert_mock_port_works(self, port_name, args=[]): 1064 def assert_mock_port_works(self, port_name, args=[]):
1008 self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host())) 1065 self.assertTrue(passing_run(args + ['--platform',
1066 'mock-' + port_name,
1067 'fast/harness/results.html'],
1068 tests_included=True,
1069 host=Host()))
1009 1070
1010 def disabled_test_mac_lion(self): 1071 def disabled_test_mac_lion(self):
1011 self.assert_mock_port_works('mac-lion') 1072 self.assert_mock_port_works('mac-lion')
1012 1073
1013 1074
1014 class MainTest(unittest.TestCase): 1075 class MainTest(unittest.TestCase):
1076
1015 def test_exception_handling(self): 1077 def test_exception_handling(self):
1016 orig_run_fn = run_webkit_tests.run 1078 orig_run_fn = run_webkit_tests.run
1017 1079
1018 # unused args pylint: disable=W0613 1080 # unused args pylint: disable=W0613
1019 def interrupting_run(port, options, args, stderr): 1081 def interrupting_run(port, options, args, stderr):
1020 raise KeyboardInterrupt 1082 raise KeyboardInterrupt
1021 1083
1022 def successful_run(port, options, args, stderr): 1084 def successful_run(port, options, args, stderr):
1023 1085
1024 class FakeRunDetails(object): 1086 class FakeRunDetails(object):
(...skipping 24 matching lines...) Expand all
1049 def test_buildbot_results_are_printed_on_early_exit(self): 1111 def test_buildbot_results_are_printed_on_early_exit(self):
1050 # unused args pylint: disable=W0613 1112 # unused args pylint: disable=W0613
1051 stdout = StringIO.StringIO() 1113 stdout = StringIO.StringIO()
1052 stderr = StringIO.StringIO() 1114 stderr = StringIO.StringIO()
1053 res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1', 1115 res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
1054 'failures/unexpected/missing_text.html', 1116 'failures/unexpected/missing_text.html',
1055 'failures/unexpected/missing_image.html'], 1117 'failures/unexpected/missing_image.html'],
1056 stdout, stderr) 1118 stdout, stderr)
1057 self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS) 1119 self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
1058 self.assertEqual(stdout.getvalue(), 1120 self.assertEqual(stdout.getvalue(),
1059 ('\n' 1121 ('\n'
1060 'Regressions: Unexpected missing results (1)\n' 1122 'Regressions: Unexpected missing results (1)\n'
1061 ' failures/unexpected/missing_image.html [ Missing ]\n\n')) 1123 ' failures/unexpected/missing_image.html [ Missing ]\n\n'))