| OLD | NEW |
| 1 # Copyright (C) 2012 Google Inc. All rights reserved. | 1 # Copyright (C) 2012 Google Inc. All rights reserved. |
| 2 # | 2 # |
| 3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
| 4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
| 5 # met: | 5 # met: |
| 6 # | 6 # |
| 7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
| 8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
| 9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
| 10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
| (...skipping 29 matching lines...) |
| 40 from webkitpy.layout_tests.port.test import TestPort | 40 from webkitpy.layout_tests.port.test import TestPort |
| 41 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest | 41 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest |
| 42 from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT | 42 from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT |
| 43 from webkitpy.performance_tests.perftest import PerfTest | 43 from webkitpy.performance_tests.perftest import PerfTest |
| 44 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner | 44 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner |
| 45 | 45 |
| 46 | 46 |
| 47 class MainTest(unittest.TestCase): | 47 class MainTest(unittest.TestCase): |
| 48 | 48 |
| 49 def create_runner(self, args=[]): | 49 def create_runner(self, args=[]): |
| 50 options, parsed_args = PerfTestsRunner._parse_args(args) | 50 options, _ = PerfTestsRunner._parse_args(args) |
| 51 test_port = TestPort(host=MockHost(), options=options) | 51 test_port = TestPort(host=MockHost(), options=options) |
| 52 runner = PerfTestsRunner(args=args, port=test_port) | 52 runner = PerfTestsRunner(args=args, port=test_port) |
| 53 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector') | 53 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector') |
| 54 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings') | 54 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings') |
| 55 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser') | 55 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser') |
| 56 return runner, test_port | 56 return runner, test_port |
| 57 | 57 |
| 58 def _add_file(self, runner, dirname, filename, content=True): | 58 def _add_file(self, runner, dirname, filename, content=True): |
| 59 dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path | 59 dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path |
| 60 runner._host.filesystem.maybe_make_directory(dirname) | 60 runner._host.filesystem.maybe_make_directory(dirname) |
| 61 runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content | 61 runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content |
| 62 | 62 |
| 63 def test_collect_tests(self): | 63 def test_collect_tests(self): |
| 64 runner, port = self.create_runner() | 64 runner, _ = self.create_runner() |
| 65 self._add_file(runner, 'inspector', 'a_file.html', 'a content') | 65 self._add_file(runner, 'inspector', 'a_file.html', 'a content') |
| 66 tests = runner._collect_tests() | 66 tests = runner._collect_tests() |
| 67 self.assertEqual(len(tests), 1) | 67 self.assertEqual(len(tests), 1) |
| 68 | 68 |
| 69 def _collect_tests_and_sort_test_name(self, runner): | 69 def _collect_tests_and_sort_test_name(self, runner): |
| 70 return sorted([test.test_name() for test in runner._collect_tests()]) | 70 return sorted([test.test_name() for test in runner._collect_tests()]) |
| 71 | 71 |
| 72 def test_collect_tests_with_multiple_files(self): | 72 def test_collect_tests_with_multiple_files(self): |
| 73 runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html']) | 73 runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html']) |
| 74 | 74 |
| (...skipping 36 matching lines...) |
| 111 self._add_file(runner, 'inspector', 'test1.html') | 111 self._add_file(runner, 'inspector', 'test1.html') |
| 112 self._add_file(runner, 'inspector', 'unsupported_test1.html') | 112 self._add_file(runner, 'inspector', 'unsupported_test1.html') |
| 113 self._add_file(runner, 'inspector', 'test2.html') | 113 self._add_file(runner, 'inspector', 'test2.html') |
| 114 self._add_file(runner, 'inspector/resources', 'resource_file.html') | 114 self._add_file(runner, 'inspector/resources', 'resource_file.html') |
| 115 self._add_file(runner, 'unsupported', 'unsupported_test2.html') | 115 self._add_file(runner, 'unsupported', 'unsupported_test2.html') |
| 116 port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported'] | 116 port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported'] |
| 117 self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), [ | 117 self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), [ |
| 118                     'inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html']) | 118                     'inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html']) |
| 119 | 119 |
| 120 def test_default_args(self): | 120 def test_default_args(self): |
| 121 runner, port = self.create_runner() | 121 options, _ = PerfTestsRunner._parse_args([]) |
| 122 options, args = PerfTestsRunner._parse_args([]) | |
| 123 self.assertTrue(options.build) | 122 self.assertTrue(options.build) |
| 124 self.assertEqual(options.time_out_ms, 600 * 1000) | 123 self.assertEqual(options.time_out_ms, 600 * 1000) |
| 125 self.assertTrue(options.generate_results) | 124 self.assertTrue(options.generate_results) |
| 126 self.assertTrue(options.show_results) | 125 self.assertTrue(options.show_results) |
| 127 self.assertTrue(options.use_skipped_list) | 126 self.assertTrue(options.use_skipped_list) |
| 128 self.assertEqual(options.repeat, 1) | 127 self.assertEqual(options.repeat, 1) |
| 129 self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT) | 128 self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT) |
| 130 | 129 |
| 131 def test_parse_args(self): | 130 def test_parse_args(self): |
| 132 runner, port = self.create_runner() | 131 options, _ = PerfTestsRunner._parse_args([ |
| 133 options, args = PerfTestsRunner._parse_args([ | |
| 134 '--build-directory=folder42', | 132 '--build-directory=folder42', |
| 135 '--platform=platform42', | 133 '--platform=platform42', |
| 136 '--builder-name', 'webkit-mac-1', | 134 '--builder-name', 'webkit-mac-1', |
| 137 '--build-number=56', | 135 '--build-number=56', |
| 138 '--time-out-ms=42', | 136 '--time-out-ms=42', |
| 139 '--no-show-results', | 137 '--no-show-results', |
| 140 '--reset-results', | 138 '--reset-results', |
| 141 '--output-json-path=a/output.json', | 139 '--output-json-path=a/output.json', |
| 142 '--slave-config-json-path=a/source.json', | 140 '--slave-config-json-path=a/source.json', |
| 143 '--test-results-server=somehost', | 141 '--test-results-server=somehost', |
| (...skipping 234 matching lines...) |
| 378 class IntegrationTest(unittest.TestCase): | 376 class IntegrationTest(unittest.TestCase): |
| 379 | 377 |
| 380 def _normalize_output(self, log): | 378 def _normalize_output(self, log): |
| 381 return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)) | 379 return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)) |
| 382 | 380 |
| 383 def _load_output_json(self, runner): | 381 def _load_output_json(self, runner): |
| 384 json_content = runner._host.filesystem.read_text_file(runner._output_json_path()) | 382 json_content = runner._host.filesystem.read_text_file(runner._output_json_path()) |
| 385 return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content)) | 383 return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content)) |
| 386 | 384 |
| 387 def create_runner(self, args=[], driver_class=TestDriver): | 385 def create_runner(self, args=[], driver_class=TestDriver): |
| 388 options, parsed_args = PerfTestsRunner._parse_args(args) | 386 options, _ = PerfTestsRunner._parse_args(args) |
| 389 test_port = TestPort(host=MockHost(), options=options) | 387 test_port = TestPort(host=MockHost(), options=options) |
| 390 test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class() | 388 test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class() |
| 391 | 389 |
| 392 runner = PerfTestsRunner(args=args, port=test_port) | 390 runner = PerfTestsRunner(args=args, port=test_port) |
| 393 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector') | 391 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector') |
| 394 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings') | 392 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings') |
| 395 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser') | 393 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser') |
| 396 | 394 |
| 397 return runner, test_port | 395 return runner, test_port |
| 398 | 396 |
| (...skipping 18 matching lines...) |
| 417 self.assertFalse(self.run_test('timeout.html')) | 415 self.assertFalse(self.run_test('timeout.html')) |
| 418 | 416 |
| 419 def test_run_crash_test(self): | 417 def test_run_crash_test(self): |
| 420 self.assertFalse(self.run_test('crash.html')) | 418 self.assertFalse(self.run_test('crash.html')) |
| 421 | 419 |
| 422 def _tests_for_runner(self, runner, test_names): | 420 def _tests_for_runner(self, runner, test_names): |
| 423 filesystem = runner._host.filesystem | 421 filesystem = runner._host.filesystem |
| 424 tests = [] | 422 tests = [] |
| 425 for test in test_names: | 423 for test in test_names: |
| 426 path = filesystem.join(runner._base_path, test) | 424 path = filesystem.join(runner._base_path, test) |
| 427 dirname = filesystem.dirname(path) | |
| 428 if test.startswith('inspector/'): | 425 if test.startswith('inspector/'): |
| 429 tests.append(ChromiumStylePerfTest(runner._port, test, path)) | 426 tests.append(ChromiumStylePerfTest(runner._port, test, path)) |
| 430 else: | 427 else: |
| 431 tests.append(PerfTest(runner._port, test, path)) | 428 tests.append(PerfTest(runner._port, test, path)) |
| 432 return tests | 429 return tests |
| 433 | 430 |
| 434 def test_run_test_set(self): | 431 def test_run_test_set(self): |
| 435 runner, port = self.create_runner() | 432 runner, _ = self.create_runner() |
| 436 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html', | 433 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html', |
| 437                                          'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html']) | 434                                          'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html']) |
| 438 output = OutputCapture() | 435 output = OutputCapture() |
| 439 output.capture_output() | 436 output.capture_output() |
| 440 try: | 437 try: |
| 441 unexpected_result_count = runner._run_tests_set(tests) | 438 unexpected_result_count = runner._run_tests_set(tests) |
| 442 finally: | 439 finally: |
| 443 stdout, stderr, log = output.restore_output() | 440 _, _, log = output.restore_output() |
| 444 self.assertEqual(unexpected_result_count, len(tests) - 1) | 441 self.assertEqual(unexpected_result_count, len(tests) - 1) |
| 445 self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log) | 442 self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log) |
| 446 | 443 |
| 447 def test_run_test_set_kills_drt_per_run(self): | 444 def test_run_test_set_kills_drt_per_run(self): |
| 448 | 445 |
| 449 class TestDriverWithStopCount(TestDriver): | 446 class TestDriverWithStopCount(TestDriver): |
| 450 stop_count = 0 | 447 stop_count = 0 |
| 451 | 448 |
| 452 def stop(self): | 449 def stop(self): |
| 453 TestDriverWithStopCount.stop_count += 1 | 450 TestDriverWithStopCount.stop_count += 1 |
| 454 | 451 |
| 455 runner, port = self.create_runner(driver_class=TestDriverWithStopCount) | 452 runner, _ = self.create_runner(driver_class=TestDriverWithStopCount) |
| 456 | 453 |
| 457 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html', | 454 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html', |
| 458                                          'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html']) | 455                                          'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html']) |
| 459 unexpected_result_count = runner._run_tests_set(tests) | 456 runner._run_tests_set(tests) |
| 460 | 457 |
| 461 self.assertEqual(TestDriverWithStopCount.stop_count, 6) | 458 self.assertEqual(TestDriverWithStopCount.stop_count, 6) |
| 462 | 459 |
| 463 def test_run_test_set_for_parser_tests(self): | 460 def test_run_test_set_for_parser_tests(self): |
| 464 runner, port = self.create_runner() | 461 runner, _ = self.create_runner() |
| 465 tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html']) | 462 tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html']) |
| 466 output = OutputCapture() | 463 output = OutputCapture() |
| 467 output.capture_output() | 464 output.capture_output() |
| 468 try: | 465 try: |
| 469 unexpected_result_count = runner._run_tests_set(tests) | 466 unexpected_result_count = runner._run_tests_set(tests) |
| 470 finally: | 467 finally: |
| 471 stdout, stderr, log = output.restore_output() | 468 _, _, log = output.restore_output() |
| 472 self.assertEqual(unexpected_result_count, 0) | 469 self.assertEqual(unexpected_result_count, 0) |
| 473 self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output) | 470 self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output) |
| 474 | 471 |
| 475 def test_run_memory_test(self): | 472 def test_run_memory_test(self): |
| 476 runner, port = self.create_runner_and_setup_results_template() | 473 runner, port = self.create_runner_and_setup_results_template() |
| 477 runner._timestamp = 123456789 | 474 runner._timestamp = 123456789 |
| 478 port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content') | 475 port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content') |
| 479 | 476 |
| 480 output = OutputCapture() | 477 output = OutputCapture() |
| 481 output.capture_output() | 478 output.capture_output() |
| 482 try: | 479 try: |
| 483 unexpected_result_count = runner.run() | 480 unexpected_result_count = runner.run() |
| 484 finally: | 481 finally: |
| 485 stdout, stderr, log = output.restore_output() | 482 _, _, log = output.restore_output() |
| 486 self.assertEqual(unexpected_result_count, 0) | 483 self.assertEqual(unexpected_result_count, 0) |
| 487 self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n') | 484 self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n') |
| 488 parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests'] | 485 parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests'] |
| 489 self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results) | 486 self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results) |
| 490 self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results) | 487 self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results) |
| 491 self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results) | 488 self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results) |
| 492 | 489 |
| 493 def _test_run_with_json_output( | 490 def _test_run_with_json_output( |
| 494 self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True): | 491 self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True): |
| 495 filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content') | 492 filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content') |
| (...skipping 10 matching lines...) |
| 506 return upload_succeeds | 503 return upload_succeeds |
| 507 | 504 |
| 508 runner._upload_json = mock_upload_json | 505 runner._upload_json = mock_upload_json |
| 509 runner._timestamp = 123456789 | 506 runner._timestamp = 123456789 |
| 510 runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000) | 507 runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000) |
| 511 output_capture = OutputCapture() | 508 output_capture = OutputCapture() |
| 512 output_capture.capture_output() | 509 output_capture.capture_output() |
| 513 try: | 510 try: |
| 514 self.assertEqual(runner.run(), expected_exit_code) | 511 self.assertEqual(runner.run(), expected_exit_code) |
| 515 finally: | 512 finally: |
| 516 stdout, stderr, logs = output_capture.restore_output() | 513 _, _, logs = output_capture.restore_output() |
| 517 | 514 |
| 518 if not expected_exit_code and compare_logs: | 515 if not expected_exit_code and compare_logs: |
| 519 expected_logs = '' | 516 expected_logs = '' |
| 520 for i in xrange(repeat): | 517 for i in xrange(repeat): |
| 521 runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else '' | 518 runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else '' |
| 522 expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output | 519 expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output |
| 523 if results_shown: | 520 if results_shown: |
| 524 expected_logs += 'MOCK: user.open_url: file://...\n' | 521 expected_logs += 'MOCK: user.open_url: file://...\n' |
| 525 self.assertEqual(self._normalize_output(logs), expected_logs) | 522 self.assertEqual(self._normalize_output(logs), expected_logs) |
| 526 | 523 |
| (...skipping 245 matching lines...) |
| 772 self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False) | 769 self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False) |
| 773 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) | 770 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) |
| 774 self.assertTrue(isinstance(generated_json, list)) | 771 self.assertTrue(isinstance(generated_json, list)) |
| 775 self.assertEqual(len(generated_json), 1) | 772 self.assertEqual(len(generated_json), 1) |
| 776 | 773 |
| 777 output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current'] | 774 output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current'] |
| 778 self.assertEqual(len(output), 3) | 775 self.assertEqual(len(output), 3) |
| 779 expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0] | 776 expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0] |
| 780 for metrics in output: | 777 for metrics in output: |
| 781 self.assertEqual(metrics, expectedMetrics) | 778 self.assertEqual(metrics, expectedMetrics) |