Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

Issue 1839193004: Run auto-formatter (autopep8) on webkitpy. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebased Created 4 years, 8 months ago
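For context, this patch is the mechanical output of running the autopep8 formatter over the webkitpy tree (roughly equivalent to autopep8 --in-place --recursive on the directory). The sketch below shows one way to reproduce such a pass programmatically; the directory path constant and the max_line_length value are illustrative assumptions, not the exact settings used for this change.

    # Hypothetical driver: apply autopep8 to every Python file under webkitpy.
    import os
    import autopep8

    WEBKITPY_DIR = 'third_party/WebKit/Tools/Scripts/webkitpy'  # assumed path
    OPTIONS = {'max_line_length': 132}  # assumed setting, for illustration only

    for dirpath, _, filenames in os.walk(WEBKITPY_DIR):
        for name in filenames:
            if not name.endswith('.py'):
                continue
            path = os.path.join(dirpath, name)
            with open(path) as f:
                original = f.read()
            fixed = autopep8.fix_code(original, options=OPTIONS)
            if fixed != original:
                with open(path, 'w') as f:
                    f.write(fixed)  # rewrite only files autopep8 changed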
1 # Copyright (C) 2012 Google Inc. All rights reserved. 1 # Copyright (C) 2012 Google Inc. All rights reserved.
2 # 2 #
3 # Redistribution and use in source and binary forms, with or without 3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are 4 # modification, are permitted provided that the following conditions are
5 # met: 5 # met:
6 # 6 #
7 # * Redistributions of source code must retain the above copyright 7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer. 8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above 9 # * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer 10 # copyright notice, this list of conditions and the following disclaimer
(...skipping 27 matching lines...)
38 from webkitpy.common.system.outputcapture import OutputCapture 38 from webkitpy.common.system.outputcapture import OutputCapture
39 from webkitpy.layout_tests.port.driver import DriverOutput 39 from webkitpy.layout_tests.port.driver import DriverOutput
40 from webkitpy.layout_tests.port.test import TestPort 40 from webkitpy.layout_tests.port.test import TestPort
41 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest 41 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
42 from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT 42 from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
43 from webkitpy.performance_tests.perftest import PerfTest 43 from webkitpy.performance_tests.perftest import PerfTest
44 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner 44 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
45 45
46 46
47 class MainTest(unittest.TestCase): 47 class MainTest(unittest.TestCase):
48
48 def create_runner(self, args=[]): 49 def create_runner(self, args=[]):
49 options, parsed_args = PerfTestsRunner._parse_args(args) 50 options, parsed_args = PerfTestsRunner._parse_args(args)
50 test_port = TestPort(host=MockHost(), options=options) 51 test_port = TestPort(host=MockHost(), options=options)
51 runner = PerfTestsRunner(args=args, port=test_port) 52 runner = PerfTestsRunner(args=args, port=test_port)
52 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector') 53 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
53 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings') 54 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
54 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser') 55 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
55 return runner, test_port 56 return runner, test_port
56 57
57 def _add_file(self, runner, dirname, filename, content=True): 58 def _add_file(self, runner, dirname, filename, content=True):
(...skipping 37 matching lines...)
95 runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html']) 96 runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])
96 97
97 self._add_file(runner, 'SkippedSuite', 'Test1.html') 98 self._add_file(runner, 'SkippedSuite', 'Test1.html')
98 self._add_file(runner, 'SkippedSuite', 'Test2.html') 99 self._add_file(runner, 'SkippedSuite', 'Test2.html')
99 self._add_file(runner, 'Suite', 'Test1.html') 100 self._add_file(runner, 'Suite', 'Test1.html')
100 self._add_file(runner, 'Suite', 'Test2.html') 101 self._add_file(runner, 'Suite', 'Test2.html')
101 self._add_file(runner, 'Suite', 'SkippedTest1.html') 102 self._add_file(runner, 'Suite', 'SkippedTest1.html')
102 self._add_file(runner, 'Suite', 'SkippedTest2.html') 103 self._add_file(runner, 'Suite', 'SkippedTest2.html')
103 port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite'] 104 port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite']
104 self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), 105 self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
105 ['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html']) 106 ['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])
106 107
107 def test_collect_tests_with_ignored_skipped_list(self): 108 def test_collect_tests_with_ignored_skipped_list(self):
108 runner, port = self.create_runner(args=['--force']) 109 runner, port = self.create_runner(args=['--force'])
109 110
110 self._add_file(runner, 'inspector', 'test1.html') 111 self._add_file(runner, 'inspector', 'test1.html')
111 self._add_file(runner, 'inspector', 'unsupported_test1.html') 112 self._add_file(runner, 'inspector', 'unsupported_test1.html')
112 self._add_file(runner, 'inspector', 'test2.html') 113 self._add_file(runner, 'inspector', 'test2.html')
113 self._add_file(runner, 'inspector/resources', 'resource_file.html') 114 self._add_file(runner, 'inspector/resources', 'resource_file.html')
114 self._add_file(runner, 'unsupported', 'unsupported_test2.html') 115 self._add_file(runner, 'unsupported', 'unsupported_test2.html')
115 port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported'] 116 port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
116 self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html']) 117 self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), [
118 'inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
117 119
118 def test_default_args(self): 120 def test_default_args(self):
119 runner, port = self.create_runner() 121 runner, port = self.create_runner()
120 options, args = PerfTestsRunner._parse_args([]) 122 options, args = PerfTestsRunner._parse_args([])
121 self.assertTrue(options.build) 123 self.assertTrue(options.build)
122 self.assertEqual(options.time_out_ms, 600 * 1000) 124 self.assertEqual(options.time_out_ms, 600 * 1000)
123 self.assertTrue(options.generate_results) 125 self.assertTrue(options.generate_results)
124 self.assertTrue(options.show_results) 126 self.assertTrue(options.show_results)
125 self.assertTrue(options.use_skipped_list) 127 self.assertTrue(options.use_skipped_list)
126 self.assertEqual(options.repeat, 1) 128 self.assertEqual(options.repeat, 1)
127 self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT) 129 self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)
128 130
129 def test_parse_args(self): 131 def test_parse_args(self):
130 runner, port = self.create_runner() 132 runner, port = self.create_runner()
131 options, args = PerfTestsRunner._parse_args([ 133 options, args = PerfTestsRunner._parse_args([
132 '--build-directory=folder42', 134 '--build-directory=folder42',
133 '--platform=platform42', 135 '--platform=platform42',
134 '--builder-name', 'webkit-mac-1', 136 '--builder-name', 'webkit-mac-1',
135 '--build-number=56', 137 '--build-number=56',
136 '--time-out-ms=42', 138 '--time-out-ms=42',
137 '--no-show-results', 139 '--no-show-results',
138 '--reset-results', 140 '--reset-results',
139 '--output-json-path=a/output.json', 141 '--output-json-path=a/output.json',
140 '--slave-config-json-path=a/source.json', 142 '--slave-config-json-path=a/source.json',
141 '--test-results-server=somehost', 143 '--test-results-server=somehost',
142 '--additional-driver-flag=--enable-threaded-parser', 144 '--additional-driver-flag=--enable-threaded-parser',
143 '--additional-driver-flag=--awesomesauce', 145 '--additional-driver-flag=--awesomesauce',
144 '--repeat=5', 146 '--repeat=5',
145 '--test-runner-count=5', 147 '--test-runner-count=5',
146 '--debug']) 148 '--debug'])
147 self.assertTrue(options.build) 149 self.assertTrue(options.build)
148 self.assertEqual(options.build_directory, 'folder42') 150 self.assertEqual(options.build_directory, 'folder42')
149 self.assertEqual(options.platform, 'platform42') 151 self.assertEqual(options.platform, 'platform42')
150 self.assertEqual(options.builder_name, 'webkit-mac-1') 152 self.assertEqual(options.builder_name, 'webkit-mac-1')
151 self.assertEqual(options.build_number, '56') 153 self.assertEqual(options.build_number, '56')
152 self.assertEqual(options.time_out_ms, '42') 154 self.assertEqual(options.time_out_ms, '42')
153 self.assertEqual(options.configuration, 'Debug') 155 self.assertEqual(options.configuration, 'Debug')
154 self.assertFalse(options.show_results) 156 self.assertFalse(options.show_results)
155 self.assertTrue(options.reset_results) 157 self.assertTrue(options.reset_results)
156 self.assertEqual(options.output_json_path, 'a/output.json') 158 self.assertEqual(options.output_json_path, 'a/output.json')
(...skipping 49 matching lines...)
206 MockFileUploader.upload_single_text_file_throws = True 208 MockFileUploader.upload_single_text_file_throws = True
207 self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader)) 209 self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
208 self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file']) 210 self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
209 211
210 MockFileUploader.reset() 212 MockFileUploader.reset()
211 MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}') 213 MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
212 self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader)) 214 self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
213 self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file']) 215 self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
214 216
215 MockFileUploader.reset() 217 MockFileUploader.reset()
216 MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}') 218 MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO(
219 '{"status": "SomethingHasFailed", "failureStored": false}')
217 output = OutputCapture() 220 output = OutputCapture()
218 output.capture_output() 221 output.capture_output()
219 self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader)) 222 self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
220 _, _, logs = output.restore_output() 223 _, _, logs = output.restore_output()
221 serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4) 224 serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
222 self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json) 225 self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
223 226
224 227
225 class InspectorPassTestData: 228 class InspectorPassTestData:
226 text = 'RESULT group_name: test_name= 42 ms' 229 text = 'RESULT group_name: test_name= 42 ms'
(...skipping 38 matching lines...)
265 """ 268 """
266 269
267 output = """Running Bindings/event-target-wrapper.html (1 of 2) 270 output = """Running Bindings/event-target-wrapper.html (1 of 2)
268 RESULT Bindings: event-target-wrapper: Time= 1490.0 ms 271 RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
269 median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms 272 median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
270 Finished: 0.1 s 273 Finished: 0.1 s
271 274
272 """ 275 """
273 276
274 results = {'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html', 277 results = {'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
275 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}} 278 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
276 279
277 280
278 class SomeParserTestData: 281 class SomeParserTestData:
279 text = """Running 20 times 282 text = """Running 20 times
280 Ignoring warm-up run (1115) 283 Ignoring warm-up run (1115)
281 284
282 Time: 285 Time:
283 values 1080, 1120, 1095, 1101, 1104 ms 286 values 1080, 1120, 1095, 1101, 1104 ms
284 avg 1100 ms 287 avg 1100 ms
285 median 1101 ms 288 median 1101 ms
(...skipping 49 matching lines...)
335 median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes 338 median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
336 Finished: 0.1 s 339 Finished: 0.1 s
337 """ 340 """
338 341
339 results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4} 342 results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
340 js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4} 343 js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
341 malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4} 344 malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
342 345
343 346
344 class TestDriver: 347 class TestDriver:
348
345 def run_test(self, driver_input, stop_when_done): 349 def run_test(self, driver_input, stop_when_done):
346 text = '' 350 text = ''
347 timeout = False 351 timeout = False
348 crash = False 352 crash = False
349 if driver_input.test_name.endswith('pass.html'): 353 if driver_input.test_name.endswith('pass.html'):
350 text = InspectorPassTestData.text 354 text = InspectorPassTestData.text
351 elif driver_input.test_name.endswith('timeout.html'): 355 elif driver_input.test_name.endswith('timeout.html'):
352 timeout = True 356 timeout = True
353 elif driver_input.test_name.endswith('failed.html'): 357 elif driver_input.test_name.endswith('failed.html'):
354 text = None 358 text = None
(...skipping 10 matching lines...)
365 return DriverOutput(text, '', '', '', crash=crash, timeout=timeout) 369 return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
366 370
367 def start(self): 371 def start(self):
368 """do nothing""" 372 """do nothing"""
369 373
370 def stop(self): 374 def stop(self):
371 """do nothing""" 375 """do nothing"""
372 376
373 377
374 class IntegrationTest(unittest.TestCase): 378 class IntegrationTest(unittest.TestCase):
379
375 def _normalize_output(self, log): 380 def _normalize_output(self, log):
376 return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)) 381 return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
377 382
378 def _load_output_json(self, runner): 383 def _load_output_json(self, runner):
379 json_content = runner._host.filesystem.read_text_file(runner._output_json_path()) 384 json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
380 return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content)) 385 return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
381 386
382 def create_runner(self, args=[], driver_class=TestDriver): 387 def create_runner(self, args=[], driver_class=TestDriver):
383 options, parsed_args = PerfTestsRunner._parse_args(args) 388 options, parsed_args = PerfTestsRunner._parse_args(args)
384 test_port = TestPort(host=MockHost(), options=options) 389 test_port = TestPort(host=MockHost(), options=options)
(...skipping 37 matching lines...)
422 dirname = filesystem.dirname(path) 427 dirname = filesystem.dirname(path)
423 if test.startswith('inspector/'): 428 if test.startswith('inspector/'):
424 tests.append(ChromiumStylePerfTest(runner._port, test, path)) 429 tests.append(ChromiumStylePerfTest(runner._port, test, path))
425 else: 430 else:
426 tests.append(PerfTest(runner._port, test, path)) 431 tests.append(PerfTest(runner._port, test, path))
427 return tests 432 return tests
428 433
429 def test_run_test_set(self): 434 def test_run_test_set(self):
430 runner, port = self.create_runner() 435 runner, port = self.create_runner()
431 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html', 436 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
432 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html']) 437 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
433 output = OutputCapture() 438 output = OutputCapture()
434 output.capture_output() 439 output.capture_output()
435 try: 440 try:
436 unexpected_result_count = runner._run_tests_set(tests) 441 unexpected_result_count = runner._run_tests_set(tests)
437 finally: 442 finally:
438 stdout, stderr, log = output.restore_output() 443 stdout, stderr, log = output.restore_output()
439 self.assertEqual(unexpected_result_count, len(tests) - 1) 444 self.assertEqual(unexpected_result_count, len(tests) - 1)
440 self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log) 445 self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
441 446
442 def test_run_test_set_kills_drt_per_run(self): 447 def test_run_test_set_kills_drt_per_run(self):
443 448
444 class TestDriverWithStopCount(TestDriver): 449 class TestDriverWithStopCount(TestDriver):
445 stop_count = 0 450 stop_count = 0
446 451
447 def stop(self): 452 def stop(self):
448 TestDriverWithStopCount.stop_count += 1 453 TestDriverWithStopCount.stop_count += 1
449 454
450 runner, port = self.create_runner(driver_class=TestDriverWithStopCount) 455 runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
451 456
452 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html', 457 tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
453 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html']) 458 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
454 unexpected_result_count = runner._run_tests_set(tests) 459 unexpected_result_count = runner._run_tests_set(tests)
455 460
456 self.assertEqual(TestDriverWithStopCount.stop_count, 6) 461 self.assertEqual(TestDriverWithStopCount.stop_count, 6)
457 462
458 def test_run_test_set_for_parser_tests(self): 463 def test_run_test_set_for_parser_tests(self):
459 runner, port = self.create_runner() 464 runner, port = self.create_runner()
460 tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html']) 465 tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
461 output = OutputCapture() 466 output = OutputCapture()
462 output.capture_output() 467 output.capture_output()
463 try: 468 try:
(...skipping 54 matching lines...)
518 expected_logs += 'MOCK: user.open_url: file://...\n' 523 expected_logs += 'MOCK: user.open_url: file://...\n'
519 self.assertEqual(self._normalize_output(logs), expected_logs) 524 self.assertEqual(self._normalize_output(logs), expected_logs)
520 525
521 self.assertEqual(uploaded[0], upload_succeeds) 526 self.assertEqual(uploaded[0], upload_succeeds)
522 527
523 return logs 528 return logs
524 529
525 _event_target_wrapper_and_inspector_results = { 530 _event_target_wrapper_and_inspector_results = {
526 "Bindings": 531 "Bindings":
527 {"url": "https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings", 532 {"url": "https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings",
528 "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}} 533 "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}}
529 534
530 def test_run_with_json_output(self): 535 def test_run_with_json_output(self):
531 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 536 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
532 '--test-results-server=some.host']) 537 '--test-results-server=some.host'])
533 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True) 538 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
534 self.assertEqual(self._load_output_json(runner), [{ 539 self.assertEqual(self._load_output_json(runner), [{
535 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 540 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
536 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 541 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
537 542
538 filesystem = port.host.filesystem 543 filesystem = port.host.filesystem
539 self.assertTrue(filesystem.isfile(runner._output_json_path())) 544 self.assertTrue(filesystem.isfile(runner._output_json_path()))
540 self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html')) 545 self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
541 546
542 def test_run_with_description(self): 547 def test_run_with_description(self):
543 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 548 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
544 '--test-results-server=some.host', '--description', 'some description']) 549 '--test-results-server=some.host', '--description', 'some description'])
545 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True) 550 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
546 self.assertEqual(self._load_output_json(runner), [{ 551 self.assertEqual(self._load_output_json(runner), [{
547 "buildTime": "2013-02-08T15:19:37.460000", "description": "some description", 552 "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
548 "tests": self._event_target_wrapper_and_inspector_results, 553 "tests": self._event_target_wrapper_and_inspector_results,
549 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 554 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
550 555
551 def create_runner_and_setup_results_template(self, args=[]): 556 def create_runner_and_setup_results_template(self, args=[]):
552 runner, port = self.create_runner(args) 557 runner, port = self.create_runner(args)
553 filesystem = port.host.filesystem 558 filesystem = port.host.filesystem
554 filesystem.write_text_file(runner._base_path + '/resources/results-template.html', 559 filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
555 'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>' 560 'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
556 '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END') 561 '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
557 filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content') 562 filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
558 return runner, port 563 return runner, port
559 564
560 def test_run_respects_no_results(self): 565 def test_run_respects_no_results(self):
561 runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json', 566 runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
562 '--test-results-server=some.host', '--no-results']) 567 '--test-results-server=some.host', '--no-results'])
563 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False) 568 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
564 self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json')) 569 self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
565 570
566 def test_run_generates_json_by_default(self): 571 def test_run_generates_json_by_default(self):
567 runner, port = self.create_runner_and_setup_results_template() 572 runner, port = self.create_runner_and_setup_results_template()
568 filesystem = port.host.filesystem 573 filesystem = port.host.filesystem
569 output_json_path = runner._output_json_path() 574 output_json_path = runner._output_json_path()
570 results_page_path = filesystem.splitext(output_json_path)[0] + '.html' 575 results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
571 576
572 self.assertFalse(filesystem.isfile(output_json_path)) 577 self.assertFalse(filesystem.isfile(output_json_path))
(...skipping 45 matching lines...)
618 self._test_run_with_json_output(runner, filesystem, results_shown=False) 623 self._test_run_with_json_output(runner, filesystem, results_shown=False)
619 624
620 expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", 625 expected_entry = {"buildTime": "2013-02-08T15:19:37.460000",
621 "tests": self._event_target_wrapper_and_inspector_results, 626 "tests": self._event_target_wrapper_and_inspector_results,
622 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}} 627 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
623 628
624 self.maxDiff = None 629 self.maxDiff = None
625 self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json') 630 self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
626 self.assertEqual(self._load_output_json(runner), [expected_entry]) 631 self.assertEqual(self._load_output_json(runner), [expected_entry])
627 self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'), 632 self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
628 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>' 633 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
629 '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path())) 634 '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
630 self.assertEqual(page_shown[0], '/mock-checkout/output.html') 635 self.assertEqual(page_shown[0], '/mock-checkout/output.html')
631 636
632 self._test_run_with_json_output(runner, filesystem, results_shown=False) 637 self._test_run_with_json_output(runner, filesystem, results_shown=False)
633 self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json') 638 self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
634 self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry]) 639 self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
635 self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'), 640 self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
636 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>' 641 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
637 '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path())) 642 '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
638 643
639 def test_run_respects_no_show_results(self): 644 def test_run_respects_no_show_results(self):
640 show_results_html_file = lambda path: page_shown.append(path) 645 show_results_html_file = lambda path: page_shown.append(path)
641 646
642 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json']) 647 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
643 page_shown = [] 648 page_shown = []
644 port.show_results_html_file = show_results_html_file 649 port.show_results_html_file = show_results_html_file
645 self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False) 650 self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
646 self.assertEqual(page_shown[0], '/mock-checkout/output.html') 651 self.assertEqual(page_shown[0], '/mock-checkout/output.html')
647 652
648 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 653 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
649 '--no-show-results']) 654 '--no-show-results'])
650 page_shown = [] 655 page_shown = []
651 port.show_results_html_file = show_results_html_file 656 port.show_results_html_file = show_results_html_file
652 self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False) 657 self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
653 self.assertEqual(page_shown, []) 658 self.assertEqual(page_shown, [])
654 659
655 def test_run_with_bad_output_json(self): 660 def test_run_with_bad_output_json(self):
656 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json']) 661 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
657 port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json') 662 port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
658 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE) 663 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
659 port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}') 664 port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
660 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE) 665 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
661 666
662 def test_run_with_slave_config_json(self): 667 def test_run_with_slave_config_json(self):
663 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 668 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
664 '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host']) 669 '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
665 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}') 670 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
666 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True) 671 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
667 self.assertEqual(self._load_output_json(runner), [{ 672 self.assertEqual(self._load_output_json(runner), [{
668 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 673 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
669 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}]) 674 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
670 675
671 def test_run_with_bad_slave_config_json(self): 676 def test_run_with_bad_slave_config_json(self):
672 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 677 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
673 '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host']) 678 '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
674 logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON) 679 logs = self._test_run_with_json_output(runner, port.host.filesystem,
680 expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
675 self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs) 681 self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
676 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json') 682 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
677 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON) 683 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
678 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]') 684 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
679 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON) 685 self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
680 686
681 def test_run_with_multiple_repositories(self): 687 def test_run_with_multiple_repositories(self):
682 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 688 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
683 '--test-results-server=some.host']) 689 '--test-results-server=some.host'])
684 port.repository_path = lambda: '/mock-checkout' 690 port.repository_path = lambda: '/mock-checkout'
685 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True) 691 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
686 self.assertEqual(self._load_output_json(runner), [{ 692 self.assertEqual(self._load_output_json(runner), [{
687 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results, 693 "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
688 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 694 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
689 695
690 def test_run_with_upload_json(self): 696 def test_run_with_upload_json(self):
691 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 697 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
692 '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123']) 698 '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
693 699
694 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True) 700 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
695 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) 701 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
696 self.assertEqual(generated_json[0]['platform'], 'platform1') 702 self.assertEqual(generated_json[0]['platform'], 'platform1')
697 self.assertEqual(generated_json[0]['builderName'], 'builder1') 703 self.assertEqual(generated_json[0]['builderName'], 'builder1')
698 self.assertEqual(generated_json[0]['buildNumber'], 123) 704 self.assertEqual(generated_json[0]['buildNumber'], 123)
699 705
700 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING) 706 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False,
707 expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
701 708
702 def test_run_with_upload_json_should_generate_perf_webkit_json(self): 709 def test_run_with_upload_json_should_generate_perf_webkit_json(self):
703 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 710 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
704 '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123', 711 '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
705 '--slave-config-json-path=/mock-checkout/slave-config.json']) 712 '--slave-config-json-path=/mock-checkout/slave-config.json'])
706 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}') 713 port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
707 714
708 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True) 715 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
709 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) 716 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
710 self.assertTrue(isinstance(generated_json, list)) 717 self.assertTrue(isinstance(generated_json, list))
711 self.assertEqual(len(generated_json), 1) 718 self.assertEqual(len(generated_json), 1)
712 719
713 output = generated_json[0] 720 output = generated_json[0]
714 self.maxDiff = None 721 self.maxDiff = None
715 self.assertEqual(output['platform'], 'platform1') 722 self.assertEqual(output['platform'], 'platform1')
716 self.assertEqual(output['buildNumber'], 123) 723 self.assertEqual(output['buildNumber'], 123)
717 self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000') 724 self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
718 self.assertEqual(output['builderName'], 'builder1') 725 self.assertEqual(output['builderName'], 'builder1')
719 self.assertEqual(output['builderKey'], 'value1') 726 self.assertEqual(output['builderKey'], 'value1')
720 self.assertEqual(output['revisions'], {'chromium': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}}) 727 self.assertEqual(output['revisions'], {'chromium': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
721 self.assertEqual(output['tests'].keys(), ['Bindings']) 728 self.assertEqual(output['tests'].keys(), ['Bindings'])
722 self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url']) 729 self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
723 self.assertEqual(output['tests']['Bindings']['url'], 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings') 730 self.assertEqual(output['tests']['Bindings']['url'],
731 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings')
724 self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper']) 732 self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
725 self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], { 733 self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
726 'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html', 734 'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
727 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}) 735 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
728 736
729 def test_run_with_repeat(self): 737 def test_run_with_repeat(self):
730 self.maxDiff = None 738 self.maxDiff = None
731 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 739 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
732 '--test-results-server=some.host', '--repeat', '5']) 740 '--test-results-server=some.host', '--repeat', '5'])
733 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5) 741 self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
734 self.assertEqual(self._load_output_json(runner), [ 742 self.assertEqual(self._load_output_json(runner), [
735 {"buildTime": "2013-02-08T15:19:37.460000", 743 {"buildTime": "2013-02-08T15:19:37.460000",
736 "tests": self._event_target_wrapper_and_inspector_results, 744 "tests": self._event_target_wrapper_and_inspector_results,
737 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}, 745 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
738 {"buildTime": "2013-02-08T15:19:37.460000", 746 {"buildTime": "2013-02-08T15:19:37.460000",
739 "tests": self._event_target_wrapper_and_inspector_results, 747 "tests": self._event_target_wrapper_and_inspector_results,
740 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}, 748 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
741 {"buildTime": "2013-02-08T15:19:37.460000", 749 {"buildTime": "2013-02-08T15:19:37.460000",
742 "tests": self._event_target_wrapper_and_inspector_results, 750 "tests": self._event_target_wrapper_and_inspector_results,
743 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}, 751 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
744 {"buildTime": "2013-02-08T15:19:37.460000", 752 {"buildTime": "2013-02-08T15:19:37.460000",
745 "tests": self._event_target_wrapper_and_inspector_results, 753 "tests": self._event_target_wrapper_and_inspector_results,
746 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}, 754 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
747 {"buildTime": "2013-02-08T15:19:37.460000", 755 {"buildTime": "2013-02-08T15:19:37.460000",
748 "tests": self._event_target_wrapper_and_inspector_results, 756 "tests": self._event_target_wrapper_and_inspector_results,
749 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}]) 757 "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
750 758
751 def test_run_with_test_runner_count(self): 759 def test_run_with_test_runner_count(self):
752 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json', 760 runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
753 '--test-runner-count=3']) 761 '--test-runner-count=3'])
754 self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False) 762 self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
755 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json']) 763 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
756 self.assertTrue(isinstance(generated_json, list)) 764 self.assertTrue(isinstance(generated_json, list))
757 self.assertEqual(len(generated_json), 1) 765 self.assertEqual(len(generated_json), 1)
758 766
759 output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current'] 767 output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
760 self.assertEqual(len(output), 3) 768 self.assertEqual(len(output), 3)
761 expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0] 769 expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
762 for metrics in output: 770 for metrics in output:
763 self.assertEqual(metrics, expectedMetrics) 771 self.assertEqual(metrics, expectedMetrics)