# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Integration tests for run_perf_tests."""

import StringIO
import datetime
import json
import re
import unittest2 as unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner


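# Each *TestData class below pairs the raw driver output for one test (text)
# with the log lines the runner is expected to emit for it (output) and, where
# a test checks them, the aggregated results expected in the output JSON.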
class InspectorPassTestData:
    text = 'RESULT group_name: test_name= 42 ms'
    output = """Running inspector/pass.html (2 of 2)
RESULT group_name: test_name= 42 ms
Finished: 0.1 s

"""


class EventTargetWrapperTestData:
    text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

Time:
values 1486, 1471, 1510, 1505, 1478, 1490 ms
avg 1490 ms
median 1488 ms
stdev 15.13935 ms
min 1471 ms
max 1510 ms
"""

    output = """Running Bindings/event-target-wrapper.html (1 of 2)
RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
Finished: 0.1 s

"""

    results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
        'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}


class SomeParserTestData:
    text = """Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
"""

    output = """Running Parser/some-parser.html (2 of 2)
RESULT Parser: some-parser: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s

"""


class MemoryTestData:
    text = """Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms

JS Heap:
values 825000, 811000, 848000, 837000, 829000 bytes
avg 830000 bytes
median 829000 bytes
stdev 13784.04875 bytes
min 811000 bytes
max 848000 bytes

Malloc:
values 529000, 511000, 548000, 536000, 521000 bytes
avg 529000 bytes
median 529000 bytes
stdev 14124.44689 bytes
min 511000 bytes
max 548000 bytes
"""

    output = """Running 1 tests
Running Parser/memory-test.html (1 of 1)
RESULT Parser: memory-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
RESULT Parser: memory-test: JSHeap= 830000.0 bytes
median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
RESULT Parser: memory-test: Malloc= 529000.0 bytes
median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
Finished: 0.1 s
"""

    results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
    js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
    malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}


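# A stub driver: instead of launching a real browser, run_test() returns the
# canned DriverOutput matching the test file name, and can simulate a crash,
# a timeout, or missing output; start() and stop() are no-ops.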
class TestDriver:
    def run_test(self, driver_input, stop_when_done):
        text = ''
        timeout = False
        crash = False
        if driver_input.test_name.endswith('pass.html'):
            text = InspectorPassTestData.text
        elif driver_input.test_name.endswith('timeout.html'):
            timeout = True
        elif driver_input.test_name.endswith('failed.html'):
            text = None
        elif driver_input.test_name.endswith('tonguey.html'):
            text = 'we are not expecting an output from perf tests but RESULT blablabla'
        elif driver_input.test_name.endswith('crash.html'):
            crash = True
        elif driver_input.test_name.endswith('event-target-wrapper.html'):
            text = EventTargetWrapperTestData.text
        elif driver_input.test_name.endswith('some-parser.html'):
            text = SomeParserTestData.text
        elif driver_input.test_name.endswith('memory-test.html'):
            text = MemoryTestData.text
        return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

    def start(self):
        """do nothing"""

    def stop(self):
        """do nothing"""


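# End-to-end tests for PerfTestsRunner: each one drives the runner against
# MockHost's in-memory filesystem and the stub driver above, then asserts on
# the captured logs and on the generated output JSON.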
class MainTest(unittest.TestCase):
    def _normalize_output(self, log):
        # Replace the variable elapsed time with a fixed 'Finished: 0.1 s' and
        # truncate stdev values to five decimal places so logs compare stably.
        return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))

    def _load_output_json(self, runner):
        # Apply the same stdev truncation to the JSON before parsing it.
        json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
        return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))

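    # Builds a runner whose TestPort hands out the given stub driver class,
    # with the PerformanceTests subdirectories the fixtures expect already
    # present in the mock filesystem.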
    def create_runner(self, args=[], driver_class=TestDriver):
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()

        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')

        return runner, test_port

    def run_test(self, test_name):
        runner, port = self.create_runner()
        tests = [ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
        return runner._run_tests_set(tests) == 0

    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))

    def _tests_for_runner(self, runner, test_names):
        filesystem = runner._host.filesystem
        tests = []
        for test in test_names:
            path = filesystem.join(runner._base_path, test)
            if test.startswith('inspector/'):
                tests.append(ChromiumStylePerfTest(runner._port, test, path))
            else:
                tests.append(PerfTest(runner._port, test, path))
        return tests

    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)

    def test_run_test_set_kills_drt_per_run(self):

        class TestDriverWithStopCount(TestDriver):
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)

        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests)

        self.assertEqual(TestDriverWithStopCount.stop_count, 6)

    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)

    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
        parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
        self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
        self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
        self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)

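    # Shared helper for the JSON-output tests: writes two test files, stubs
    # out _upload_json and the timestamps, runs the runner, optionally
    # compares the captured log against the expected per-run output, and
    # returns the logs for further assertions.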
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

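        # A single-element list so the nested function below can record the
        # upload result by mutation; Python 2 has no 'nonlocal'.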
        uploaded = [False]

        def mock_upload_json(hostname, json_path, host_path=None):
            # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
            self.assertIn(hostname, ['some.host'])
            self.assertIn(json_path, ['/mock-checkout/output.json'])
            self.assertIn(host_path, [None, '/api/report'])
            uploaded[0] = upload_succeeds
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code and compare_logs:
            expected_logs = ''
            for i in xrange(repeat):
                runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
                expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
            if results_shown:
                expected_logs += 'MOCK: user.open_url: file://...\n'
            self.assertEqual(self._normalize_output(logs), expected_logs)

        self.assertEqual(uploaded[0], upload_succeeds)

        return logs

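    # The 'tests' subtree every JSON-output test expects for a run of
    # Bindings/event-target-wrapper.html plus inspector/pass.html; the
    # Chromium-style inspector test contributes no subtree of its own here.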
    _event_target_wrapper_and_inspector_results = {
        "Bindings":
            {"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings",
             "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}}

    def test_run_with_json_output(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

        filesystem = port.host.filesystem
        self.assertTrue(filesystem.isfile(runner._output_json_path()))
        self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))

    def test_run_with_description(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

    def create_runner_and_setup_results_template(self, args=[]):
        runner, port = self.create_runner(args)
        filesystem = port.host.filesystem
        # The misspelled '%PeformanceTestsResultsJSON%' token is intentional:
        # it must match the placeholder the runner substitutes in the template.
        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
            'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
            '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
        return runner, port

    def test_run_respects_no_results(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--no-results'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))

    def test_run_generates_json_by_default(self):
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()
        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'

        self.assertFalse(filesystem.isfile(output_json_path))
        self.assertFalse(filesystem.isfile(results_page_path))

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

        self.assertTrue(filesystem.isfile(output_json_path))
        self.assertTrue(filesystem.isfile(results_page_path))

    def test_run_merges_output_by_default(self):
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()

        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))

    def test_run_respects_reset_results(self):
        runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()

        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))

    def test_run_generates_and_show_results_page(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = lambda path: page_shown.append(path)
        filesystem = port.host.filesystem
        self._test_run_with_json_output(runner, filesystem, results_shown=False)

        expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}

        self.maxDiff = None
        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
        self.assertEqual(self._load_output_json(runner), [expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        self._test_run_with_json_output(runner, filesystem, results_shown=False)
        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
        self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))

    def test_run_respects_no_show_results(self):
        show_results_html_file = lambda path: page_shown.append(path)

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--no-show-results'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
        self.assertEqual(page_shown, [])

    def test_run_with_bad_output_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)

    def test_run_with_slave_config_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])

    def test_run_with_bad_slave_config_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)

    def test_run_with_multiple_repositories(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
                "some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

    def test_run_with_upload_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json[0]['platform'], 'platform1')
        self.assertEqual(generated_json[0]['builderName'], 'builder1')
        self.assertEqual(generated_json[0]['buildNumber'], 123)

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)

    def test_run_with_upload_json_should_generate_perf_webkit_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
            '--slave-config-json-path=/mock-checkout/slave-config.json'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertTrue(isinstance(generated_json, list))
        self.assertEqual(len(generated_json), 1)

        output = generated_json[0]
        self.maxDiff = None
        self.assertEqual(output['platform'], 'platform1')
        self.assertEqual(output['buildNumber'], 123)
        self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
        self.assertEqual(output['builderName'], 'builder1')
        self.assertEqual(output['builderKey'], 'value1')
        self.assertEqual(output['revisions'], {'blink': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
        self.assertEqual(output['tests'].keys(), ['Bindings'])
        self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
        self.assertEqual(output['tests']['Bindings']['url'], 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings')
        self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
        self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
            'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
            'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})

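    # --repeat re-runs the whole suite N times and appends one JSON entry per
    # run, so the output file should end up with five identical entries.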
    def test_run_with_repeat(self):
        self.maxDiff = None
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--repeat', '5'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
        self.assertEqual(self._load_output_json(runner), [
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

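    # --test-runner-count=3 should leave three value lists (one per runner
    # invocation) under the metric's 'current' entry, rather than the four
    # seen in the fixtures' [[...]] * 4 results.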
    def test_run_with_test_runner_count(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-runner-count=3'])
        self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertTrue(isinstance(generated_json, list))
        self.assertEqual(len(generated_json), 1)

        output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
        self.assertEqual(len(output), 3)
        expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
        for metrics in output:
            self.assertEqual(metrics, expectedMetrics)