OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory. Maintaining these files by hand is |
8 too unwieldy. | 8 too unwieldy. |
9 """ | 9 """ |
10 | 10 |
11 import json | 11 import json |
12 import os | 12 import os |
13 import sys | 13 import sys |
14 | 14 |
15 from chrome_telemetry_build import chromium_config | 15 from chrome_telemetry_build import chromium_config |
16 | 16 |
17 sys.path.append(chromium_config.GetTelemetryDir()) | 17 sys.path.append(chromium_config.GetTelemetryDir()) |
18 from telemetry import benchmark as benchmark_module | 18 from telemetry import benchmark as benchmark_module |
19 from telemetry.core import discover | 19 from telemetry.core import discover |
20 from telemetry.util import bot_utils | 20 from telemetry.util import bot_utils |
21 | 21 |
22 | 22 |
# Non-telemetry, gtest-based perf tests. Each entry gives the gtest binary
# name, the args it is launched with, the runner script, and a 'testers'
# mapping keyed by master name ('chromium.perf', 'chromium.perf.fyi') listing
# which tester bots run it and on which of their host shards (1-based; see
# script_test_enabled_on_tester).
SCRIPT_TESTS = [
  {
    'args': [
      'gpu_perftests'
    ],
    'name': 'gpu_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {'name': 'Android Galaxy S5 Perf', 'shards': [3]},
        {'name': 'Android Nexus5 Perf', 'shards': [2]},
        {'name': 'Android Nexus7v2 Perf', 'shards': [2]},
        {'name': 'Android Nexus9 Perf', 'shards': [2]}
      ],
      'chromium.perf.fyi': [
        {'name': 'Android Galaxy S5 Perf', 'shards': [1]},
      ]
    }
  },
  {
    'args': [
      'cc_perftests'
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {'name': 'Android Galaxy S5 Perf', 'shards': [3]},
        {'name': 'Android Nexus5 Perf', 'shards': [2]},
        {'name': 'Android Nexus6 Perf', 'shards': [2]},
        {'name': 'Android Nexus7v2 Perf', 'shards': [2]},
        {'name': 'Android Nexus9 Perf', 'shards': [2]},
      ],
      'chromium.perf.fyi': [
        {'name': 'Android Galaxy S5 Perf', 'shards': [1]},
      ]
    }
  },
  {
    'args': [
      'cc_perftests',
      '--test-launcher-print-test-stdio=always'
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {'name': 'Linux Perf', 'shards': [3]},
      ]
    }
  },
  {
    'args': [
      'load_library_perf_tests',
      '--test-launcher-print-test-stdio=always'
    ],
    'name': 'load_library_perf_tests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {'name': 'Linux Perf', 'shards': [3]},
        {'name': 'Win 7 ATI GPU Perf', 'shards': [2]},
        {'name': 'Win 7 Nvidia GPU Perf', 'shards': [2]},
        {'name': 'Win 7 Perf', 'shards': [3]},
        {'name': 'Win 7 x64 Perf', 'shards': [2]},
        {'name': 'Win 8 Perf', 'shards': [2]},
      ]
    }
  },
  {
    'args': [
      'performance_browser_tests',
      '--test-launcher-print-test-stdio=always',
      '--gtest_filter=TabCapturePerformanceTest.*:CastV2PerformanceTest.*',
      '--test-launcher-jobs=1',
      '--enable-gpu'
    ],
    'name': 'performance_browser_tests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {'name': 'Mac 10.8 Perf', 'shards': [3]},
        {'name': 'Mac 10.9 Perf', 'shards': [3]},
        {'name': 'Win 7 ATI GPU Perf', 'shards': [2]},
        {'name': 'Win 7 Nvidia GPU Perf', 'shards': [2]},
        {'name': 'Win 7 Perf', 'shards': [3]},
        {'name': 'Win 7 x64 Perf', 'shards': [2]},
        {'name': 'Win 8 Perf', 'shards': [2]},
      ]
    }
  },
  {
    'args': [
      'angle_perftests',
      '--test-launcher-print-test-stdio=always',
      '--test-launcher-jobs=1'
    ],
    'name': 'angle_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {'name': 'Win 7 ATI GPU Perf', 'shards': [2]},
        {'name': 'Win 7 Nvidia GPU Perf', 'shards': [2]},
      ]
    }
  },
]
187 | 211 |
def add_tester(waterfall, name, perf_id, platform, target_bits=64,
               num_host_shards=1, num_device_shards=1, swarming=None):
  """Registers one tester bot in the waterfall's 'testers' mapping.

  Args:
    waterfall: Waterfall dict with a 'testers' sub-dict; mutated in place.
    name: Display name of the tester bot (used as the key).
    perf_id: Perf dashboard id; currently unused (kept for future use).
    platform: Platform string, e.g. 'android', 'win', 'linux'.
    target_bits: Bitness of the build this tester runs.
    num_host_shards: Number of host machines the tester is split across.
    num_device_shards: Number of device shards per host.
    swarming: Optional list of swarming dimension dicts; when provided the
        tester is marked as swarmed.

  Returns:
    The same (mutated) waterfall dict, for chaining.
  """
  del perf_id  # this will be needed
  waterfall['testers'][name] = {
    'platform': platform,
    'num_device_shards': num_device_shards,
    'num_host_shards': num_host_shards,
    'target_bits': target_bits,
  }

  if swarming:
    waterfall['testers'][name]['swarming_dimensions'] = swarming
    waterfall['testers'][name]['swarming'] = True

  return waterfall
203 | 227 |
204 def get_fyi_waterfall_config(): | 228 def get_fyi_waterfall_config(): |
205 waterfall = {'builders':[], 'testers': {}} | 229 waterfall = {'builders':[], 'testers': {}} |
206 waterfall = add_tester( | 230 waterfall = add_tester( |
| 231 waterfall, 'Android Galaxy S5 Perf', |
| 232 'android-galaxy-s5-perf', 'android') |
| 233 waterfall = add_tester( |
207 waterfall, 'Win 10 Low-End Perf Tests', | 234 waterfall, 'Win 10 Low-End Perf Tests', |
208 'win-low-end-2-core', 'win', | 235 'win-low-end-2-core', 'win', |
209 swarming=[ | 236 swarming=[ |
210 { | 237 { |
211 'device_ids': ['build187-b4'] | 238 'device_ids': ['build187-b4'] |
212 }, | 239 }, |
213 { | 240 { |
214 'device_ids': ['build171-b4', 'build186-b4'] | 241 'device_ids': ['build171-b4', 'build186-b4'] |
215 } | 242 } |
216 ]) | 243 ]) |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
296 '--output-format=chartjson', | 323 '--output-format=chartjson', |
297 '--browser=%s' % browser | 324 '--browser=%s' % browser |
298 ] | 325 ] |
299 # When this is enabled on more than just windows machines we will need | 326 # When this is enabled on more than just windows machines we will need |
300 # --device=android | 327 # --device=android |
301 | 328 |
302 step_name = benchmark_name | 329 step_name = benchmark_name |
303 if browser == 'reference': | 330 if browser == 'reference': |
304 test_args.append('--output-trace-tag=_ref') | 331 test_args.append('--output-trace-tag=_ref') |
305 step_name += '.reference' | 332 step_name += '.reference' |
306 swarming = { | 333 |
307 # Always say this is true regardless of whether the tester | 334 swarming = None |
308 # supports swarming. It doesn't hurt. | 335 if swarming_dimensions: |
309 'can_use_on_swarming_builders': True, | 336 swarming = { |
310 'expiration': 14400, | 337 # Always say this is true regardless of whether the tester |
311 'dimension_sets': swarming_dimensions | 338 # supports swarming. It doesn't hurt. |
312 } | 339 'can_use_on_swarming_builders': True, |
| 340 'expiration': 14400, |
| 341 'dimension_sets': swarming_dimensions |
| 342 } |
313 | 343 |
314 result = { | 344 result = { |
315 'args': test_args, | 345 'args': test_args, |
316 'isolate_name': 'telemetry_perf_tests', | 346 'isolate_name': 'telemetry_perf_tests', |
317 'name': step_name, | 347 'name': step_name, |
318 'override_compile_targets': ['telemetry_perf_tests'], | 348 'override_compile_targets': ['telemetry_perf_tests'], |
319 'swarming': swarming, | |
320 } | 349 } |
| 350 if swarming: |
| 351 result['swarming'] = swarming |
321 | 352 |
322 return result | 353 return result |
323 | 354 |
def script_test_enabled_on_tester(master, test, tester_name, shard):
  """Returns True if `test` runs on host shard `shard` of `tester_name`.

  Args:
    master: Master/waterfall name, e.g. 'chromium.perf'; testers are looked
        up under test['testers'][master] (missing master means disabled).
    test: One SCRIPT_TESTS-style entry.
    tester_name: Display name of the tester bot.
    shard: 1-based host shard number to check.
  """
  for enabled_tester in test['testers'].get(master, []):
    if enabled_tester['name'] == tester_name:
      if shard in enabled_tester['shards']:
        return True
  return False
330 | 361 |
def generate_script_tests(master, tester_name, shard):
  """Collects the script-test entries enabled for one tester host shard.

  Args:
    master: Master/waterfall name used to select testers in SCRIPT_TESTS.
    tester_name: Display name of the tester bot.
    shard: 1-based host shard number.

  Returns:
    A list of {'args', 'name', 'script'} dicts, one per enabled test.
  """
  script_tests = []
  for test in SCRIPT_TESTS:
    if script_test_enabled_on_tester(master, test, tester_name, shard):
      script_tests.append({
        'args': test['args'],
        'name': test['name'],
        'script': test['script'],
      })
  return script_tests
342 | 373 |
343 def generate_telemetry_tests(tester_config, benchmarks): | 374 def generate_telemetry_tests(tester_config, benchmarks): |
344 isolated_scripts = [] | 375 isolated_scripts = [] |
(...skipping 25 matching lines...) Expand all Loading... |
370 test = generate_telemetry_test( | 401 test = generate_telemetry_test( |
371 swarming_dimensions, benchmark.Name(), browser_name) | 402 swarming_dimensions, benchmark.Name(), browser_name) |
372 isolated_scripts.append(test) | 403 isolated_scripts.append(test) |
373 # Now create another executable for this benchmark on the reference browser | 404 # Now create another executable for this benchmark on the reference browser |
374 reference_test = generate_telemetry_test( | 405 reference_test = generate_telemetry_test( |
375 swarming_dimensions, benchmark.Name(),'reference') | 406 swarming_dimensions, benchmark.Name(),'reference') |
376 isolated_scripts.append(reference_test) | 407 isolated_scripts.append(reference_test) |
377 | 408 |
378 return isolated_scripts | 409 return isolated_scripts |
379 | 410 |
| 411 |
# Only benchmarks whose Name() appears here are kept by current_benchmarks();
# everything else discovered under benchmarks/ is filtered out.
BENCHMARK_NAME_WHITELIST = set([
    u'smoothness.top_25_smooth',
    u'sunspider',
    u'system_health.webview_startup',
    u'page_cycler_v2.intl_hi_ru',
    u'dromaeo.cssqueryjquery',
])
| 419 |
| 420 |
def current_benchmarks():
  """Discovers telemetry benchmarks and returns the whitelisted subset.

  Benchmark classes are discovered under the sibling benchmarks/ directory;
  only those whose Name() is in BENCHMARK_NAME_WHITELIST are returned,
  sorted by benchmark name for stable output.
  """
  current_dir = os.path.dirname(__file__)
  benchmarks_dir = os.path.join(current_dir, 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
  return sorted(
      (bench for bench in all_benchmarks
       if bench.Name() in BENCHMARK_NAME_WHITELIST),
      key=lambda b: b.Name())
388 | 432 |
def generate_all_tests(waterfall):
  """Generates and writes the buildbot json file for one waterfall.

  Swarmed testers (config['swarming'] is truthy) get telemetry benchmark
  isolated_scripts; non-swarmed testers get per-host-shard script tests.
  The output file name is derived from waterfall['name'] and written under
  src/testing/buildbot/.

  Args:
    waterfall: Waterfall dict with 'name', 'builders' and 'testers' keys.
  """
  tests = {}
  for builder in waterfall['builders']:
    tests[builder] = {}
  all_benchmarks = current_benchmarks()

  # NOTE: .items() instead of py2-only .iteritems(); identical behavior here
  # and keeps the script runnable under Python 3.
  for name, config in waterfall['testers'].items():
    if config.get('swarming', False):
      # Right now we are only generating benchmarks for the fyi waterfall
      isolated_scripts = generate_telemetry_tests(config, all_benchmarks)
      tests[name] = {
        'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
      }
    else:
      # scripts are only currently run in addition to the main waterfall. They
      # are currently the only thing generated in the perf json file.
      # TODO eyaich: will need to handle the sharding differently when we have
      # swarmed bots on the main waterfall.
      for shard in range(0, config['num_host_shards']):
        tester_name = '%s (%d)' % (name, shard + 1)
        scripts = generate_script_tests(waterfall['name'], name, shard + 1)
        if scripts:
          tests[tester_name] = {
            'scripts': sorted(scripts, key=lambda x: x['name'])
          }

  # Sentinel keys that sort first so the generated-file warning is visible
  # at the top of the (sort_keys=True) json output.
  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {}
  filename = '%s.json' % waterfall['name']

  current_dir = os.path.dirname(os.path.abspath(__file__))
  src_dir = os.path.dirname(os.path.dirname(current_dir))

  with open(os.path.join(src_dir, 'testing', 'buildbot', filename), 'w') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')
423 | 470 |
| 471 |
def main():
  """Regenerates chromium.perf.json and chromium.perf.fyi.json.

  Returns:
    Process exit code (0 on success).
  """
  waterfall = get_waterfall_config()
  waterfall['name'] = 'chromium.perf'
  fyi_waterfall = get_fyi_waterfall_config()
  fyi_waterfall['name'] = 'chromium.perf.fyi'

  generate_all_tests(fyi_waterfall)
  generate_all_tests(waterfall)
  return 0
430 | 481 |
if __name__ == "__main__":
  sys.exit(main())
OLD | NEW |