Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 #!/usr/bin/env python | |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | |
| 3 # Use of this source code is governed by a BSD-style license that can be | |
| 4 # found in the LICENSE file. | |
| 5 | |
| 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | |
| 7 the src/testing/buildbot directory. Maintaining these files by hand is | |
| 8 too unwieldy. | |
| 9 """ | |
| 10 | |
| 11 import json | |
| 12 import os | |
| 13 import sys | |
| 14 | |
| 15 from chrome_telemetry_build import chromium_config | |
| 16 | |
| 17 sys.path.append(chromium_config.GetTelemetryDir()) | |
| 18 from telemetry import benchmark as benchmark_module | |
| 19 from telemetry.core import discover | |
| 20 | |
|
dtu
2016/08/25 17:39:05
style guide: two spaces between all top level blocks
eyaich1
2016/08/29 18:38:09
Done.
| |
# Non-telemetry gtest-based perf tests that run via a wrapper script on the
# main waterfall.  Each entry specifies:
#   'args':    command line passed to the wrapper script (first element is the
#              gtest binary name).
#   'name':    the step name emitted into the generated json.
#   'script':  the wrapper script in testing/scripts that runs the test.
#   'testers': which tester bots the test is enabled on; 'shards' lists the
#              1-based host shards of that tester that run the test.
SCRIPT_TESTS = [
  {
    'args': [
      'gpu_perftests'
    ],
    'name': 'gpu_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Android Galaxy S5 Perf',
        'shards': [3]
      },
      {
        'name': 'Android Nexus5 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus7v2 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus9 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'cc_perftests'
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Android Galaxy S5 Perf',
        'shards': [3]
      },
      {
        'name': 'Android Nexus5 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus6 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus7v2 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus9 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'cc_perftests',
      '--test-launcher-print-test-stdio=always'
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Linux Perf',
        'shards': [3]
      },
    ]
  },
  {
    'args': [
      'load_library_perf_tests',
      '--test-launcher-print-test-stdio=always'
    ],
    'name': 'load_library_perf_tests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Linux Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 ATI GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Intel GPU Perf',
        'shards': [1, 2, 3, 4, 5]
      },
      {
        'name': 'Win 7 Nvidia GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 x64 Perf',
        'shards': [2]
      },
      {
        'name': 'Win 8 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'performance_browser_tests',
      '--test-launcher-print-test-stdio=always',
      '--gtest_filter=TabCapturePerformanceTest.*:CastV2PerformanceTest.*',
      '--test-launcher-jobs=1',
      '--enable-gpu'
    ],
    'name': 'performance_browser_tests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Mac 10.8 Perf',
        'shards': [3]
      },
      {
        'name': 'Mac 10.9 Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 ATI GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Intel GPU Perf',
        'shards': [1, 2, 3, 4, 5]
      },
      {
        'name': 'Win 7 Nvidia GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 x64 Perf',
        'shards': [2]
      },
      {
        'name': 'Win 8 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'angle_perftests',
      '--test-launcher-print-test-stdio=always',
      '--test-launcher-jobs=1'
    ],
    'name': 'angle_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Win 7 ATI GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Intel GPU Perf',
        'shards': [1, 2, 3, 4, 5]
      },
      {
        'name': 'Win 7 Nvidia GPU Perf',
        'shards': [2]
      },
    ]
  },
]
| 197 | |
def AddTester(waterfall, name, perf_id, platform, target_bits=64,
              num_host_shards=1, num_device_shards=1, swarming=None):
  """Adds a tester entry named |name| to |waterfall| and returns the waterfall.

  Args:
    waterfall: dict with a 'testers' dict to add the new entry to.
    perf_id: currently unused (kept for future use; see comment below).
    platform: e.g. 'win', 'mac', 'linux', 'android'.
    target_bits: bitness of the build this tester runs (32 or 64).
    num_host_shards: number of buildbot host shards for this tester.
    num_device_shards: number of attached devices sharing the work.
    swarming: optional dict with 'gpu' and 'os' swarming dimensions; when
        given, the tester is marked as swarmed.
  """
  del perf_id  # this will be needed
  waterfall['testers'][name] = {
      'platform': platform,
      'num_device_shards': num_device_shards,
      'num_host_shards': num_host_shards,
      'target_bits': target_bits
  }
  # Implicit-false test (per style guide); also avoids emitting dimensions
  # for an empty dict.
  if swarming:
    waterfall['testers'][name]['swarming_dimensions'] = {
        'gpu': swarming['gpu'],
        'os': swarming['os']
    }
    waterfall['testers'][name]['swarming'] = True

  return waterfall
| 215 | |
def generate_fyi_waterfall():
  """Returns the tester config dict for the chromium.perf.fyi waterfall."""
  waterfall = {'builders': [], 'testers': {}}
  waterfall = AddTester(waterfall, 'Win 10 Low-End 2 Core Perf',
                        'win-low-end-2-core', 'win',
                        swarming={'gpu': '8086:22b1',
                                  'os': 'Windows-10-10240'})
  waterfall = AddTester(waterfall, 'Win 10 Low-End 4 Core Perf',
                        'win-low-end-4-core', 'win',
                        swarming={'gpu': '1002:9830',
                                  'os': 'Windows-10-10586'})
  return waterfall


# Backwards-compatible alias for existing callers; prefer the PEP 8
# lower_with_under name above.
generateFyiWaterfall = generate_fyi_waterfall
| 225 | |
def generate_waterfall():
  """Returns the tester config dict for the main chromium.perf waterfall."""
  waterfall = {'builders': [], 'testers': {}}
  # These configurations are taken from chromium_perf.py in
  # build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync
  # to generate the correct json for each tester.
  waterfall = AddTester(waterfall, 'Android Galaxy S5 Perf',
                        'android-galaxy-s5', 'android', target_bits=32,
                        num_device_shards=7, num_host_shards=3)
  waterfall = AddTester(waterfall, 'Android Nexus5 Perf', 'android-nexus5',
                        'android', target_bits=32, num_device_shards=7,
                        num_host_shards=3)
  waterfall = AddTester(waterfall, 'Android Nexus5X Perf', 'android-nexus5X',
                        'android', target_bits=32, num_device_shards=7,
                        num_host_shards=3)
  waterfall = AddTester(waterfall, 'Android Nexus6 Perf', 'android-nexus6',
                        'android', target_bits=32, num_device_shards=7,
                        num_host_shards=3)
  waterfall = AddTester(waterfall, 'Android Nexus7v2 Perf', 'android-nexus7v2',
                        'android', target_bits=32, num_device_shards=7,
                        num_host_shards=3)
  # Nexus9 is the one 64-bit android tester (default target_bits).
  waterfall = AddTester(waterfall, 'Android Nexus9 Perf', 'android-nexus9',
                        'android', num_device_shards=7, num_host_shards=3)
  waterfall = AddTester(waterfall, 'Android One Perf', 'android-one',
                        'android', target_bits=32, num_device_shards=7,
                        num_host_shards=3)

  waterfall = AddTester(waterfall, 'Win Zenbook Perf', 'win-zenbook', 'win',
                        num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 10 Perf', 'chromium-rel-win10', 'win',
                        num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 8 Perf', 'chromium-rel-win8-dual',
                        'win', num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 7 Perf', 'chromium-rel-win7-dual',
                        'win', target_bits=32, num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 7 x64 Perf',
                        'chromium-rel-win7-x64-dual', 'win', num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 7 ATI GPU Perf',
                        'chromium-rel-win7-gpu-ati', 'win', num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 7 Intel GPU Perf',
                        'chromium-rel-win7-gpu-intel', 'win',
                        num_host_shards=5)
  waterfall = AddTester(waterfall, 'Win 7 Nvidia GPU Perf',
                        'chromium-rel-win7-gpu-nvidia', 'win',
                        num_host_shards=5)

  waterfall = AddTester(waterfall, 'Mac 10.11 Perf', 'chromium-rel-mac11',
                        'mac', num_host_shards=5)
  waterfall = AddTester(waterfall, 'Mac 10.10 Perf', 'chromium-rel-mac10',
                        'mac', num_host_shards=5)
  waterfall = AddTester(waterfall, 'Mac Retina Perf',
                        'chromium-rel-mac-retina', 'mac', num_host_shards=5)
  waterfall = AddTester(waterfall, 'Mac HDD Perf', 'chromium-rel-mac-hdd',
                        'mac', num_host_shards=5)

  waterfall = AddTester(waterfall, 'Linux Perf', 'linux-release', 'linux',
                        num_host_shards=5)

  return waterfall


# Backwards-compatible alias for existing callers; prefer the PEP 8
# lower_with_under name above.
generateWaterfall = generate_waterfall
| 277 | |
def generate_telemetry_test(tester_config, benchmark_name, browser):
  """Builds one isolated-script entry for |benchmark_name| on |browser|.

  The step name must end in 'test' or 'tests' in order for the results to
  automatically show up on the flakiness dashboard.  (At least, this was true
  some time ago.)  Continue to use this naming convention for the time being
  to minimize changes.
  """
  step_name = benchmark_name
  test_args = [
      benchmark_name,
      '-v',
      '--upload_results',
      '--output-format=chartjson',
      '--browser=%s' % browser
  ]
  if browser == 'reference':
    # Reference-browser runs get a distinct trace tag and step name.
    test_args.append('--output-trace-tag=_ref')
    step_name += '.reference'

  return {
      'args': test_args,
      'isolate_name': 'telemetry_perf_tests',
      'name': step_name,
      'override_compile_targets': 'telemetry_perf_tests',
      'swarming': {
          # Always say this is true regardless of whether the tester
          # supports swarming. It doesn't hurt.
          'can_use_on_swarming_builders': True,
          'dimension_sets': [
              tester_config['swarming_dimensions']
          ]
      },
  }
| 314 | |
def script_test_enabled_on_tester(test, tester_name, shard):
  """Returns True if |test| should run on |shard| of tester |tester_name|.

  |test| is an entry of SCRIPT_TESTS; |shard| is a 1-based host shard index.
  """
  for enabled_tester in test['testers']:
    if enabled_tester['name'] == tester_name:
      # Membership test replaces the manual inner loop (reviewer suggestion).
      if shard in enabled_tester['shards']:
        return True
  return False
| 322 | |
def generate_script_tests(tester_name, shard):
  """Returns the SCRIPT_TESTS entries enabled on |shard| of |tester_name|."""
  return [
      {
          'args': test['args'],
          'name': test['name'],
          'script': test['script']
      }
      for test in SCRIPT_TESTS
      if script_test_enabled_on_tester(test, tester_name, shard)
  ]
| 334 | |
def generate_telemetry_tests(tester_config, benchmarks):
  """Returns isolated-script entries for |benchmarks| on this tester.

  Each benchmark produces two entries: one on the browser implied by the
  tester's platform/bits and one on the reference browser.
  """
  isolated_scripts = []
  # First determine the browser that you need based on the tester.
  if tester_config['platform'] == 'android':
    # Note: --device=android is not currently needed here.
    browser_name = 'android-chromium'
  elif (tester_config['platform'] == 'win'
        and tester_config['target_bits'] == 64):
    browser_name = 'release_x64'
  else:
    browser_name = 'release'
  for benchmark in benchmarks:
    test = generate_telemetry_test(
        tester_config, benchmark.Name(), browser_name)
    isolated_scripts.append(test)
    # Now create another executable for this benchmark on the reference
    # browser.
    reference_test = generate_telemetry_test(
        tester_config, benchmark.Name(), 'reference')
    isolated_scripts.append(reference_test)
  return isolated_scripts
| 355 | |
def current_benchmarks():
  """Discovers all benchmark classes under the local benchmarks/ directory."""
  benchmarks_dir = os.path.join(os.path.dirname(__file__), 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True)
  return all_benchmarks.values()
| 364 | |
def generate_all_tests(waterfall, is_fyi):
  """Writes the buildbot json file for |waterfall|.

  Writes chromium.perf.fyi.json when |is_fyi| is true, otherwise
  chromium.perf.json, into src/testing/buildbot.
  """
  tests = {}
  for builder in waterfall['builders']:
    tests[builder] = {}
  # .items() instead of .iteritems(): identical behavior here and keeps the
  # script Python 3 compatible.
  for name, config in waterfall['testers'].items():
    if is_fyi:
      # Right now we are only generating benchmarks for the fyi waterfall.
      all_benchmarks = current_benchmarks()
      isolated_scripts = generate_telemetry_tests(config, all_benchmarks)
      tests[name] = {
          'isolated_scripts': sorted(isolated_scripts,
                                     key=lambda x: x['name'])
      }
    else:
      # Scripts are only currently run in addition to the main waterfall. They
      # are currently the only thing generated in the perf json file.
      # TODO(eyaich): will need to handle the sharding differently when we
      # have swarmed bots on the main waterfall.
      for shard in range(0, config['num_host_shards']):
        tester_name = '%s (%d)' % (name, shard + 1)
        scripts = generate_script_tests(name, shard + 1)
        # Implicit-false test (per style guide) instead of len() > 0.
        if scripts:
          tests[tester_name] = {
              'scripts': sorted(scripts, key=lambda x: x['name'])
          }

  # These sort first so the generated file carries an edit warning at the top.
  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See generate_perf_json.py to make changes'] = {}
  filename = 'chromium.perf.fyi.json' if is_fyi else 'chromium.perf.json'

  current_dir = os.path.dirname(os.path.abspath(__file__))
  src_dir = os.path.dirname(os.path.dirname(current_dir))

  with open(os.path.join(src_dir, 'testing', 'buildbot', filename), 'w') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')
| 400 | |
def main():
  """Regenerates both perf waterfall json files; returns a process exit code."""
  # Build the tester configs for the main and FYI waterfalls, then write the
  # corresponding json files under src/testing/buildbot.
  waterfall = generateWaterfall()
  fyi_waterfall = generateFyiWaterfall()
  generate_all_tests(fyi_waterfall, True)
  generate_all_tests(waterfall, False)
  return 0


if __name__ == "__main__":
  sys.exit(main())
| OLD | NEW |