#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to generate chromium.perf.json and chromium.perf.fyi.json in
the src/testing/buildbot directory. Maintaining these files by hand is
too unwieldy.
"""

import json
import os
import sys

from chrome_telemetry_build import chromium_config

sys.path.append(chromium_config.GetTelemetryDir())
from telemetry import benchmark as benchmark_module
from telemetry.core import discover

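# Gtest-based performance tests that are run through the gtest_perf_test.py
# wrapper rather than through Telemetry. 'shards' lists the 1-based host
# shards of each tester on which the test is enabled.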
SCRIPT_TESTS = [
  {
    'args': [
      'gpu_perftests'
    ],
    'name': 'gpu_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Android Galaxy S5 Perf',
        'shards': [3]
      },
      {
        'name': 'Android Nexus5 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus7v2 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus9 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'cc_perftests'
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Android Galaxy S5 Perf',
        'shards': [3]
      },
      {
        'name': 'Android Nexus5 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus6 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus7v2 Perf',
        'shards': [2]
      },
      {
        'name': 'Android Nexus9 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'cc_perftests',
      '--test-launcher-print-test-stdio=always'
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Linux Perf',
        'shards': [3]
      },
    ]
  },
  {
    'args': [
      'load_library_perf_tests',
      '--test-launcher-print-test-stdio=always'
    ],
    'name': 'load_library_perf_tests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Linux Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 ATI GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Nvidia GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 x64 Perf',
        'shards': [2]
      },
      {
        'name': 'Win 8 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'performance_browser_tests',
      '--test-launcher-print-test-stdio=always',
      '--gtest_filter=TabCapturePerformanceTest.*:CastV2PerformanceTest.*',
      '--test-launcher-jobs=1',
      '--enable-gpu'
    ],
    'name': 'performance_browser_tests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Mac 10.8 Perf',
        'shards': [3]
      },
      {
        'name': 'Mac 10.9 Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 ATI GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Nvidia GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Perf',
        'shards': [3]
      },
      {
        'name': 'Win 7 x64 Perf',
        'shards': [2]
      },
      {
        'name': 'Win 8 Perf',
        'shards': [2]
      },
    ]
  },
  {
    'args': [
      'angle_perftests',
      '--test-launcher-print-test-stdio=always',
      '--test-launcher-jobs=1'
    ],
    'name': 'angle_perftests',
    'script': 'gtest_perf_test.py',
    'testers': [
      {
        'name': 'Win 7 ATI GPU Perf',
        'shards': [2]
      },
      {
        'name': 'Win 7 Nvidia GPU Perf',
        'shards': [2]
      },
    ]
  },
]

def add_tester(waterfall, name, perf_id, platform, target_bits=64,
               num_host_shards=1, num_device_shards=1, swarming=None):
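  """Adds a tester config to the waterfall dict and returns the waterfall.

  A tester that carries a swarming dict is marked as swarmed, and its gpu
  and os values are recorded as its swarming dimensions.
  """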
  del perf_id  # Currently unused; this will be needed later.
  waterfall['testers'][name] = {
    'platform': platform,
    'num_device_shards': num_device_shards,
    'num_host_shards': num_host_shards,
    'target_bits': target_bits
  }
  if swarming:
    waterfall['testers'][name]['swarming_dimensions'] = {
      'gpu': swarming['gpu'],
      'os': swarming['os']
    }
    waterfall['testers'][name]['swarming'] = True

  return waterfall

def get_fyi_waterfall_config():
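  """Returns the tester configuration for the chromium.perf.fyi waterfall."""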
  waterfall = {'builders': [], 'testers': {}}
  waterfall = add_tester(
      waterfall, 'Win 10 Low-End 2 Core Perf',
      'win-low-end-2-core', 'win',
      swarming={'gpu': '8086:22b1', 'os': 'Windows-10-10240'})
  waterfall = add_tester(
      waterfall, 'Win 10 Low-End 4 Core Perf',
      'win-low-end-4-core', 'win',
      swarming={'gpu': '1002:9830', 'os': 'Windows-10-10586'})
  return waterfall

def get_waterfall_config():
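  """Returns the tester configuration for the main chromium.perf waterfall."""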
  waterfall = {'builders': [], 'testers': {}}
  # These configurations are taken from chromium_perf.py in
  # build/scripts/slave/recipe_modules/chromium_tests and must be kept in
  # sync with it to generate the correct json for each tester.
  waterfall = add_tester(
      waterfall, 'Android Galaxy S5 Perf',
      'android-galaxy-s5', 'android', target_bits=32,
      num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus5 Perf', 'android-nexus5',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus5X Perf', 'android-nexus5X',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus6 Perf', 'android-nexus6',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus7v2 Perf', 'android-nexus7v2',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus9 Perf', 'android-nexus9',
      'android', num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android One Perf', 'android-one',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)

  waterfall = add_tester(
      waterfall, 'Win Zenbook Perf', 'win-zenbook', 'win', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 10 Perf', 'chromium-rel-win10', 'win',
      num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 8 Perf', 'chromium-rel-win8-dual', 'win',
      num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 7 Perf', 'chromium-rel-win7-dual',
      'win', target_bits=32, num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 7 x64 Perf',
      'chromium-rel-win7-x64-dual', 'win', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 7 ATI GPU Perf',
      'chromium-rel-win7-gpu-ati', 'win', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 7 Intel GPU Perf',
      'chromium-rel-win7-gpu-intel', 'win', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Win 7 Nvidia GPU Perf',
      'chromium-rel-win7-gpu-nvidia', 'win', num_host_shards=5)

  waterfall = add_tester(
      waterfall, 'Mac 10.11 Perf', 'chromium-rel-mac11',
      'mac', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Mac 10.10 Perf', 'chromium-rel-mac10',
      'mac', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Mac Retina Perf',
      'chromium-rel-mac-retina', 'mac', num_host_shards=5)
  waterfall = add_tester(
      waterfall, 'Mac HDD Perf', 'chromium-rel-mac-hdd', 'mac',
      num_host_shards=5)

  waterfall = add_tester(
      waterfall, 'Linux Perf', 'linux-release', 'linux', num_host_shards=5)

  return waterfall

def generate_telemetry_test(tester_config, benchmark_name, browser):
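  """Returns the dict for one benchmark run as an isolated script test."""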
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.

  test_args = [
    benchmark_name,
    '-v',
    '--upload_results',
    '--output-format=chartjson',
    '--browser=%s' % browser
  ]
  # When this is enabled on more than just Windows machines, we will need
  # to pass --device=android as well.

  step_name = benchmark_name
  if browser == 'reference':
    test_args.append('--output-trace-tag=_ref')
    step_name += '.reference'
  swarming = {
    # Always say this is true regardless of whether the tester
    # supports swarming. It doesn't hurt.
    'can_use_on_swarming_builders': True,
    'dimension_sets': [
      tester_config['swarming_dimensions']
    ]
  }

  result = {
    'args': test_args,
    'isolate_name': 'telemetry_perf_tests',
    'name': step_name,
    'override_compile_targets': 'telemetry_perf_tests',
    'swarming': swarming,
  }

  return result

def script_test_enabled_on_tester(test, tester_name, shard):
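  """Returns True if the test lists tester_name with the given host shard."""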
  for enabled_tester in test['testers']:
    if enabled_tester['name'] == tester_name:
      if shard in enabled_tester['shards']:
        return True
  return False

def generate_script_tests(tester_name, shard):
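  """Returns the SCRIPT_TESTS entries enabled for this tester and shard."""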
  script_tests = []
  for test in SCRIPT_TESTS:
    if script_test_enabled_on_tester(test, tester_name, shard):
      script = {
        'args': test['args'],
        'name': test['name'],
        'script': test['script']
      }
      script_tests.append(script)
  return script_tests

def generate_telemetry_tests(tester_config, benchmarks):
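  """Generates isolated script tests: one per benchmark, each paired with a
  second run on the reference browser.
  """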
  isolated_scripts = []
  # First, determine which browser to use based on the tester's platform.
  browser_name = ''
  if tester_config['platform'] == 'android':
    browser_name = 'android-chromium'
  elif (tester_config['platform'] == 'win'
        and tester_config['target_bits'] == 64):
    browser_name = 'release_x64'
  else:
    browser_name = 'release'
  for benchmark in benchmarks:
    test = generate_telemetry_test(
        tester_config, benchmark.Name(), browser_name)
    isolated_scripts.append(test)
    # Also generate an entry for this benchmark on the reference browser.
    reference_test = generate_telemetry_test(
        tester_config, benchmark.Name(), 'reference')
    isolated_scripts.append(reference_test)
  return isolated_scripts

def current_benchmarks():
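  """Returns all benchmark classes discovered under the benchmarks/ dir."""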
  current_dir = os.path.dirname(__file__)
  benchmarks_dir = os.path.join(current_dir, 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  return discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()

def generate_all_tests(waterfall, is_fyi):
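  """Generates the tests for each tester in the waterfall and writes them
  to the appropriate json file in src/testing/buildbot.
  """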
  tests = {}
  for builder in waterfall['builders']:
    tests[builder] = {}
  for name, config in waterfall['testers'].iteritems():
    if is_fyi:
      # Right now we are only generating benchmarks for the fyi waterfall
      all_benchmarks = current_benchmarks()
      isolated_scripts = generate_telemetry_tests(config, all_benchmarks)
      tests[name] = {
        'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
      }
    else:
      # Script tests are currently only run on the main waterfall, and they
      # are the only thing generated in its perf json file.
      # TODO(eyaich): Handle the sharding differently once we have swarmed
      # bots on the main waterfall.
      for shard in range(0, config['num_host_shards']):
        tester_name = '%s (%d)' % (name, shard + 1)
        scripts = generate_script_tests(name, shard + 1)
        if scripts:
          tests[tester_name] = {
            'scripts': sorted(scripts, key=lambda x: x['name'])
          }

  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See generate_perf_json.py to make changes'] = {}
  filename = 'chromium.perf.fyi.json' if is_fyi else 'chromium.perf.json'

  current_dir = os.path.dirname(os.path.abspath(__file__))
  src_dir = os.path.dirname(os.path.dirname(current_dir))

  with open(os.path.join(src_dir, 'testing', 'buildbot', filename), 'w') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')

def main():
  waterfall = get_waterfall_config()
  fyi_waterfall = get_fyi_waterfall_config()
  generate_all_tests(fyi_waterfall, True)
  generate_all_tests(waterfall, False)
  return 0

if __name__ == "__main__":
  sys.exit(main())