Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
| 7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory. Maintaining these files by hand is |
| 8 too unwieldy. | 8 too unwieldy. |
| 9 """ | 9 """ |
| 10 | 10 |
| (...skipping 212 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 223 { | 223 { |
| 224 'name': 'Win 7 Nvidia GPU Perf', | 224 'name': 'Win 7 Nvidia GPU Perf', |
| 225 'shards': [2] | 225 'shards': [2] |
| 226 }, | 226 }, |
| 227 ] | 227 ] |
| 228 } | 228 } |
| 229 }, | 229 }, |
| 230 ] | 230 ] |
| 231 | 231 |
def add_tester(waterfall, name, perf_id, platform, target_bits=64,
               num_host_shards=1, num_device_shards=1, swarming=None,
               use_whitelist=False):
  """Registers a tester configuration on the given waterfall dict.

  Args:
    waterfall: Waterfall dict with a 'testers' mapping to populate.
    perf_id: Currently unused (kept for a planned future use).
    platform: Platform string for the tester (e.g. 'win', 'android').
    target_bits: Bitness of the build this tester runs.
    num_host_shards / num_device_shards: Shard counts recorded verbatim.
    swarming: Optional list of swarming dimension dicts; when provided the
        tester is marked as swarmed.
    use_whitelist: Whether this tester runs only whitelisted benchmarks.

  Returns:
    The same waterfall dict, for chaining.
  """
  del perf_id # this will be needed
  tester = {
      'platform': platform,
      'num_device_shards': num_device_shards,
      'num_host_shards': num_host_shards,
      'target_bits': target_bits,
      'use_whitelist': use_whitelist,
  }
  if swarming:
    tester['swarming_dimensions'] = swarming
    tester['swarming'] = True
  waterfall['testers'][name] = tester
  return waterfall
| 247 | 249 |
def get_fyi_waterfall_config():
  """Builds the tester configuration for the chromium.perf.fyi waterfall.

  Returns a waterfall dict (builders list + testers mapping) describing the
  FYI perf testers; all three testers here are data-driven entries registered
  via add_tester.
  """
  waterfall = {'builders':[], 'testers': {}}
  # Non-swarmed Android tester (uses the default host/device shard counts).
  waterfall = add_tester(
    waterfall, 'Android Galaxy S5 Perf',
    'android-galaxy-s5-perf', 'android')
  # Swarmed Win 10 low-end tester: one dimension set, one bot per shard
  # (22 device ids -> 22 shards).
  waterfall = add_tester(
    waterfall, 'Win 10 Low-End Perf Tests',
    'win-10-low-end', 'win',
    swarming=[
      {
       'gpu': '1002:9874',
       'os': 'Windows-10-10586',
       'device_ids': [
           'build171-b4', 'build186-b4', 'build202-b4', 'build203-b4',
           'build204-b4', 'build205-b4', 'build206-b4', 'build207-b4',
           'build208-b4', 'build209-b4', 'build210-b4', 'build211-b4',
           'build212-b4', 'build213-b4', 'build214-b4', 'build215-b4',
           'build216-b4', 'build217-b4', 'build218-b4', 'build219-b4',
           'build220-b4', 'build221-b4']
      }
    ])
  # Swarmed 4-core low-end tester restricted to the benchmark whitelist
  # (two bots, benchmarks round-robined across them).
  waterfall = add_tester(
    waterfall, 'Win 10 4 Core Low-End Perf Tests',
    'win-10-4-core-low-end', 'win',
    swarming=[
      {
       'gpu': '8086:22b1',
       'os': 'Windows-10-10586',
       'device_ids': ['build47-b4', 'build48-b4']
      }
    ],
    use_whitelist=True)
  return waterfall
| 264 | 283 |
| 265 def get_waterfall_config(): | 284 def get_waterfall_config(): |
| 266 waterfall = {'builders':[], 'testers': {}} | 285 waterfall = {'builders':[], 'testers': {}} |
| 267 | 286 |
| 268 # These configurations are taken from chromium_perf.py in | 287 # These configurations are taken from chromium_perf.py in |
| 269 # build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync | 288 # build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync |
| 270 # to generate the correct json for each tester | 289 # to generate the correct json for each tester |
| 271 waterfall = add_tester( | 290 waterfall = add_tester( |
| 272 waterfall, 'Android Galaxy S5 Perf', | 291 waterfall, 'Android Galaxy S5 Perf', |
| (...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 411 for test in SCRIPT_TESTS: | 430 for test in SCRIPT_TESTS: |
| 412 if script_test_enabled_on_tester(master, test, tester_name, shard): | 431 if script_test_enabled_on_tester(master, test, tester_name, shard): |
| 413 script = { | 432 script = { |
| 414 'args': test['args'], | 433 'args': test['args'], |
| 415 'name': test['name'], | 434 'name': test['name'], |
| 416 'script': test['script'] | 435 'script': test['script'] |
| 417 } | 436 } |
| 418 script_tests.append(script) | 437 script_tests.append(script) |
| 419 return script_tests | 438 return script_tests |
| 420 | 439 |
def generate_telemetry_tests(
    tester_config, benchmarks, benchmark_sharding_map, use_whitelist):
  """Generates isolated-script test entries for every benchmark on a tester.

  Each benchmark produces two entries: one on the tester's browser and one on
  the reference browser, both pinned to a single device via swarming 'id'.

  Args:
    tester_config: Tester dict produced by add_tester ('platform',
        'target_bits', 'swarming_dimensions', ...).
    benchmarks: Iterable of benchmark objects exposing a Name() method.
    benchmark_sharding_map: Dict mapping str(num_shards) to a
        {benchmark name: device affinity index} dict.
    use_whitelist: If True, bypass the sharding map and round-robin the
        benchmarks across the tester's devices instead.

  Returns:
    A list of isolated-script test dicts.

  Raises:
    Exception: If the tester has more than one swarming dimension set, if no
        sharding map exists for this shard count (and use_whitelist is off),
        or if a benchmark has no device affinity.
  """
  isolated_scripts = []
  # First determine the browser that you need based on the tester.
  if tester_config['platform'] == 'android':
    browser_name = 'android-chromium'
  elif (tester_config['platform'] == 'win'
        and tester_config['target_bits'] == 64):
    browser_name = 'release_x64'
  else:
    browser_name = 'release'

  # Our current configuration only ever has one set of swarming dimensions;
  # make sure this still holds true.
  if len(tester_config['swarming_dimensions']) > 1:
    raise Exception('Invalid assumption on number of swarming dimensions')
  num_shards = len(tester_config['swarming_dimensions'][0]['device_ids'])
  # num_shards is fixed for the whole tester, so resolve the sharding map
  # once up front (fail fast on a bad config) instead of per benchmark.
  sharding_map = benchmark_sharding_map.get(str(num_shards), None)
  if not sharding_map and not use_whitelist:
    raise Exception('Invalid number of shards, generate new sharding map')

  current_shard = 0
  for benchmark in benchmarks:
    # Figure out the swarming dimensions this test needs to be triggered on.
    # For each set of dimensions it is only triggered on one of the devices.
    swarming_dimensions = []
    for dimension in tester_config['swarming_dimensions']:
      if use_whitelist:
        # Whitelisted benchmarks are simply round-robined across devices.
        device_affinity = current_shard
      else:
        device_affinity = sharding_map.get(benchmark.Name(), None)
      if device_affinity is None:
        raise Exception('Device affinity for benchmark %s not found'
          % benchmark.Name())
      device_id = dimension['device_ids'][device_affinity]
      # Id is unique within the swarming pool so it is the only needed
      # identifier for the bot to run the test on.
      swarming_dimensions.append({
        'id': device_id,
        'gpu': dimension['gpu'],
        'os': dimension['os'],
        'pool': 'Chrome-perf',
      })

    test = generate_telemetry_test(
      swarming_dimensions, benchmark.Name(), browser_name)
    isolated_scripts.append(test)
    # Now create another executable for this benchmark on the reference
    # browser.
    reference_test = generate_telemetry_test(
      swarming_dimensions, benchmark.Name(), 'reference')
    isolated_scripts.append(reference_test)
    # Advance the round-robin cursor, wrapping at num_shards.
    current_shard = (current_shard + 1) % num_shards

  return isolated_scripts
| 466 | 499 |
| 467 | 500 |
| 468 BENCHMARK_NAME_WHITELIST = set([ | 501 BENCHMARK_NAME_WHITELIST = set([ |
| 469 u'smoothness.top_25_smooth', | 502 u'smoothness.top_25_smooth', |
| 470 u'sunspider', | 503 u'sunspider', |
| 471 u'system_health.webview_startup', | 504 u'system_health.webview_startup', |
| 472 u'page_cycler_v2.intl_hi_ru', | 505 u'page_cycler_v2.intl_hi_ru', |
| 473 u'dromaeo.cssqueryjquery', | 506 u'dromaeo.cssqueryjquery', |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 551 benchmark_to_shard_dict[benchmark[0].Name()] = min_index | 584 benchmark_to_shard_dict[benchmark[0].Name()] = min_index |
| 552 shard_execution_times[min_index] += benchmark[1] | 585 shard_execution_times[min_index] += benchmark[1] |
| 553 # For all the benchmarks that didn't have avg run times, use the default | 586 # For all the benchmarks that didn't have avg run times, use the default |
| 554 # device affinity algorithm | 587 # device affinity algorithm |
| 555 for benchmark in new_benchmarks: | 588 for benchmark in new_benchmarks: |
| 556 device_affinity = bot_utils.GetDeviceAffinity(num_shards, benchmark.Name()) | 589 device_affinity = bot_utils.GetDeviceAffinity(num_shards, benchmark.Name()) |
| 557 benchmark_to_shard_dict[benchmark.Name()] = device_affinity | 590 benchmark_to_shard_dict[benchmark.Name()] = device_affinity |
| 558 return benchmark_to_shard_dict | 591 return benchmark_to_shard_dict |
| 559 | 592 |
| 560 | 593 |
| 561 def generate_all_tests(waterfall, use_whitelist): | 594 def generate_all_tests(waterfall): |
| 562 tests = {} | 595 tests = {} |
| 563 for builder in waterfall['builders']: | 596 for builder in waterfall['builders']: |
| 564 tests[builder] = {} | 597 tests[builder] = {} |
| 565 all_benchmarks = current_benchmarks(use_whitelist) | 598 all_benchmarks = current_benchmarks(False) |
| 599 whitelist_benchmarks = current_benchmarks(True) | |
| 566 # Get benchmark sharding according to common sharding configurations | 600 # Get benchmark sharding according to common sharding configurations |
| 567 # Currently we only have bots sharded 5 directions and 1 direction | 601 # Currently we only have bots sharded 22, 5 and 1 directions |
| 568 benchmark_sharding_map = {} | 602 benchmark_sharding_map = {} |
| 603 benchmark_sharding_map['22'] = shard_benchmarks(22, all_benchmarks) | |
| 569 benchmark_sharding_map['5'] = shard_benchmarks(5, all_benchmarks) | 604 benchmark_sharding_map['5'] = shard_benchmarks(5, all_benchmarks) |
| 570 benchmark_sharding_map['1'] = shard_benchmarks(1, all_benchmarks) | 605 benchmark_sharding_map['1'] = shard_benchmarks(1, all_benchmarks) |
| 571 | 606 |
| 572 for name, config in waterfall['testers'].iteritems(): | 607 for name, config in waterfall['testers'].iteritems(): |
| 608 use_whitelist = config['use_whitelist'] | |
| 609 benchmark_list = all_benchmarks | |
| 610 if use_whitelist: | |
| 611 benchmark_list = whitelist_benchmarks | |
| 573 if config.get('swarming', False): | 612 if config.get('swarming', False): |
| 574 # Right now we are only generating benchmarks for the fyi waterfall | 613 # Right now we are only generating benchmarks for the fyi waterfall |
| 575 isolated_scripts = generate_telemetry_tests( | 614 isolated_scripts = generate_telemetry_tests( |
| 576 config, all_benchmarks, benchmark_sharding_map) | 615 config, benchmark_list, benchmark_sharding_map, use_whitelist) |
| 577 tests[name] = { | 616 tests[name] = { |
| 578 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name']) | 617 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name']) |
| 579 } | 618 } |
| 580 else: | 619 else: |
| 581 # scripts are only currently run in addition to the main waterfall. They | 620 # scripts are only currently run in addition to the main waterfall. They |
| 582 # are currently the only thing generated in the perf json file. | 621 # are currently the only thing generated in the perf json file. |
| 583 # TODO eyaich: will need to handle the sharding differently when we have | 622 # TODO eyaich: will need to handle the sharding differently when we have |
| 584 # swarmed bots on the main waterfall. | 623 # swarmed bots on the main waterfall. |
| 585 for shard in range(0, config['num_host_shards']): | 624 for shard in range(0, config['num_host_shards']): |
| 586 tester_name = '%s (%d)' % (name, shard + 1) | 625 tester_name = '%s (%d)' % (name, shard + 1) |
| (...skipping 14 matching lines...) Expand all Loading... | |
| 601 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 640 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) |
| 602 fp.write('\n') | 641 fp.write('\n') |
| 603 | 642 |
| 604 | 643 |
def main():
  """Regenerates the json test specs for both perf waterfalls.

  Returns:
    0, as the process exit code.
  """
  # Generate the fyi waterfall first, then the main perf waterfall, matching
  # the historical output order.
  for build_config, waterfall_name in (
      (get_fyi_waterfall_config, 'chromium.perf.fyi'),
      (get_waterfall_config, 'chromium.perf')):
    waterfall = build_config()
    waterfall['name'] = waterfall_name
    generate_all_tests(waterfall)
  return 0
| 614 | 653 |
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| OLD | NEW |