| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
| 7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory. Maintaining these files by hand is |
| 8 too unwieldy. | 8 too unwieldy. |
| 9 """ | 9 """ |
| 10 | 10 |
| (...skipping 543 matching lines...) |
| 554 # Certain swarming bots are not sharding correctly with the new device affinity | 554 # Certain swarming bots are not sharding correctly with the new device affinity |
| 555 # algorithm. Reverting to legacy algorithm to try and get them to complete. | 555 # algorithm. Reverting to legacy algorithm to try and get them to complete. |
| 556 # See crbug.com/670284 | 556 # See crbug.com/670284 |
| 557 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ | 557 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ |
| 558 'Win Zenbook Perf', | 558 'Win Zenbook Perf', |
| 559 'Win 10 High-DPI Perf', | 559 'Win 10 High-DPI Perf', |
| 560 'Mac HDD Perf', | 560 'Mac HDD Perf', |
| 561 ] | 561 ] |
| 562 | 562 |
| 563 def current_benchmarks(use_whitelist): | 563 def current_benchmarks(use_whitelist): |
| 564 benchmarks_dir = os.path.join(os.getcwd(), 'benchmarks') | 564 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') |
| 565 top_level_dir = os.path.dirname(benchmarks_dir) | 565 top_level_dir = os.path.dirname(benchmarks_dir) |
| 566 | 566 |
| 567 all_benchmarks = discover.DiscoverClasses( | 567 all_benchmarks = discover.DiscoverClasses( |
| 568 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, | 568 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, |
| 569 index_by_class_name=True).values() | 569 index_by_class_name=True).values() |
| 570 # Remove all blacklisted benchmarks | 570 # Remove all blacklisted benchmarks |
| 571 for blacklisted in BENCHMARK_NAME_BLACKLIST: | 571 for blacklisted in BENCHMARK_NAME_BLACKLIST: |
| 572 for benchmark in all_benchmarks: | 572 for benchmark in all_benchmarks: |
| 573 if benchmark.Name() == blacklisted: | 573 if benchmark.Name() == blacklisted: |
| 574 all_benchmarks.remove(benchmark) | 574 all_benchmarks.remove(benchmark) |
| 575 break | 575 break |
| 576 | 576 |
| 577 if use_whitelist: | 577 if use_whitelist: |
| 578 all_benchmarks = ( | 578 all_benchmarks = ( |
| 579 bench for bench in all_benchmarks | 579 bench for bench in all_benchmarks |
| 580 if bench.Name() in BENCHMARK_NAME_WHITELIST) | 580 if bench.Name() in BENCHMARK_NAME_WHITELIST) |
| 581 return sorted(all_benchmarks, key=lambda b: b.Name()) | 581 return sorted(all_benchmarks, key=lambda b: b.Name()) |
| 582 | 582 |
| 583 | 583 |
| 584 # Returns a sorted list of (benchmark, avg_runtime) pairs for every | 584 # Returns a sorted list of (benchmark, avg_runtime) pairs for every |
| 585 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also | 585 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also |
| 586 # returns a list of benchmarks whose run times have not been seen before. | 586 # returns a list of benchmarks whose run times have not been seen before. |
| 587 def get_sorted_benchmark_list_by_time(all_benchmarks): | 587 def get_sorted_benchmark_list_by_time(all_benchmarks): |
| 588 runtime_list = [] | 588 runtime_list = [] |
| 589 benchmark_avgs = {} | 589 benchmark_avgs = {} |
| 590 new_benchmarks = [] | 590 new_benchmarks = [] |
| | 591 timing_file_path = os.path.join(src_dir(), 'tools', 'perf', |
| | 592 'desktop_benchmark_avg_times.json') |
| 591 # Load in the avg times as calculated on Nov 1st, 2016 | 593 # Load in the avg times as calculated on Nov 1st, 2016 |
| 592 with open('desktop_benchmark_avg_times.json') as f: | 594 with open(timing_file_path) as f: |
| 593 benchmark_avgs = json.load(f) | 595 benchmark_avgs = json.load(f) |
| 594 | 596 |
| 595 for benchmark in all_benchmarks: | 597 for benchmark in all_benchmarks: |
| 596 benchmark_avg_time = benchmark_avgs.get(benchmark.Name(), None) | 598 benchmark_avg_time = benchmark_avgs.get(benchmark.Name(), None) |
| 597 if benchmark_avg_time is None: | 599 if benchmark_avg_time is None: |
| 598 # Assume that this is a new benchmark that was added after 11/1/16, | 600 # Assume that this is a new benchmark that was added after 11/1/16, |
| 599 # when the timing data was generated. Add it to the new benchmarks | 601 # when the timing data was generated. Add it to the new benchmarks |
| 600 # list so it can be assigned with the old affinity algorithm after | 602 # list so it can be assigned with the old affinity algorithm after |
| 601 # the rest have kept their existing distribution. | 603 # the rest have kept their existing distribution. |
| 602 new_benchmarks.append(benchmark) | 604 new_benchmarks.append(benchmark) |
| (...skipping 73 matching lines...) |
| 676 scripts = generate_script_tests(waterfall['name'], name, shard + 1) | 678 scripts = generate_script_tests(waterfall['name'], name, shard + 1) |
| 677 if scripts: | 679 if scripts: |
| 678 tests[tester_name] = { | 680 tests[tester_name] = { |
| 679 'scripts': sorted(scripts, key=lambda x: x['name']) | 681 'scripts': sorted(scripts, key=lambda x: x['name']) |
| 680 } | 682 } |
| 681 | 683 |
| 682 tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {} | 684 tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {} |
| 683 tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {} | 685 tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {} |
| 684 filename = '%s.json' % waterfall['name'] | 686 filename = '%s.json' % waterfall['name'] |
| 685 | 687 |
| 686 src_dir = os.path.dirname(os.path.dirname(os.getcwd())) | 688 buildbot_dir = os.path.join(src_dir(), 'testing', 'buildbot') |
| 687 | 689 with open(os.path.join(buildbot_dir, filename), 'w') as fp: |
| 688 with open(os.path.join(src_dir, 'testing', 'buildbot', filename), 'w') as fp: | |
| 689 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 690 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) |
| 690 fp.write('\n') | 691 fp.write('\n') |
| 691 | 692 |
| 692 def chdir_to_parent_directory(): | 693 def src_dir(): |
| 693 parent_directory = os.path.dirname(os.path.abspath(__file__)) | 694 file_path = os.path.abspath(__file__) |
| 694 os.chdir(parent_directory) | 695 return os.path.dirname(os.path.dirname(os.path.dirname(file_path))) |
| 695 | 696 |
| 696 def main(): | 697 def main(): |
| 697 chdir_to_parent_directory() | |
| 698 | |
| 699 waterfall = get_waterfall_config() | 698 waterfall = get_waterfall_config() |
| 700 waterfall['name'] = 'chromium.perf' | 699 waterfall['name'] = 'chromium.perf' |
| 701 fyi_waterfall = get_fyi_waterfall_config() | 700 fyi_waterfall = get_fyi_waterfall_config() |
| 702 fyi_waterfall['name'] = 'chromium.perf.fyi' | 701 fyi_waterfall['name'] = 'chromium.perf.fyi' |
| 703 | 702 |
| 704 generate_all_tests(fyi_waterfall) | 703 generate_all_tests(fyi_waterfall) |
| 705 generate_all_tests(waterfall) | 704 generate_all_tests(waterfall) |
| 706 return 0 | 705 return 0 |
| 707 | 706 |
| 708 if __name__ == '__main__': | 707 if __name__ == '__main__': |
| 709 sys.exit(main()) | 708 sys.exit(main()) |
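A few notes follow the diff for reviewers who want the touched logic in isolation. First, the blacklist/whitelist filtering in `current_benchmarks()`. This is a minimal stand-alone sketch: `FakeBenchmark` and the sample names stand in for real Telemetry benchmark classes and for the module's actual `BENCHMARK_NAME_BLACKLIST`/`BENCHMARK_NAME_WHITELIST` constants, neither of which is shown in this excerpt.

```python
# Minimal sketch of the filtering in current_benchmarks(). FakeBenchmark
# and the sample names below are stand-ins, not the CL's actual data.

BENCHMARK_NAME_BLACKLIST = ['blink_perf.svg']     # hypothetical contents
BENCHMARK_NAME_WHITELIST = ['smoothness.top_25']  # hypothetical contents


class FakeBenchmark(object):
  def __init__(self, name):
    self._name = name

  def Name(self):
    return self._name


def filter_benchmarks(all_benchmarks, use_whitelist):
  # Same effect as the CL's remove-in-a-loop over the blacklist,
  # written as comprehensions over the discovered benchmark list.
  kept = [b for b in all_benchmarks
          if b.Name() not in BENCHMARK_NAME_BLACKLIST]
  if use_whitelist:
    kept = [b for b in kept if b.Name() in BENCHMARK_NAME_WHITELIST]
  return sorted(kept, key=lambda b: b.Name())


benchmarks = [FakeBenchmark(n)
              for n in ('blink_perf.svg', 'smoothness.top_25', 'octane')]
print([b.Name() for b in filter_benchmarks(benchmarks, use_whitelist=True)])
# -> ['smoothness.top_25']
```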
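The thrust of the change is path handling: `chdir_to_parent_directory()` is removed and every path is now joined against the new `src_dir()` helper, so the script no longer depends on the caller's working directory. Since the script lives at `src/tools/perf/generate_perf_json.py`, three `os.path.dirname()` calls walk its absolute path back up to `src/`; the trace comments below are the only addition to the CL's code.

```python
import os

def src_dir():
  file_path = os.path.abspath(__file__)  # .../src/tools/perf/generate_perf_json.py
  return os.path.dirname(                # -> .../src
      os.path.dirname(                   # -> .../src/tools
          os.path.dirname(file_path)))   # -> .../src/tools/perf
```

Both the timing file (`tools/perf/desktop_benchmark_avg_times.json`) and the output directory (`testing/buildbot`) are then resolved from this root, which is why the `os.getcwd()`-relative lookups could be dropped.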
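`get_sorted_benchmark_list_by_time()` loads the Nov 1st, 2016 timing snapshot and splits benchmarks into timed and never-seen groups. The rest of its body is elided from this excerpt, so the sketch below is only an assumed shape: `partition_by_timing` is a hypothetical helper, and the descending sort (longest benchmarks first, a common greedy-sharding heuristic) is not confirmed by the visible lines.

```python
def partition_by_timing(all_benchmarks, benchmark_avgs):
  # benchmark_avgs maps benchmark name -> average runtime in seconds,
  # as loaded from desktop_benchmark_avg_times.json.
  runtime_list = []
  new_benchmarks = []
  for benchmark in all_benchmarks:
    avg = benchmark_avgs.get(benchmark.Name())
    if avg is None:
      # Benchmark added after the snapshot was generated; per the CL's
      # comments it is placed later with the legacy affinity algorithm.
      new_benchmarks.append(benchmark)
    else:
      runtime_list.append((benchmark, avg))
  # Assumed ordering: longest average runtime first.
  runtime_list.sort(key=lambda pair: pair[1], reverse=True)
  return runtime_list, new_benchmarks
```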
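Finally, the `AAAAA1`/`AAAAA2` entries rely on `json.dump(..., sort_keys=True)`: keys are ordered lexicographically, so the sentinel strings land at the top of every generated file and the do-not-edit warning is the first thing a reader sees. A quick demonstration (the tester name is a sample):

```python
import json

tests = {
  'Win Zenbook Perf': {'scripts': []},  # sample tester entry
  'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
  'AAAAA2 See //tools/perf/generate_perf_json.py to make changes': {},
}
# sort_keys=True orders keys lexicographically, so the 'AAAAA*'
# sentinels appear first in the emitted JSON.
print(json.dumps(tests, indent=2, separators=(',', ': '), sort_keys=True))
```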