Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
| 7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf |
|
ashleymarie1
2017/03/16 17:09:49
Is there a better directory for benchmark.csv to exist in?
| |
| 8 too unwieldy. | 8 directory. Maintaining these files by hand is too unwieldy. |
| 9 """ | 9 """ |
| 10 import argparse | 10 import argparse |
| 11 import csv | |
| 11 import json | 12 import json |
| 12 import os | 13 import os |
| 13 import sys | 14 import sys |
| 14 | 15 |
| 15 from chrome_telemetry_build import chromium_config | 16 from chrome_telemetry_build import chromium_config |
| 16 | 17 |
| 17 sys.path.append(chromium_config.GetTelemetryDir()) | 18 sys.path.append(chromium_config.GetTelemetryDir()) |
| 18 from telemetry import benchmark as benchmark_module | 19 from telemetry import benchmark as benchmark_module |
| 20 from telemetry import decorators | |
| 19 from telemetry.core import discover | 21 from telemetry.core import discover |
| 20 from telemetry.util import bot_utils | 22 from telemetry.util import bot_utils |
| 21 | 23 |
| 22 | 24 |
| 23 SCRIPT_TESTS = [ | 25 SCRIPT_TESTS = [ |
| 24 { | 26 { |
| 25 'args': [ | 27 'args': [ |
| 26 'gpu_perftests', | 28 'gpu_perftests', |
| 27 '--adb-path', | 29 '--adb-path', |
| 28 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', | 30 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', |
| (...skipping 618 matching lines...) | |
| 647 } | 649 } |
| 648 | 650 |
| 649 # Certain swarming bots are not sharding correctly with the new device affinity | 651 # Certain swarming bots are not sharding correctly with the new device affinity |
| 650 # algorithm. Reverting to legacy algorithm to try and get them to complete. | 652 # algorithm. Reverting to legacy algorithm to try and get them to complete. |
| 651 # See crbug.com/670284 | 653 # See crbug.com/670284 |
| 652 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ | 654 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ |
| 653 'Win Zenbook Perf', | 655 'Win Zenbook Perf', |
| 654 'Win 10 High-DPI Perf', | 656 'Win 10 High-DPI Perf', |
| 655 ] | 657 ] |
| 656 | 658 |
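As context for the fallback list above, here is a minimal sketch of the shape of the decision it feeds: bots on the legacy list keep the old hash-based device affinity, everything else uses the newer algorithm. The helper names below are illustrative assumptions, not the actual bot_utils API imported at the top of this file:

    import hashlib

    def _legacy_affinity(benchmark_name, num_devices):
      # Stable hash so a given benchmark always lands on the same device.
      digest = hashlib.sha1(benchmark_name).hexdigest()
      return int(digest, 16) % num_devices

    def device_affinity(bot_name, benchmark_name, num_devices, new_algorithm):
      # Hypothetical selector: legacy list wins, otherwise defer to the
      # caller-supplied new algorithm (see crbug.com/670284).
      if bot_name in LEGACY_DEVICE_AFFIINITY_ALGORITHM:
        return _legacy_affinity(benchmark_name, num_devices)
      return new_algorithm(benchmark_name, num_devices)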
| 657 def current_benchmarks(use_whitelist): | 659 def current_benchmarks(use_whitelist, use_blacklist=True): |
| 658 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') | 660 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') |
| 659 top_level_dir = os.path.dirname(benchmarks_dir) | 661 top_level_dir = os.path.dirname(benchmarks_dir) |
| 660 | 662 |
| 661 all_benchmarks = discover.DiscoverClasses( | 663 all_benchmarks = discover.DiscoverClasses( |
| 662 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, | 664 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, |
| 663 index_by_class_name=True).values() | 665 index_by_class_name=True).values() |
| 664 # Remove all blacklisted benchmarks | 666 |
| 665 for blacklisted in BENCHMARK_NAME_BLACKLIST: | 667 if use_blacklist: |
| 666 for benchmark in all_benchmarks: | 668 # Remove all blacklisted benchmarks |
| 667 if benchmark.Name() == blacklisted: | 669 for blacklisted in BENCHMARK_NAME_BLACKLIST: |
| 668 all_benchmarks.remove(benchmark) | 670 for benchmark in all_benchmarks: |
| 669 break | 671 if benchmark.Name() == blacklisted: |
| 672 all_benchmarks.remove(benchmark) | |
| 673 break | |
| 670 | 674 |
| 671 if use_whitelist: | 675 if use_whitelist: |
| 672 all_benchmarks = ( | 676 all_benchmarks = ( |
| 673 bench for bench in all_benchmarks | 677 bench for bench in all_benchmarks |
| 674 if bench.Name() in BENCHMARK_NAME_WHITELIST) | 678 if bench.Name() in BENCHMARK_NAME_WHITELIST) |
| 675 return sorted(all_benchmarks, key=lambda b: b.Name()) | 679 return sorted(all_benchmarks, key=lambda b: b.Name()) |
| 676 | 680 |
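Two details of current_benchmarks worth noting: the remove-inside-a-loop is safe only because of the break (at most one element is removed per outer pass), and the whitelist branch builds a generator that sorted() then consumes. A self-contained sketch of the same filtering order, using stand-in classes rather than real telemetry benchmarks:

    class FakeBenchmark(object):
      """Stand-in for a telemetry Benchmark class (illustrative only)."""
      def __init__(self, name):
        self._name = name
      def Name(self):
        return self._name

    all_benchmarks = [FakeBenchmark(n) for n in ('v8', 'dromaeo', 'smoothness')]
    BLACKLIST = ('dromaeo',)
    WHITELIST = ('smoothness', 'v8')

    # Blacklist first, then whitelist, then sort by name -- same order as above.
    filtered = [b for b in all_benchmarks if b.Name() not in BLACKLIST]
    filtered = (b for b in filtered if b.Name() in WHITELIST)
    print [b.Name() for b in sorted(filtered, key=lambda b: b.Name())]
    # ['smoothness', 'v8']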
| 677 | 681 |
| 678 # Returns a sorted list of (benchmark, avg_runtime) pairs for every | 682 # Returns a sorted list of (benchmark, avg_runtime) pairs for every |
| 679 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also | 683 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also |
| (...skipping 92 matching lines...) | |
| 772 scripts = generate_script_tests(waterfall['name'], name, shard + 1) | 776 scripts = generate_script_tests(waterfall['name'], name, shard + 1) |
| 773 if scripts: | 777 if scripts: |
| 774 tests[tester_name] = { | 778 tests[tester_name] = { |
| 775 'scripts': sorted(scripts, key=lambda x: x['name']) | 779 'scripts': sorted(scripts, key=lambda x: x['name']) |
| 776 } | 780 } |
| 777 | 781 |
| 778 for name, config in waterfall['builders'].iteritems(): | 782 for name, config in waterfall['builders'].iteritems(): |
| 779 tests[name] = config | 783 tests[name] = config |
| 780 | 784 |
| 781 tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {} | 785 tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {} |
| 782 tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {} | 786 tests['AAAAA2 See //tools/perf/generate_perf_data.py to make changes'] = {} |
| 783 return tests | 787 return tests |
| 784 | 788 |
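The odd 'AAAAA1'/'AAAAA2' keys are a deliberate trick: the config is serialized with sort_keys=True (see the json.dump call below), so these entries sort ahead of every real bot name and render as a do-not-edit banner at the top of the generated file. A quick demonstration:

    import json
    tests = {
        'Win Zenbook Perf': {},
        'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
        'AAAAA2 See //tools/perf/generate_perf_data.py to make changes': {},
    }
    print json.dumps(tests, indent=2, sort_keys=True)
    # The two 'AAAAA...' keys appear first in the output.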
| 785 | 789 |
| 786 def get_json_config_file_for_waterfall(waterfall): | 790 def get_json_config_file_for_waterfall(waterfall): |
| 787 filename = '%s.json' % waterfall['name'] | 791 filename = '%s.json' % waterfall['name'] |
| 788 buildbot_dir = os.path.join(src_dir(), 'testing', 'buildbot') | 792 buildbot_dir = os.path.join(src_dir(), 'testing', 'buildbot') |
| 789 return os.path.join(buildbot_dir, filename) | 793 return os.path.join(buildbot_dir, filename) |
| 790 | 794 |
| 791 | 795 |
| 792 def tests_are_up_to_date(waterfall): | 796 def tests_are_up_to_date(waterfall): |
| (...skipping 13 matching lines...) | |
| 806 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 810 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) |
| 807 fp.write('\n') | 811 fp.write('\n') |
| 808 | 812 |
| 809 | 813 |
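The dump above pins indent, separators, and sort_keys so the serialization is byte-for-byte reproducible; that is what lets tests_are_up_to_date (elided above) compare a regenerated config against the file on disk as plain text. A sketch of that comparison under those assumptions -- the helper names here are illustrative, not the elided implementation:

    import json

    def config_text(tests):
      # Deterministic: the same dict always serializes to the same bytes.
      return json.dumps(
          tests, indent=2, separators=(',', ': '), sort_keys=True) + '\n'

    def is_up_to_date(tests, json_path):
      with open(json_path) as fp:
        return fp.read() == config_text(tests)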
| 810 def src_dir(): | 814 def src_dir(): |
| 811 file_path = os.path.abspath(__file__) | 815 file_path = os.path.abspath(__file__) |
| 812 return os.path.dirname(os.path.dirname( | 816 return os.path.dirname(os.path.dirname( |
| 813 os.path.dirname(os.path.dirname(file_path)))) | 817 os.path.dirname(os.path.dirname(file_path)))) |
| 814 | 818 |
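src_dir() strips four path components from this file's absolute path, which assumes the module sits one directory below tools/perf (e.g. tools/perf/core/). A worked example with a hypothetical checkout path:

    import os.path

    p = '/work/src/tools/perf/core/this_script.py'  # illustrative path
    for _ in range(4):  # four os.path.dirname calls, as in src_dir()
      p = os.path.dirname(p)
    print p  # /work/src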
| 815 | 819 |
| 820 MANUAL_BENCHMARKS = [ | |
| 821 ["angle_perftests", "jmadill@chromium.org", None], | |
| 822 ["cc_perftest", "enne@chromium.org", None], | |
| 823 ["gpu_perftests", "reveman@chromium.org", None], | |
| 824 ["tracing_perftests", "kkraynov@chromium.org, primiano@chromium.org", | |
| 825 None], | |
| 826 ["indexeddb_perf", "cmumford@chromium.org", None], | |
| 827 ["load_library_perf_tests", None, None], | |
| 828 ["media_perftests", "crouleau@chromium.org", None], | |
| 829 ["performance_browser_tests", | |
| 830 "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None] | |
| 831 ] | |
| 832 | |
| 833 | |
| 834 def update_benchmark_csv(): | |
| 835 """Updates go/chrome-benchmarks. | |
| 836 | |
| 837 Updates tools/perf/benchmark.csv containing the current benchmark names, | |
| 838 owners, and components. | |
| 839 """ | |
| 840 benchmark_list = current_benchmarks( | |
| 841 use_whitelist=False, use_blacklist=False) | |
| 842 data = list(MANUAL_BENCHMARKS) # copy so appends don't mutate the global | |
| 843 for benchmark in benchmark_list: | |
| 844 emails = decorators.GetEmails(benchmark) | |
| 845 if emails: | |
| 846 emails = ", ".join(emails) | |
| 847 data.append([ | |
| 848 benchmark.Name(), | |
| 849 emails, | |
| 850 decorators.GetComponent(benchmark) | |
| 851 ]) | |
| 852 | |
| 853 data = sorted(data, key=lambda b: b[0]) | |
| 854 | |
| 855 perf_dir = os.path.join(src_dir(), 'tools', 'perf') | |
| 856 csv_path = os.path.join(perf_dir, 'benchmark.csv') | |
| 857 with open(csv_path, 'wb') as f: | |
| 858 writer = csv.writer(f) | |
| 859 writer.writerows(data) | |
| 860 | |
| 861 | |
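Given the three columns written above (benchmark name, comma-joined owner emails, component), the generated benchmark.csv can be consumed like any CSV. Note the py2-style 'wb'/'rb' modes, and that csv.writer serializes a None field as an empty string. A small illustrative reader:

    import csv

    with open('benchmark.csv', 'rb') as f:  # 'rb' pairs with the 'wb' writer above
      for name, owners, component in csv.reader(f):
        print '%-30s owners=[%s] component=[%s]' % (name, owners, component)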
| 816 def main(args): | 862 def main(args): |
| 817 parser = argparse.ArgumentParser( | 863 parser = argparse.ArgumentParser( |
| 818 description=('Generate perf test\' json config. This need to be done ' | 864 description=('Generate perf test\' json config. This need to be done ' |
| 819 'anytime you add/remove any existing benchmarks in ' | 865 'anytime you add/remove any existing benchmarks in ' |
| 820 'tools/perf/benchmarks.')) | 866 'tools/perf/benchmarks.')) |
| 821 parser.add_argument( | 867 parser.add_argument( |
| 822 '--validate-only', action='store_true', default=False, | 868 '--validate-only', action='store_true', default=False, |
| 823 help=('Validate whether the perf json generated will be the same as the ' | 869 help=('Validate whether the perf json generated will be the same as the ' |
| 824 'existing configs. This does not change the content of existing ' | 870 'existing configs. This does not change the content of existing ' |
| 825 'configs')) | 871 'configs')) |
| 826 options = parser.parse_args(args) | 872 options = parser.parse_args(args) |
| 827 | 873 |
| 874 update_benchmark_csv() | |
|
ashleymarie1
2017/03/16 17:09:49
Should I add an argument to not update the benchmark csv?
| |
| 875 | |
| 828 waterfall = get_waterfall_config() | 876 waterfall = get_waterfall_config() |
| 829 waterfall['name'] = 'chromium.perf' | 877 waterfall['name'] = 'chromium.perf' |
| 830 fyi_waterfall = get_fyi_waterfall_config() | 878 fyi_waterfall = get_fyi_waterfall_config() |
| 831 fyi_waterfall['name'] = 'chromium.perf.fyi' | 879 fyi_waterfall['name'] = 'chromium.perf.fyi' |
| 832 | 880 |
| 833 if options.validate_only: | 881 if options.validate_only: |
| 834 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): | 882 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): |
| 835 print 'All the perf JSON config files are up-to-date. \\o/' | 883 print 'All the perf JSON config files are up-to-date. \\o/' |
| 836 return 0 | 884 return 0 |
| 837 else: | 885 else: |
| 838 print ('The perf JSON config files are not up-to-date. Please run %s ' | 886 print ('The perf JSON config files are not up-to-date. Please run %s ' |
| 839 'without --validate-only flag to update the perf JSON ' | 887 'without --validate-only flag to update the perf JSON ' |
| 840 'configs.') % sys.argv[0] | 888 'configs.') % sys.argv[0] |
| 841 return 1 | 889 return 1 |
| 842 else: | 890 else: |
| 843 update_all_tests(fyi_waterfall) | 891 update_all_tests(fyi_waterfall) |
| 844 update_all_tests(waterfall) | 892 update_all_tests(waterfall) |
| 845 return 0 | 893 return 0 |
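A usage sketch, assuming the standard entry-point boilerplate (elided from the diff above) passes sys.argv[1:] to main and the script name matches the banner text. Note that update_benchmark_csv() runs before the --validate-only check, so even a validate-only run rewrites benchmark.csv -- which is exactly what the reviewer's question above is getting at:

    # Assumed entry-point boilerplate (not shown in the diff):
    if __name__ == '__main__':
      sys.exit(main(sys.argv[1:]))

    # Typical invocations, from the checkout root:
    #   python tools/perf/generate_perf_data.py                  # regenerate everything
    #   python tools/perf/generate_perf_data.py --validate-only  # check only; exits 1 if stale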