Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
| 7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf |
| 8 too unwieldy. | 8 directory. Maintaining these files by hand is too unwieldy. |
| 9 """ | 9 """ |
| 10 import argparse | 10 import argparse |
| | 11 import collections |
| | 12 import csv |
| 11 import json | 13 import json |
| 12 import os | 14 import os |
| | 15 import re |
| 13 import sys | 16 import sys |
| | 17 import sets |
| 14 | 18 |
| 15 from chrome_telemetry_build import chromium_config | 19 from chrome_telemetry_build import chromium_config |
| 16 | 20 |
| 17 sys.path.append(chromium_config.GetTelemetryDir()) | 21 sys.path.append(chromium_config.GetTelemetryDir()) |
| 18 from telemetry import benchmark as benchmark_module | 22 from telemetry import benchmark as benchmark_module |
| | 23 from telemetry import decorators |
| 19 from telemetry.core import discover | 24 from telemetry.core import discover |
| 20 from telemetry.util import bot_utils | 25 from telemetry.util import bot_utils |
| 21 | 26 |
| 22 | 27 |
| 23 SCRIPT_TESTS = [ | 28 SCRIPT_TESTS = [ |
| 24 { | 29 { |
| 25 'args': [ | 30 'args': [ |
| 26 'gpu_perftests', | 31 'gpu_perftests', |
| 27 '--adb-path', | 32 '--adb-path', |
| 28 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', | 33 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', |
| (...skipping 763 matching lines...) | |
| 792 def tests_are_up_to_date(waterfall): | 797 def tests_are_up_to_date(waterfall): |
| 793 tests = generate_all_tests(waterfall) | 798 tests = generate_all_tests(waterfall) |
| 794 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), | 799 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), |
| 795 sort_keys=True) | 800 sort_keys=True) |
| 796 config_file = get_json_config_file_for_waterfall(waterfall) | 801 config_file = get_json_config_file_for_waterfall(waterfall) |
| 797 with open(config_file, 'r') as fp: | 802 with open(config_file, 'r') as fp: |
| 798 config_data = fp.read().strip() | 803 config_data = fp.read().strip() |
| 799 return tests_data == config_data | 804 return tests_data == config_data |
| 800 | 805 |
| 801 | 806 |
| 802 def update_all_tests(waterfall): | 807 def update_all_tests(waterfalls): |
| 803 tests = generate_all_tests(waterfall) | 808 all_tests = {} |
| 804 config_file = get_json_config_file_for_waterfall(waterfall) | 809 for w in waterfalls: |
| 805 with open(config_file, 'w') as fp: | 810 tests = generate_all_tests(w) |
| 806 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 811 config_file = get_json_config_file_for_waterfall(w) |
| 807 fp.write('\n') | 812 with open(config_file, 'w') as fp: |
| | 813 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) |
| | 814 fp.write('\n') |
| | 815 all_tests.update(tests) |
| | 816 verify_all_tests_in_benchmark_csv(all_tests, get_all_benchmarks_metadata()) |
| 808 | 817 |
| 809 | 818 |
| 810 def src_dir(): | 819 def src_dir(): |
| 811 file_path = os.path.abspath(__file__) | 820 file_path = os.path.abspath(__file__) |
| 812 return os.path.dirname(os.path.dirname( | 821 return os.path.dirname(os.path.dirname( |
| 813 os.path.dirname(os.path.dirname(file_path)))) | 822 os.path.dirname(os.path.dirname(file_path)))) |
| 814 | 823 |
| 815 | 824 |
| | 825 BenchmarkMetadata = collections.namedtuple( |
| | 826 'BenchmarkMetadata', 'emails component') |
| | 827 NON_TELEMETRY_BENCHMARKS = { |
| | 828 "angle_perftests": BenchmarkMetadata("jmadill@chromium.org", None), |
nednguyen
2017/03/21 21:56:03
nits: strings should always be singly quoted
ashleymarie1
2017/03/22 19:17:11
Thanks! Updated all the double quotes I added to single quotes.
nednguyen
2017/03/22 19:22:56
It's just our convention to use singly quoted strings.
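For reference, here is the convention the thread is pointing at, restated as a small runnable snippet. This is only an illustration, not part of the CL: the entries are copied from the diff with just the quote style changed.

```python
import collections

# Same namedtuple as in the CL.
BenchmarkMetadata = collections.namedtuple(
    'BenchmarkMetadata', 'emails component')

# Two of the entries from the diff, rewritten with the singly quoted
# strings the Chromium Python convention asks for.
NON_TELEMETRY_BENCHMARKS = {
    'angle_perftests': BenchmarkMetadata('jmadill@chromium.org', None),
    'cc_perftests': BenchmarkMetadata('enne@chromium.org', None),
}
```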
| 829 "cc_perftests": BenchmarkMetadata("enne@chromium.org", None), | |
| 830 "gpu_perftests": BenchmarkMetadata("reveman@chromium.org", None), | |
| 831 "tracing_perftests": BenchmarkMetadata( | |
| 832 "kkraynov@chromium.org, primiano@chromium.org", None), | |
| 833 "load_library_perf_tests": BenchmarkMetadata(None, None), | |
| 834 "media_perftests": BenchmarkMetadata("crouleau@chromium.org", None), | |
| 835 "performance_browser_tests": BenchmarkMetadata( | |
| 836 "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None) | |
| 837 } | |
| 838 | |
| 839 | |
| 840 # Returns a dictionary mapping benchmark name to benchmark owner metadata | |
| 841 def get_all_benchmarks_metadata(): | |
| | 842 metadata = dict(NON_TELEMETRY_BENCHMARKS) |
| | 843 benchmark_list = current_benchmarks(False) |
| | 844 |
| | 845 for benchmark in benchmark_list: |
| | 846 emails = decorators.GetEmails(benchmark) |
| | 847 if emails: |
| | 848 emails = ", ".join(emails) |
| | 849 metadata[benchmark.Name()] = BenchmarkMetadata( |
| | 850 emails, decorators.GetComponent(benchmark)) |
| | 851 return metadata |
| | 852 |
| | 853 |
| | 854 def verify_all_tests_in_benchmark_csv(tests, benchmark_metadata): |
| | 855 benchmark_names = sets.Set(benchmark_metadata) |
| | 856 test_names = sets.Set() |
| | 857 for t in tests: |
| | 858 scripts = [] |
| | 859 if 'isolated_scripts' in tests[t]: |
| | 860 scripts = tests[t]['isolated_scripts'] |
| | 861 elif 'scripts' in tests[t]: |
| | 862 scripts = tests[t]['scripts'] |
| | 863 else: |
| | 864 assert('Android Compile' == t |
| | 865 or 'Android arm64 Compile' == t |
| | 866 or t.startswith('AAAAA')), 'Unknown test data %s' % t |
| | 867 for s in scripts: |
| | 868 name = s['name'] |
| | 869 name = re.sub('\\.reference$', '', name) |
| | 870 test_names.add(name) |
| | 871 |
| | 872 error_messages = [] |
| | 873 for test in benchmark_names - test_names: |
| | 874 error_messages.append('Remove ' + test + ' from NON_TELEMETRY_BENCHMARKS') |
| | 875 for test in test_names - benchmark_names: |
| | 876 error_messages.append('Add ' + test + ' to NON_TELEMETRY_BENCHMARKS') |
| | 877 |
| | 878 assert benchmark_names == test_names, ('Please update ' |
| | 879 'NON_TELEMETRY_BENCHMARKS as below:\n' + '\n'.join(error_messages)) |
| | 880 |
| | 881 |
| | 882 def update_benchmark_csv(): |
| | 883 """Updates go/chrome-benchmarks. |
| | 884 |
| | 885 Updates tools/perf/benchmark.csv containing the current benchmark names, |
| | 886 owners, and components. |
| | 887 """ |
| | 888 header_data = [['AUTOGENERATED FILE DO NOT EDIT'], |
| | 889 ['See //tools/perf/generate_perf_data.py to make changes'], |
| | 890 ['Benchmark name', 'Individual owners', 'Component'] |
| | 891 ] |
| | 892 |
| | 893 csv_data = [] |
| | 894 benchmark_metadata = get_all_benchmarks_metadata() |
| | 895 for benchmark_name in benchmark_metadata: |
| | 896 csv_data.append([ |
| | 897 benchmark_name, |
| | 898 benchmark_metadata[benchmark_name].emails, |
| | 899 benchmark_metadata[benchmark_name].component |
| | 900 ]) |
| | 901 |
| | 902 csv_data = sorted(csv_data, key=lambda b: b[0]) |
| | 903 csv_data = header_data + csv_data |
| | 904 |
| | 905 perf_dir = os.path.join(src_dir(), 'tools', 'perf') |
| | 906 benchmark_file = os.path.join(perf_dir, 'benchmark.csv') |
| | 907 with open(benchmark_file, 'wb') as f: |
| | 908 writer = csv.writer(f) |
| | 909 writer.writerows(csv_data) |
| | 910 |
| | 911 |
| 816 def main(args): | 912 def main(args): |
| 817 parser = argparse.ArgumentParser( | 913 parser = argparse.ArgumentParser( |
| 818 description=('Generate perf test\' json config. This need to be done ' | 914 description=('Generate perf tests\' json config and benchmark.csv. ' |
| 819 'anytime you add/remove any existing benchmarks in ' | 915 'This needs to be done anytime you add/remove any existing ' |
| 820 'tools/perf/benchmarks.')) | 916 'benchmarks in tools/perf/benchmarks.')) |
| 821 parser.add_argument( | 917 parser.add_argument( |
| 822 '--validate-only', action='store_true', default=False, | 918 '--validate-only', action='store_true', default=False, |
| 823 help=('Validate whether the perf json generated will be the same as the ' | 919 help=('Validate whether the perf json generated will be the same as the ' |
| 824 'existing configs. This does not change the content of existing ' | 920 'existing configs. This does not change the content of existing ' |
| 825 'configs')) | 921 'configs')) |
| 826 options = parser.parse_args(args) | 922 options = parser.parse_args(args) |
| 827 | 923 |
| 828 waterfall = get_waterfall_config() | 924 waterfall = get_waterfall_config() |
| 829 waterfall['name'] = 'chromium.perf' | 925 waterfall['name'] = 'chromium.perf' |
| 830 fyi_waterfall = get_fyi_waterfall_config() | 926 fyi_waterfall = get_fyi_waterfall_config() |
| 831 fyi_waterfall['name'] = 'chromium.perf.fyi' | 927 fyi_waterfall['name'] = 'chromium.perf.fyi' |
| 832 | 928 |
| 833 if options.validate_only: | 929 if options.validate_only: |
nednguyen
2017/03/21 21:59:58
actually I think this should also check for verify_all_tests_in_benchmark_csv
ashleymarie1
2017/03/22 19:17:11
Good point! I hadn't thought about that
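A minimal sketch of what the reviewer seems to be suggesting, assuming a hypothetical helper named benchmark_csv_is_up_to_date(): the --validate-only branch would also fail when the checked-in benchmark.csv no longer matches what update_benchmark_csv() would regenerate. It reuses get_all_benchmarks_metadata() from this CL; the helper name and wiring are assumptions, not part of the change.

```python
import csv
import os

def benchmark_csv_is_up_to_date(perf_dir):
    # Rebuild the rows exactly the way update_benchmark_csv() does, then
    # compare them against the checked-in file.
    metadata = get_all_benchmarks_metadata()
    # csv.writer serializes None as an empty string, so normalize here too.
    expected = sorted(
        [name, metadata[name].emails or '', metadata[name].component or '']
        for name in metadata)
    benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
    with open(benchmark_file, 'rb') as f:
        rows = list(csv.reader(f))[3:]  # skip the three header rows
    return rows == expected
```

The validate-only branch could then read something like `if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall) and benchmark_csv_is_up_to_date(os.path.join(src_dir(), 'tools', 'perf')):`.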
| 834 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): | 930 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): |
| 835 print 'All the perf JSON config files are up-to-date. \\o/' | 931 print 'All the perf JSON config files are up-to-date. \\o/' |
| 836 return 0 | 932 return 0 |
| 837 else: | 933 else: |
| 838 print ('The perf JSON config files are not up-to-date. Please run %s ' | 934 print ('The perf JSON config files are not up-to-date. Please run %s ' |
| 839 'without --validate-only flag to update the perf JSON ' | 935 'without --validate-only flag to update the perf JSON ' |
| 840 'configs.') % sys.argv[0] | 936 'configs and benchmark.csv.') % sys.argv[0] |
| 841 return 1 | 937 return 1 |
| 842 else: | 938 else: |
| 843 update_all_tests(fyi_waterfall) | 939 update_all_tests([fyi_waterfall, waterfall]) |
| 844 update_all_tests(waterfall) | 940 update_benchmark_csv() |
| 845 return 0 | 941 return 0 |