Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(17)

Side by Side Diff: tools/perf/core/perf_data_generator.py

Issue 2773743002: Updating the script to include sizes and resource sizes benchmarks (Closed)
Patch Set: retrying upload after more git magic Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « tools/perf/benchmark.csv ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
8 directory. Maintaining these files by hand is too unwieldy. 8 directory. Maintaining these files by hand is too unwieldy.
9 """ 9 """
10 import argparse 10 import argparse
(...skipping 788 matching lines...) Expand 10 before | Expand all | Expand 10 after
799 all_tests = {} 799 all_tests = {}
800 for w in waterfalls: 800 for w in waterfalls:
801 tests = generate_all_tests(w) 801 tests = generate_all_tests(w)
802 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), 802 tests_data = json.dumps(tests, indent=2, separators=(',', ': '),
803 sort_keys=True) 803 sort_keys=True)
804 config_file = get_json_config_file_for_waterfall(w) 804 config_file = get_json_config_file_for_waterfall(w)
805 with open(config_file, 'r') as fp: 805 with open(config_file, 'r') as fp:
806 config_data = fp.read().strip() 806 config_data = fp.read().strip()
807 all_tests.update(tests) 807 all_tests.update(tests)
808 up_to_date &= tests_data == config_data 808 up_to_date &= tests_data == config_data
809 verify_all_tests_in_benchmark_csv(all_tests, get_all_benchmarks_metadata()) 809 verify_all_tests_in_benchmark_csv(all_tests,
810 get_all_waterfall_benchmarks_metadata())
810 return up_to_date 811 return up_to_date
811 812
812 813
def update_all_tests(waterfalls):
  """Regenerates the perf JSON config file for each given waterfall.

  Writes each waterfall's generated test dict to its JSON config file
  (pretty-printed, sorted keys, trailing newline), then checks that every
  generated test is covered by an entry in benchmark.csv.

  Args:
    waterfalls: iterable of waterfall objects understood by
        generate_all_tests() and get_json_config_file_for_waterfall().
  """
  generated_tests = {}
  for waterfall in waterfalls:
    waterfall_tests = generate_all_tests(waterfall)
    destination = get_json_config_file_for_waterfall(waterfall)
    with open(destination, 'w') as config_fp:
      json.dump(waterfall_tests, config_fp, indent=2, separators=(',', ': '),
                sort_keys=True)
      config_fp.write('\n')
    generated_tests.update(waterfall_tests)
  verify_all_tests_in_benchmark_csv(generated_tests,
                                    get_all_waterfall_benchmarks_metadata())
823 825
824 826
def src_dir():
  """Returns the absolute path of the chromium src/ directory.

  This file lives at src/tools/perf/core/perf_data_generator.py, so the
  src root is four directory levels above it.
  """
  path = os.path.abspath(__file__)
  # Walk up: perf_data_generator.py -> core -> perf -> tools -> src.
  for _ in range(4):
    path = os.path.dirname(path)
  return path
829 831
830 832
# Owner/component record attached to each benchmark name; either field may
# be None when unknown.
BenchmarkMetadata = collections.namedtuple(
    'BenchmarkMetadata', ['emails', 'component'])

# Benchmarks that run on the perf waterfalls but are not telemetry-based.
NON_TELEMETRY_BENCHMARKS = {
    'angle_perftests': BenchmarkMetadata('jmadill@chromium.org', None),
    'cc_perftests': BenchmarkMetadata('enne@chromium.org', None),
    'gpu_perftests': BenchmarkMetadata('reveman@chromium.org', None),
    'load_library_perf_tests': BenchmarkMetadata(None, None),
    'media_perftests': BenchmarkMetadata('crouleau@chromium.org', None),
    'performance_browser_tests': BenchmarkMetadata(
        'hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org', None),
    'tracing_perftests': BenchmarkMetadata(
        'kkraynov@chromium.org, primiano@chromium.org', None),
}


# Benchmarks that are tracked in benchmark.csv but do not run on any
# waterfall.
# If you change this dictionary, run tools/perf/generate_perf_data
NON_WATERFALL_BENCHMARKS = {
    'resource_sizes': BenchmarkMetadata(
        'agrieve@chromium.org, rnephew@chromium.org, perezju@chromium.org',
        None),
    'sizes (linux)': BenchmarkMetadata('thestig@chromium.org', None),
    'sizes (mac)': BenchmarkMetadata('tapted@chromium.org', None),
    'sizes (win)': BenchmarkMetadata('grt@chromium.org', None),
}
857
858
# Returns a dictionary mapping waterfall benchmark name to benchmark owner
# metadata
def get_all_waterfall_benchmarks_metadata():
  # Pass a copy: get_all_benchmarks_metadata() writes telemetry benchmark
  # entries into the dict it is given, and passing NON_TELEMETRY_BENCHMARKS
  # directly would permanently pollute that module-level constant (which
  # update_benchmark_csv() later reads).
  return get_all_benchmarks_metadata(dict(NON_TELEMETRY_BENCHMARKS))
863
864
# Returns a dictionary mapping benchmark name to benchmark owner metadata:
# the entries of |metadata| plus one entry per current telemetry benchmark
# (emails joined with ', '; component from the benchmark's decorator).
def get_all_benchmarks_metadata(metadata):
  # Work on a copy: mutating the argument in place would corrupt
  # module-level constants such as NON_TELEMETRY_BENCHMARKS when callers
  # pass them in directly.
  metadata = dict(metadata)
  benchmark_list = current_benchmarks(False)

  for benchmark in benchmark_list:
    emails = decorators.GetEmails(benchmark)
    if emails:
      emails = ', '.join(emails)
    metadata[benchmark.Name()] = BenchmarkMetadata(
        emails, decorators.GetComponent(benchmark))
  return metadata
858 875
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
890 907
891 Updates telemetry/perf/benchmark.csv containing the current benchmark names, 908 Updates telemetry/perf/benchmark.csv containing the current benchmark names,
892 owners, and components. 909 owners, and components.
893 """ 910 """
894 header_data = [['AUTOGENERATED FILE DO NOT EDIT'], 911 header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
895 ['See //tools/perf/generate_perf_data.py to make changes'], 912 ['See //tools/perf/generate_perf_data.py to make changes'],
896 ['Benchmark name', 'Individual owners', 'Component'] 913 ['Benchmark name', 'Individual owners', 'Component']
897 ] 914 ]
898 915
899 csv_data = [] 916 csv_data = []
900 benchmark_metadata = get_all_benchmarks_metadata() 917 all_benchmarks = NON_TELEMETRY_BENCHMARKS
918 all_benchmarks.update(NON_WATERFALL_BENCHMARKS)
919 benchmark_metadata = get_all_benchmarks_metadata(all_benchmarks)
901 for benchmark_name in benchmark_metadata: 920 for benchmark_name in benchmark_metadata:
902 csv_data.append([ 921 csv_data.append([
903 benchmark_name, 922 benchmark_name,
904 benchmark_metadata[benchmark_name].emails, 923 benchmark_metadata[benchmark_name].emails,
905 benchmark_metadata[benchmark_name].component 924 benchmark_metadata[benchmark_name].component
906 ]) 925 ])
907 926
908 csv_data = sorted(csv_data, key=lambda b: b[0]) 927 csv_data = sorted(csv_data, key=lambda b: b[0])
909 csv_data = header_data + csv_data 928 csv_data = header_data + csv_data
910 929
(...skipping 27 matching lines...) Expand all
938 return 0 957 return 0
939 else: 958 else:
940 print ('The perf JSON config files are not up-to-date. Please run %s ' 959 print ('The perf JSON config files are not up-to-date. Please run %s '
941 'without --validate-only flag to update the perf JSON ' 960 'without --validate-only flag to update the perf JSON '
942 'configs and benchmark.csv.') % sys.argv[0] 961 'configs and benchmark.csv.') % sys.argv[0]
943 return 1 962 return 1
944 else: 963 else:
945 update_all_tests([fyi_waterfall, waterfall]) 964 update_all_tests([fyi_waterfall, waterfall])
946 update_benchmark_csv() 965 update_benchmark_csv()
947 return 0 966 return 0
OLDNEW
« no previous file with comments | « tools/perf/benchmark.csv ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698