Chromium Code Reviews

Unified diff: tools/perf/core/perf_data_generator.py

Issue 2754883002: Generating benchmark.csv file (Closed)
Patch Set: Generating benchmark.csv file (created 3 years, 9 months ago)
 #!/usr/bin/env python
 # Copyright 2016 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
-the src/testing/buildbot directory. Maintaining these files by hand is
-too unwieldy.
+the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
+directory. Maintaining these files by hand is too unwieldy.
 """
 import argparse
+import collections
+import csv
 import json
 import os
+import re
 import sys
+import sets

 from chrome_telemetry_build import chromium_config

 sys.path.append(chromium_config.GetTelemetryDir())
 from telemetry import benchmark as benchmark_module
+from telemetry import decorators
 from telemetry.core import discover
 from telemetry.util import bot_utils


 SCRIPT_TESTS = [
   {
     'args': [
       'gpu_perftests',
       '--adb-path',
       'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',
(...skipping 753 matching lines...)
   tests['AAAAA2 See //tools/perf/generate_perf_data.py to make changes'] = {}
   return tests


 def get_json_config_file_for_waterfall(waterfall):
   filename = '%s.json' % waterfall['name']
   buildbot_dir = os.path.join(src_dir(), 'testing', 'buildbot')
   return os.path.join(buildbot_dir, filename)

-def tests_are_up_to_date(waterfall):
-  tests = generate_all_tests(waterfall)
-  tests_data = json.dumps(tests, indent=2, separators=(',', ': '),
-                          sort_keys=True)
-  config_file = get_json_config_file_for_waterfall(waterfall)
-  with open(config_file, 'r') as fp:
-    config_data = fp.read().strip()
-  return tests_data == config_data
+def tests_are_up_to_date(waterfalls):
+  up_to_date = True
+  all_tests = {}
+  for w in waterfalls:
+    tests = generate_all_tests(w)
+    tests_data = json.dumps(tests, indent=2, separators=(',', ': '),
+                            sort_keys=True)
+    config_file = get_json_config_file_for_waterfall(w)
+    with open(config_file, 'r') as fp:
+      config_data = fp.read().strip()
+    all_tests.update(tests)
+    up_to_date &= tests_data == config_data
+  verify_all_tests_in_benchmark_csv(all_tests, get_all_benchmarks_metadata())
+  return up_to_date

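The rewritten helper now walks every waterfall, collects the generated tests into all_tests, and folds each comparison into a single flag with up_to_date &= tests_data == config_data (bitwise AND assignment, which behaves like logical AND for booleans). A minimal sketch of that accumulation pattern, illustration only and not part of the patch:

up_to_date = True
up_to_date &= True    # config matched; still True
up_to_date &= False   # one config was stale; now False
up_to_date &= True    # later matches cannot hide the stale one; stays False
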

-def update_all_tests(waterfall):
-  tests = generate_all_tests(waterfall)
-  config_file = get_json_config_file_for_waterfall(waterfall)
-  with open(config_file, 'w') as fp:
-    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
-    fp.write('\n')
+def update_all_tests(waterfalls):
+  all_tests = {}
+  for w in waterfalls:
+    tests = generate_all_tests(w)
+    config_file = get_json_config_file_for_waterfall(w)
+    with open(config_file, 'w') as fp:
+      json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
+      fp.write('\n')
+    all_tests.update(tests)
+  verify_all_tests_in_benchmark_csv(all_tests, get_all_benchmarks_metadata())


 def src_dir():
   file_path = os.path.abspath(__file__)
   return os.path.dirname(os.path.dirname(
       os.path.dirname(os.path.dirname(file_path))))

+BenchmarkMetadata = collections.namedtuple(
+    'BenchmarkMetadata', 'emails component')
+NON_TELEMETRY_BENCHMARKS = {
+  'angle_perftests': BenchmarkMetadata('jmadill@chromium.org', None),
+  'cc_perftests': BenchmarkMetadata('enne@chromium.org', None),
+  'gpu_perftests': BenchmarkMetadata('reveman@chromium.org', None),
+  'tracing_perftests': BenchmarkMetadata(
+      'kkraynov@chromium.org, primiano@chromium.org', None),
+  'load_library_perf_tests': BenchmarkMetadata(None, None),
+  'media_perftests': BenchmarkMetadata('crouleau@chromium.org', None),
+  'performance_browser_tests': BenchmarkMetadata(
+      'hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org', None)
+}
+
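BenchmarkMetadata is a plain two-field namedtuple: an owners string (or None) and an optional bug component. A tiny sketch of how an entry is built and read; the values here are illustrative, not taken from the table above:

meta = BenchmarkMetadata('someone@chromium.org', None)
print meta.emails     # 'someone@chromium.org'
print meta.component  # None
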
+
+# Returns a dictionary mapping benchmark name to benchmark owner metadata
+def get_all_benchmarks_metadata():
+  metadata = NON_TELEMETRY_BENCHMARKS
+  benchmark_list = current_benchmarks(False)
+
+  for benchmark in benchmark_list:
+    emails = decorators.GetEmails(benchmark)
+    if emails:
+      emails = ', '.join(emails)
+    metadata[benchmark.Name()] = BenchmarkMetadata(
+        emails, decorators.GetComponent(benchmark))
+  return metadata
+
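One behavioural detail: metadata = NON_TELEMETRY_BENCHMARKS binds a second name to the same dict, so the telemetry entries added in the loop also land in the module-level NON_TELEMETRY_BENCHMARKS table. If that side effect were unwanted, copying the dict avoids it; a sketch of that alternative, not what the patch does:

def get_all_benchmarks_metadata():
  # Copy so the module-level table is left untouched.
  metadata = dict(NON_TELEMETRY_BENCHMARKS)
  for benchmark in current_benchmarks(False):
    emails = decorators.GetEmails(benchmark)
    if emails:
      emails = ', '.join(emails)
    metadata[benchmark.Name()] = BenchmarkMetadata(
        emails, decorators.GetComponent(benchmark))
  return metadata
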
+
+def verify_all_tests_in_benchmark_csv(tests, benchmark_metadata):
+  benchmark_names = sets.Set(benchmark_metadata)
+  test_names = sets.Set()
+  for t in tests:
+    scripts = []
+    if 'isolated_scripts' in tests[t]:
+      scripts = tests[t]['isolated_scripts']
+    elif 'scripts' in tests[t]:
+      scripts = tests[t]['scripts']
+    else:
+      assert('Android Compile' == t
+          or 'Android arm64 Compile' == t
+          or t.startswith('AAAAA')), 'Unknown test data %s' % t
+    for s in scripts:
+      name = s['name']
+      name = re.sub('\\.reference$', '', name)
+      test_names.add(name)
+
+  error_messages = []
+  for test in benchmark_names - test_names:
+    error_messages.append('Remove ' + test + ' from NON_TELEMETRY_BENCHMARKS')
+  for test in test_names - benchmark_names:
+    error_messages.append('Add ' + test + ' to NON_TELEMETRY_BENCHMARKS')
+
+  assert benchmark_names == test_names, ('Please update '
+      'NON_TELEMETRY_BENCHMARKS as below:\n' + '\n'.join(error_messages))
+
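The checker builds its name sets with the sets module, which has been deprecated since Python 2.6 in favor of the built-in set type (and no longer exists in Python 3). The same difference logic with built-ins would look like the sketch below; names_from_configs is a hypothetical list of benchmark names pulled from the generated bot configs:

benchmark_names = set(benchmark_metadata)  # names that have owner metadata
test_names = set(names_from_configs)       # names scheduled on the bots
stale = benchmark_names - test_names       # remove these from NON_TELEMETRY_BENCHMARKS
missing = test_names - benchmark_names     # add these to NON_TELEMETRY_BENCHMARKS
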
+
+def update_benchmark_csv():
+  """Updates go/chrome-benchmarks.
+
+  Updates telemetry/perf/benchmark.csv containing the current benchmark names,
+  owners, and components.
+  """
+  header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
+      ['See //tools/perf/generate_perf_data.py to make changes'],
+      ['Benchmark name', 'Individual owners', 'Component']
+  ]
+
+  csv_data = []
+  benchmark_metadata = get_all_benchmarks_metadata()
+  for benchmark_name in benchmark_metadata:
+    csv_data.append([
+        benchmark_name,
+        benchmark_metadata[benchmark_name].emails,
+        benchmark_metadata[benchmark_name].component
+    ])
+
+  csv_data = sorted(csv_data, key=lambda b: b[0])
+  csv_data = header_data + csv_data
+
+  perf_dir = os.path.join(src_dir(), 'tools', 'perf')
+  benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
+  with open(benchmark_file, 'wb') as f:
+    writer = csv.writer(f)
+    writer.writerows(csv_data)
+

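Given header_data above, the generated tools/perf/benchmark.csv starts with two banner rows and a column-header row, followed by one row per benchmark sorted by name; a None owner or component is written as an empty field by csv.writer. The top of the file would look roughly like this, using entries from NON_TELEMETRY_BENCHMARKS as examples (the real file also interleaves every telemetry benchmark):

AUTOGENERATED FILE DO NOT EDIT
See //tools/perf/generate_perf_data.py to make changes
Benchmark name,Individual owners,Component
angle_perftests,jmadill@chromium.org,
cc_perftests,enne@chromium.org,
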
 def main(args):
   parser = argparse.ArgumentParser(
-      description=('Generate perf test\' json config. This need to be done '
-                   'anytime you add/remove any existing benchmarks in '
-                   'tools/perf/benchmarks.'))
+      description=('Generate perf test\' json config and benchmark.csv. '
+                   'This needs to be done anytime you add/remove any existing'
+                   'benchmarks in tools/perf/benchmarks.'))
   parser.add_argument(
       '--validate-only', action='store_true', default=False,
       help=('Validate whether the perf json generated will be the same as the '
             'existing configs. This does not change the contain of existing '
             'configs'))
   options = parser.parse_args(args)

   waterfall = get_waterfall_config()
   waterfall['name'] = 'chromium.perf'
   fyi_waterfall = get_fyi_waterfall_config()
   fyi_waterfall['name'] = 'chromium.perf.fyi'

   if options.validate_only:
-    if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall):
+    if tests_are_up_to_date([fyi_waterfall, waterfall]):
       print 'All the perf JSON config files are up-to-date. \\o/'
       return 0
     else:
       print ('The perf JSON config files are not up-to-date. Please run %s '
              'without --validate-only flag to update the perf JSON '
-             'configs.') % sys.argv[0]
+             'configs and benchmark.csv.') % sys.argv[0]
       return 1
   else:
-    update_all_tests(fyi_waterfall)
-    update_all_tests(waterfall)
+    update_all_tests([fyi_waterfall, waterfall])
+    update_benchmark_csv()
     return 0
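With this change the script has two modes, matching the argparse setup above: run it with no flags to regenerate both JSON configs plus benchmark.csv, or with --validate-only to check them without writing anything. Typical invocations from the root of a Chromium checkout, shown for illustration:

# Regenerate chromium.perf.json, chromium.perf.fyi.json and benchmark.csv.
python tools/perf/core/perf_data_generator.py

# Check only: main() returns 1 and prints instructions if anything is stale.
python tools/perf/core/perf_data_generator.py --validate-only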