Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(223)

Side by Side Diff: tools/perf/core/perf_json_generator.py

Issue 2754883002: Generating benchmark.csv file (Closed)
Patch Set: Generating benchmark.csv file Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« tools/perf/benchmark.csv ('K') | « tools/perf/benchmark.csv ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
7 the src/testing/buildbot directory. Maintaining these files by hand is 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
8 too unwieldy. 8 directory. Maintaining these files by hand is too unwieldy.
9 """ 9 """
10 import argparse 10 import argparse
11 import collections
12 import csv
11 import json 13 import json
12 import os 14 import os
13 import sys 15 import sys
14 16
15 from chrome_telemetry_build import chromium_config 17 from chrome_telemetry_build import chromium_config
16 18
17 sys.path.append(chromium_config.GetTelemetryDir()) 19 sys.path.append(chromium_config.GetTelemetryDir())
18 from telemetry import benchmark as benchmark_module 20 from telemetry import benchmark as benchmark_module
21 from telemetry import decorators
19 from telemetry.core import discover 22 from telemetry.core import discover
20 from telemetry.util import bot_utils 23 from telemetry.util import bot_utils
21 24
22 25
23 SCRIPT_TESTS = [ 26 SCRIPT_TESTS = [
24 { 27 {
25 'args': [ 28 'args': [
26 'gpu_perftests', 29 'gpu_perftests',
27 '--adb-path', 30 '--adb-path',
28 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', 31 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',
(...skipping 618 matching lines...) Expand 10 before | Expand all | Expand 10 after
647 } 650 }
648 651
649 # Certain swarming bots are not sharding correctly with the new device affinity 652 # Certain swarming bots are not sharding correctly with the new device affinity
650 # algorithm. Reverting to legacy algorithm to try and get them to complete. 653 # algorithm. Reverting to legacy algorithm to try and get them to complete.
651 # See crbug.com/670284 654 # See crbug.com/670284
652 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ 655 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [
653 'Win Zenbook Perf', 656 'Win Zenbook Perf',
654 'Win 10 High-DPI Perf', 657 'Win 10 High-DPI Perf',
655 ] 658 ]
656 659
657 def current_benchmarks(use_whitelist): 660 def current_benchmarks(use_whitelist, use_blacklist=True):
658 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') 661 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
659 top_level_dir = os.path.dirname(benchmarks_dir) 662 top_level_dir = os.path.dirname(benchmarks_dir)
660 663
661 all_benchmarks = discover.DiscoverClasses( 664 all_benchmarks = discover.DiscoverClasses(
662 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, 665 benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
663 index_by_class_name=True).values() 666 index_by_class_name=True).values()
664 # Remove all blacklisted benchmarks 667
665 for blacklisted in BENCHMARK_NAME_BLACKLIST: 668 if use_blacklist:
666 for benchmark in all_benchmarks: 669 # Remove all blacklisted benchmarks
667 if benchmark.Name() == blacklisted: 670 for blacklisted in BENCHMARK_NAME_BLACKLIST:
668 all_benchmarks.remove(benchmark) 671 for benchmark in all_benchmarks:
669 break 672 if benchmark.Name() == blacklisted:
673 all_benchmarks.remove(benchmark)
674 break
670 675
671 if use_whitelist: 676 if use_whitelist:
672 all_benchmarks = ( 677 all_benchmarks = (
673 bench for bench in all_benchmarks 678 bench for bench in all_benchmarks
674 if bench.Name() in BENCHMARK_NAME_WHITELIST) 679 if bench.Name() in BENCHMARK_NAME_WHITELIST)
675 return sorted(all_benchmarks, key=lambda b: b.Name()) 680 return sorted(all_benchmarks, key=lambda b: b.Name())
676 681
677 682
678 # Returns a sorted list of (benchmark, avg_runtime) pairs for every 683 # Returns a sorted list of (benchmark, avg_runtime) pairs for every
679 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also 684 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after
806 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) 811 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
807 fp.write('\n') 812 fp.write('\n')
808 813
809 814
810 def src_dir(): 815 def src_dir():
811 file_path = os.path.abspath(__file__) 816 file_path = os.path.abspath(__file__)
812 return os.path.dirname(os.path.dirname( 817 return os.path.dirname(os.path.dirname(
813 os.path.dirname(os.path.dirname(file_path)))) 818 os.path.dirname(os.path.dirname(file_path))))
814 819
815 820
821 BenchmarkMetadata = collections.namedtuple(
822 'BenchmarkMetadata', 'emails component')
823 NON_TELEMETRY_BENCHMARKS = {
nednguyen 2017/03/16 21:08:17 My concern here is that there is no way to make su
ashleymarie1 2017/03/16 21:18:02 I, too, am concerned about that. I'm having troubl
nednguyen 2017/03/16 21:23:43 One idea I have is make a method that return a map
ashleymarie1 2017/03/17 15:30:19 So this solves the problem of a user adding a test
824 "angle_perftests": BenchmarkMetadata("jmadill@chromium.org", None),
825 "cc_perftest": BenchmarkMetadata("enne@chromium.org", None),
826 "gpu_perftests": BenchmarkMetadata("reveman@chromium.org", None),
827 "tracing_perftests": BenchmarkMetadata(
828 "kkraynov@chromium.org, primiano@chromium.org", None),
829 "indexeddb_perf": BenchmarkMetadata("cmumford@chromium.org", None),
830 "load_library_perf_tests": BenchmarkMetadata(None, None),
831 "media_perftests": BenchmarkMetadata("crouleau@chromium.org", None),
832 "performance_browser_tests": BenchmarkMetadata(
833 "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None)
834 }
835
836
837 def update_benchmark_csv():
838 """Updates go/chrome-benchmarks.
839
840 Updates tools/perf/benchmark.csv containing the current benchmark names,
841 owners, and components.
842 """
843 benchmark_list = current_benchmarks(
844 use_whitelist=False, use_blacklist=False)
845
846 header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
847 ['See //tools/perf/core/perf_json_generator.py to make changes'],
848 ['Benchmark name', 'Individual owners', 'Component']
849 ]
850 data = []
851 for benchmark_name in NON_TELEMETRY_BENCHMARKS:
852 data.append([
853 benchmark_name,
854 NON_TELEMETRY_BENCHMARKS[benchmark_name].emails,
855 NON_TELEMETRY_BENCHMARKS[benchmark_name].component
856 ])
857 for benchmark in benchmark_list:
858 emails = decorators.GetEmails(benchmark)
859 if emails:
860 emails = ", ".join(emails)
861 data.append([
862 benchmark.Name(),
863 emails,
864 decorators.GetComponent(benchmark)
865 ])
866
867 data = sorted(data, key=lambda b: b[0])
868 data = header_data + data
869 perf_dir = os.path.join(src_dir(), 'tools', 'perf')
870 benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
871 with open(benchmark_file, 'wb') as f:
872 writer = csv.writer(f)
873 writer.writerows(data)
874
875
816 def main(args): 876 def main(args):
817 parser = argparse.ArgumentParser( 877 parser = argparse.ArgumentParser(
818 description=('Generate perf test\' json config. This need to be done ' 878 description=('Generate perf test\' json config and benchmark.csv. '
819 'anytime you add/remove any existing benchmarks in ' 879 'This needs to be done anytime you add/remove any existing '
820 'tools/perf/benchmarks.')) 880 'benchmarks in tools/perf/benchmarks.'))
821 parser.add_argument( 881 parser.add_argument(
822 '--validate-only', action='store_true', default=False, 882 '--validate-only', action='store_true', default=False,
823 help=('Validate whether the perf json generated will be the same as the ' 883 help=('Validate whether the perf json generated will be the same as the '
824 'existing configs. This does not change the content of existing ' 884 'existing configs. This does not change the content of existing '
825 'configs')) 885 'configs'))
826 options = parser.parse_args(args) 886 options = parser.parse_args(args)
827 887
828 waterfall = get_waterfall_config() 888 waterfall = get_waterfall_config()
829 waterfall['name'] = 'chromium.perf' 889 waterfall['name'] = 'chromium.perf'
830 fyi_waterfall = get_fyi_waterfall_config() 890 fyi_waterfall = get_fyi_waterfall_config()
831 fyi_waterfall['name'] = 'chromium.perf.fyi' 891 fyi_waterfall['name'] = 'chromium.perf.fyi'
832 892
833 if options.validate_only: 893 if options.validate_only:
834 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): 894 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall):
835 print 'All the perf JSON config files are up-to-date. \\o/' 895 print 'All the perf JSON config files are up-to-date. \\o/'
836 return 0 896 return 0
837 else: 897 else:
838 print ('The perf JSON config files are not up-to-date. Please run %s ' 898 print ('The perf JSON config files are not up-to-date. Please run %s '
839 'without --validate-only flag to update the perf JSON ' 899 'without --validate-only flag to update the perf JSON '
840 'configs.') % sys.argv[0] 900 'configs.') % sys.argv[0]
841 return 1 901 return 1
842 else: 902 else:
843 update_all_tests(fyi_waterfall) 903 update_all_tests(fyi_waterfall)
844 update_all_tests(waterfall) 904 update_all_tests(waterfall)
905 update_benchmark_csv()
845 return 0 906 return 0
OLDNEW
« tools/perf/benchmark.csv ('K') | « tools/perf/benchmark.csv ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698