Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(175)

Side by Side Diff: tools/perf/core/perf_json_generator.py

Issue 2754883002: Generating benchmark.csv file (Closed)
Patch Set: Fixing silly Python mistake Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« tools/perf/benchmark.csv ('K') | « tools/perf/benchmark.csv ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
7 the src/testing/buildbot directory. Maintaining these files by hand is 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
8 too unwieldy. 8 directory. Maintaining these files by hand is too unwieldy.
9 """ 9 """
10 import argparse 10 import argparse
11 import csv
11 import json 12 import json
12 import os 13 import os
13 import sys 14 import sys
14 15
15 from chrome_telemetry_build import chromium_config 16 from chrome_telemetry_build import chromium_config
16 17
17 sys.path.append(chromium_config.GetTelemetryDir()) 18 sys.path.append(chromium_config.GetTelemetryDir())
18 from telemetry import benchmark as benchmark_module 19 from telemetry import benchmark as benchmark_module
20 from telemetry import decorators
19 from telemetry.core import discover 21 from telemetry.core import discover
20 from telemetry.util import bot_utils 22 from telemetry.util import bot_utils
21 23
22 24
23 SCRIPT_TESTS = [ 25 SCRIPT_TESTS = [
24 { 26 {
25 'args': [ 27 'args': [
26 'gpu_perftests', 28 'gpu_perftests',
27 '--adb-path', 29 '--adb-path',
28 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', 30 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',
(...skipping 618 matching lines...) Expand 10 before | Expand all | Expand 10 after
647 } 649 }
648 650
649 # Certain swarming bots are not sharding correctly with the new device affinity 651 # Certain swarming bots are not sharding correctly with the new device affinity
650 # algorithm. Reverting to legacy algorithm to try and get them to complete. 652 # algorithm. Reverting to legacy algorithm to try and get them to complete.
651 # See crbug.com/670284 653 # See crbug.com/670284
652 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ 654 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [
653 'Win Zenbook Perf', 655 'Win Zenbook Perf',
654 'Win 10 High-DPI Perf', 656 'Win 10 High-DPI Perf',
655 ] 657 ]
656 658
657 def current_benchmarks(use_whitelist): 659 def current_benchmarks(use_whitelist, use_blacklist = True):
658 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') 660 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
659 top_level_dir = os.path.dirname(benchmarks_dir) 661 top_level_dir = os.path.dirname(benchmarks_dir)
660 662
661 all_benchmarks = discover.DiscoverClasses( 663 all_benchmarks = discover.DiscoverClasses(
662 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, 664 benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
663 index_by_class_name=True).values() 665 index_by_class_name=True).values()
664 # Remove all blacklisted benchmarks 666
665 for blacklisted in BENCHMARK_NAME_BLACKLIST: 667 if use_blacklist:
666 for benchmark in all_benchmarks: 668 # Remove all blacklisted benchmarks
667 if benchmark.Name() == blacklisted: 669 for blacklisted in BENCHMARK_NAME_BLACKLIST:
668 all_benchmarks.remove(benchmark) 670 for benchmark in all_benchmarks:
669 break 671 if benchmark.Name() == blacklisted:
672 all_benchmarks.remove(benchmark)
673 break
670 674
671 if use_whitelist: 675 if use_whitelist:
672 all_benchmarks = ( 676 all_benchmarks = (
673 bench for bench in all_benchmarks 677 bench for bench in all_benchmarks
674 if bench.Name() in BENCHMARK_NAME_WHITELIST) 678 if bench.Name() in BENCHMARK_NAME_WHITELIST)
675 return sorted(all_benchmarks, key=lambda b: b.Name()) 679 return sorted(all_benchmarks, key=lambda b: b.Name())
676 680
677 681
678 # Returns a sorted list of (benchmark, avg_runtime) pairs for every 682 # Returns a sorted list of (benchmark, avg_runtime) pairs for every
679 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also 683 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after
806 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) 810 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
807 fp.write('\n') 811 fp.write('\n')
808 812
809 813
810 def src_dir(): 814 def src_dir():
811 file_path = os.path.abspath(__file__) 815 file_path = os.path.abspath(__file__)
812 return os.path.dirname(os.path.dirname( 816 return os.path.dirname(os.path.dirname(
813 os.path.dirname(os.path.dirname(file_path)))) 817 os.path.dirname(os.path.dirname(file_path))))
814 818
815 819
820 MANUAL_BENCHMARKS = [
nednguyen 2017/03/16 19:11:18 Let's name this NON_TELEMETRY_BENCHMARKS. Also can y
ashleymarie1 2017/03/16 19:40:51 Done. I had never heard of namedtuple before today
821 ["angle_perftests", "jmadill@chromium.org", None],
822 ["cc_perftest", "enne@chromium.org", None],
823 ["gpu_perftests", "reveman@chromium.org", None],
824 ["tracing_perftests", "kkraynov@chromium.org, primiano@chromium.org",
825 None],
826 ["indexeddb_perf", "cmumford@chromium.org", None],
827 ["load_library_perf_tests", None, None],
828 ["media_perftests", "crouleau@chromium.org", None],
829 ["performance_browser_tests",
830 "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None]
831 ]
832
833
834 def update_benchmark_csv():
835 """Updates go/chrome-benchmarks.
836
837 Updates telemetry/perf/benchmark.csv containing the current benchmark names,
838 owners, and components.
839 """
840 benchmark_list = current_benchmarks(
841 use_whitelist = False, use_blacklist = False)
842 data = MANUAL_BENCHMARKS
843 for benchmark in benchmark_list:
844 emails = decorators.GetEmails(benchmark)
845 if emails:
846 emails = ", ".join(emails)
847 data.append([
848 benchmark.Name(),
849 emails,
850 decorators.GetComponent(benchmark)
851 ])
852
853 data = sorted(data, key=lambda b: b[0])
854
855 perf_dir = os.path.join(src_dir(), 'tools', 'perf')
856 benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
857 with open(benchmark_file, 'wb') as f:
858 writer = csv.writer(f)
859 writer.writerows(data)
860
861
816 def main(args): 862 def main(args):
817 parser = argparse.ArgumentParser( 863 parser = argparse.ArgumentParser(
818 description=('Generate perf test\' json config. This need to be done ' 864 description=('Generate perf test\' json config. This need to be done '
819 'anytime you add/remove any existing benchmarks in ' 865 'anytime you add/remove any existing benchmarks in '
nednguyen 2017/03/16 19:11:18 The description needs update
ashleymarie1 2017/03/16 19:40:51 Done.
820 'tools/perf/benchmarks.')) 866 'tools/perf/benchmarks.'))
821 parser.add_argument( 867 parser.add_argument(
822 '--validate-only', action='store_true', default=False, 868 '--validate-only', action='store_true', default=False,
823 help=('Validate whether the perf json generated will be the same as the ' 869 help=('Validate whether the perf json generated will be the same as the '
824 'existing configs. This does not change the contain of existing ' 870 'existing configs. This does not change the contain of existing '
825 'configs')) 871 'configs'))
826 options = parser.parse_args(args) 872 options = parser.parse_args(args)
827 873
874 update_benchmark_csv()
nednguyen 2017/03/16 19:11:18 If --validate-only flag is enabled, we should not
ashleymarie1 2017/03/16 19:40:51 Done.
875
828 waterfall = get_waterfall_config() 876 waterfall = get_waterfall_config()
829 waterfall['name'] = 'chromium.perf' 877 waterfall['name'] = 'chromium.perf'
830 fyi_waterfall = get_fyi_waterfall_config() 878 fyi_waterfall = get_fyi_waterfall_config()
831 fyi_waterfall['name'] = 'chromium.perf.fyi' 879 fyi_waterfall['name'] = 'chromium.perf.fyi'
832 880
833 if options.validate_only: 881 if options.validate_only:
834 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): 882 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall):
835 print 'All the perf JSON config files are up-to-date. \\o/' 883 print 'All the perf JSON config files are up-to-date. \\o/'
836 return 0 884 return 0
837 else: 885 else:
838 print ('The perf JSON config files are not up-to-date. Please run %s ' 886 print ('The perf JSON config files are not up-to-date. Please run %s '
839 'without --validate-only flag to update the perf JSON ' 887 'without --validate-only flag to update the perf JSON '
840 'configs.') % sys.argv[0] 888 'configs.') % sys.argv[0]
841 return 1 889 return 1
842 else: 890 else:
843 update_all_tests(fyi_waterfall) 891 update_all_tests(fyi_waterfall)
844 update_all_tests(waterfall) 892 update_all_tests(waterfall)
845 return 0 893 return 0
OLDNEW
« tools/perf/benchmark.csv ('K') | « tools/perf/benchmark.csv ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698