Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(61)

Side by Side Diff: tools/perf/core/perf_data_generator.py

Issue 2973733002: Enable loading.desktop benchmark with network service enabled on perf fyi bot. (Closed)
Patch Set: . Created 3 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 # pylint: disable=too-many-lines 6 # pylint: disable=too-many-lines
7 7
8 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 8 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
9 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf 9 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
10 directory. Maintaining these files by hand is too unwieldy. 10 directory. Maintaining these files by hand is too unwieldy.
(...skipping 829 matching lines...) Expand 10 before | Expand all | Expand 10 after
840 return tests 840 return tests
841 841
842 842
def get_json_config_file_for_waterfall(waterfall):
  """Returns the path of the waterfall's generated buildbot JSON config.

  The file lives at src/testing/buildbot/<waterfall name>.json.
  """
  config_name = '%s.json' % waterfall['name']
  return os.path.join(
      path_util.GetChromiumSrcDir(), 'testing', 'buildbot', config_name)
848 848
849 849
def get_extras_json_config_file_for_waterfall(waterfall):
  """Returns the path of the waterfall's manually-maintained extras config.

  The file lives at src/tools/perf/<waterfall name>.extras.json and may be
  absent; callers are expected to check for its existence.
  """
  config_name = '%s.extras.json' % waterfall['name']
  return os.path.join(
      path_util.GetChromiumSrcDir(), 'tools', 'perf', config_name)
854
855
def append_extra_tests(waterfall, tests):
  """Appends manually-specified extra tests to |tests| in place.

  The extra tests are loaded from tools/perf/<waterfall name>.extras.json.
  The file is optional: when it does not exist, |tests| is left untouched.
  A top-level 'comment' key is skipped so the JSON file can carry an
  explanatory note about itself.

  Args:
    waterfall: Waterfall dict; its 'name' selects the extras file.
    tests: Dict of test configs keyed by name; mutated in place.

  Raises:
    AssertionError: If an extra test name collides with an existing entry
        in |tests|, i.e. a manually-specified test would silently shadow a
        generated one.
  """
  extra_config_file = get_extras_json_config_file_for_waterfall(waterfall)
  if not os.path.isfile(extra_config_file):
    return
  with open(extra_config_file) as extra_fp:
    extra_tests = json.load(extra_fp)
  # items() (rather than Python-2-only iteritems()) keeps this working
  # unchanged under Python 3; iteration behavior is identical here.
  for key, value in extra_tests.items():
    if key == 'comment':
      continue
    # Name the offending key so duplicate-entry failures are easy to debug.
    assert key not in tests, 'Duplicated test config: %s' % key
    tests[key] = value
870
871
def tests_are_up_to_date(waterfalls):
  """Checks that each waterfall's checked-in JSON config is current.

  For every waterfall, regenerates the test configs (including the
  manually-specified extras) and compares their serialized form against the
  checked-in config file. Also verifies that all generated benchmarks appear
  in benchmark.csv.

  Returns:
    True iff every config file matches its freshly generated content.
  """
  all_tests = {}
  up_to_date = True
  for waterfall in waterfalls:
    tests = generate_all_tests(waterfall)
    # |all_tests| intentionally excludes the manually-specified tests that
    # append_extra_tests() adds below.
    all_tests.update(tests)
    append_extra_tests(waterfall, tests)
    expected = json.dumps(tests, indent=2, separators=(',', ': '),
                          sort_keys=True)
    with open(get_json_config_file_for_waterfall(waterfall), 'r') as fp:
      actual = fp.read().strip()
    if expected != actual:
      up_to_date = False
  verify_all_tests_in_benchmark_csv(all_tests,
                                    get_all_waterfall_benchmarks_metadata())
  return up_to_date
865 890
866 891
def update_all_tests(waterfalls):
  """Regenerates and writes the JSON config file for each waterfall.

  For every waterfall, generates the test configs, appends any
  manually-specified extras, and rewrites the checked-in JSON config file.
  Also verifies that all generated benchmarks appear in benchmark.csv.
  """
  all_tests = {}
  for waterfall in waterfalls:
    tests = generate_all_tests(waterfall)
    # |all_tests| intentionally excludes the manually-specified tests that
    # append_extra_tests() adds below.
    all_tests.update(tests)
    append_extra_tests(waterfall, tests)
    serialized = json.dumps(tests, indent=2, separators=(',', ': '),
                            sort_keys=True)
    with open(get_json_config_file_for_waterfall(waterfall), 'w') as fp:
      fp.write(serialized + '\n')
  verify_all_tests_in_benchmark_csv(all_tests,
                                    get_all_waterfall_benchmarks_metadata())
878 906
879 907
# not_scheduled means this test is not scheduled on any of the chromium.perf
# waterfalls. Right now, all the below benchmarks are scheduled, but some other
# benchmarks are not scheduled, because they're disabled on all platforms.
BenchmarkMetadata = collections.namedtuple(
    'BenchmarkMetadata', ['emails', 'component', 'not_scheduled'])
885 NON_TELEMETRY_BENCHMARKS = { 913 NON_TELEMETRY_BENCHMARKS = {
(...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after
1046 return 0 1074 return 0
1047 else: 1075 else:
1048 print ('The perf JSON config files are not up-to-date. Please run %s ' 1076 print ('The perf JSON config files are not up-to-date. Please run %s '
1049 'without --validate-only flag to update the perf JSON ' 1077 'without --validate-only flag to update the perf JSON '
1050 'configs and benchmark.csv.') % sys.argv[0] 1078 'configs and benchmark.csv.') % sys.argv[0]
1051 return 1 1079 return 1
1052 else: 1080 else:
1053 update_all_tests([fyi_waterfall, waterfall]) 1081 update_all_tests([fyi_waterfall, waterfall])
1054 update_benchmark_csv() 1082 update_benchmark_csv()
1055 return 0 1083 return 0
OLDNEW
« no previous file with comments | « tools/perf/contrib/network_service/loading.py ('k') | tools/perf/core/perf_data_generator_unittest.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698