Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 # pylint: disable=too-many-lines | 6 # pylint: disable=too-many-lines |
| 7 | 7 |
| 8 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 8 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
| 9 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf | 9 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf |
| 10 directory. Maintaining these files by hand is too unwieldy. | 10 directory. Maintaining these files by hand is too unwieldy. |
| (...skipping 835 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 846 return tests | 846 return tests |
| 847 | 847 |
| 848 | 848 |
| 849 def get_json_config_file_for_waterfall(waterfall): | 849 def get_json_config_file_for_waterfall(waterfall): |
| 850 filename = '%s.json' % waterfall['name'] | 850 filename = '%s.json' % waterfall['name'] |
| 851 buildbot_dir = os.path.join( | 851 buildbot_dir = os.path.join( |
| 852 path_util.GetChromiumSrcDir(), 'testing', 'buildbot') | 852 path_util.GetChromiumSrcDir(), 'testing', 'buildbot') |
| 853 return os.path.join(buildbot_dir, filename) | 853 return os.path.join(buildbot_dir, filename) |
| 854 | 854 |
| 855 | 855 |
| 856 def get_extras_json_config_file_for_waterfall(waterfall): | |
| 857 filename = '%s.extras.json' % waterfall['name'] | |
| 858 buildbot_dir = os.path.join(path_util.GetChromiumSrcDir(), 'tools', 'perf') | |
| 859 return os.path.join(buildbot_dir, filename) | |
| 860 | |
| 861 | |
| 862 def append_extra_tests(waterfall, tests): | |
|
martiniss
2017/07/06 22:11:22
Can you add a small docstring here?
yzshen1
2017/07/07 18:02:44
Done.
| |
| 863 extra_config_file = get_extras_json_config_file_for_waterfall(waterfall) | |
| 864 if os.path.isfile(extra_config_file): | |
| 865 with open(extra_config_file) as extra_fp: | |
| 866 extra_tests = json.load(extra_fp) | |
| 867 for key, value in extra_tests.iteritems(): | |
| 868 assert key not in tests | |
| 869 tests[key] = value | |
| 870 | |
| 871 | |
| 856 def tests_are_up_to_date(waterfalls): | 872 def tests_are_up_to_date(waterfalls): |
| 857 up_to_date = True | 873 up_to_date = True |
| 858 all_tests = {} | 874 all_tests = {} |
| 859 for w in waterfalls: | 875 for w in waterfalls: |
| 860 tests = generate_all_tests(w) | 876 tests = generate_all_tests(w) |
| 877 # Note: |all_tests| doesn't cover those manually-specified tests added by | |
| 878 # append_extra_tests(). | |
| 879 all_tests.update(tests) | |
| 880 append_extra_tests(w, tests) | |
| 861 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), | 881 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), |
| 862 sort_keys=True) | 882 sort_keys=True) |
| 863 config_file = get_json_config_file_for_waterfall(w) | 883 config_file = get_json_config_file_for_waterfall(w) |
| 864 with open(config_file, 'r') as fp: | 884 with open(config_file, 'r') as fp: |
| 865 config_data = fp.read().strip() | 885 config_data = fp.read().strip() |
| 866 all_tests.update(tests) | |
| 867 up_to_date &= tests_data == config_data | 886 up_to_date &= tests_data == config_data |
| 868 verify_all_tests_in_benchmark_csv(all_tests, | 887 verify_all_tests_in_benchmark_csv(all_tests, |
| 869 get_all_waterfall_benchmarks_metadata()) | 888 get_all_waterfall_benchmarks_metadata()) |
| 870 return up_to_date | 889 return up_to_date |
| 871 | 890 |
| 872 | 891 |
| 873 def update_all_tests(waterfalls): | 892 def update_all_tests(waterfalls): |
| 874 all_tests = {} | 893 all_tests = {} |
| 875 for w in waterfalls: | 894 for w in waterfalls: |
| 876 tests = generate_all_tests(w) | 895 tests = generate_all_tests(w) |
| 896 # Note: |all_tests| doesn't cover those manually-specified tests added by | |
| 897 # append_extra_tests(). | |
| 898 all_tests.update(tests) | |
| 899 append_extra_tests(w, tests) | |
| 877 config_file = get_json_config_file_for_waterfall(w) | 900 config_file = get_json_config_file_for_waterfall(w) |
| 878 with open(config_file, 'w') as fp: | 901 with open(config_file, 'w') as fp: |
| 879 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 902 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) |
| 880 fp.write('\n') | 903 fp.write('\n') |
| 881 all_tests.update(tests) | |
| 882 verify_all_tests_in_benchmark_csv(all_tests, | 904 verify_all_tests_in_benchmark_csv(all_tests, |
| 883 get_all_waterfall_benchmarks_metadata()) | 905 get_all_waterfall_benchmarks_metadata()) |
| 884 | 906 |
| 885 | 907 |
| 886 # not_scheduled means this test is not scheduled on any of the chromium.perf | 908 # not_scheduled means this test is not scheduled on any of the chromium.perf |
| 887 # waterfalls. Right now, all the below benchmarks are scheduled, but some other | 909 # waterfalls. Right now, all the below benchmarks are scheduled, but some other |
| 888 # benchmarks are not scheduled, because they're disabled on all platforms. | 910 # benchmarks are not scheduled, because they're disabled on all platforms. |
| 889 BenchmarkMetadata = collections.namedtuple( | 911 BenchmarkMetadata = collections.namedtuple( |
| 890 'BenchmarkMetadata', 'emails component not_scheduled') | 912 'BenchmarkMetadata', 'emails component not_scheduled') |
| 891 NON_TELEMETRY_BENCHMARKS = { | 913 NON_TELEMETRY_BENCHMARKS = { |
| (...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1052 return 0 | 1074 return 0 |
| 1053 else: | 1075 else: |
| 1054 print ('The perf JSON config files are not up-to-date. Please run %s ' | 1076 print ('The perf JSON config files are not up-to-date. Please run %s ' |
| 1055 'without --validate-only flag to update the perf JSON ' | 1077 'without --validate-only flag to update the perf JSON ' |
| 1056 'configs and benchmark.csv.') % sys.argv[0] | 1078 'configs and benchmark.csv.') % sys.argv[0] |
| 1057 return 1 | 1079 return 1 |
| 1058 else: | 1080 else: |
| 1059 update_all_tests([fyi_waterfall, waterfall]) | 1081 update_all_tests([fyi_waterfall, waterfall]) |
| 1060 update_benchmark_csv() | 1082 update_benchmark_csv() |
| 1061 return 0 | 1083 return 0 |
| OLD | NEW |