OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf |
8 too unwieldy. | 8 directory. Maintaining these files by hand is too unwieldy. |
9 """ | 9 """ |
10 import argparse | 10 import argparse |
11 import collections | |
12 import csv | |
11 import json | 13 import json |
12 import os | 14 import os |
15 import re | |
13 import sys | 16 import sys |
17 import sets | |
14 | 18 |
15 from chrome_telemetry_build import chromium_config | 19 from chrome_telemetry_build import chromium_config |
16 | 20 |
17 sys.path.append(chromium_config.GetTelemetryDir()) | 21 sys.path.append(chromium_config.GetTelemetryDir()) |
18 from telemetry import benchmark as benchmark_module | 22 from telemetry import benchmark as benchmark_module |
23 from telemetry import decorators | |
19 from telemetry.core import discover | 24 from telemetry.core import discover |
20 from telemetry.util import bot_utils | 25 from telemetry.util import bot_utils |
21 | 26 |
22 | 27 |
23 SCRIPT_TESTS = [ | 28 SCRIPT_TESTS = [ |
24 { | 29 { |
25 'args': [ | 30 'args': [ |
26 'gpu_perftests', | 31 'gpu_perftests', |
27 '--adb-path', | 32 '--adb-path', |
28 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', | 33 'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb', |
(...skipping 763 matching lines...)
792 def tests_are_up_to_date(waterfall): | 797 def tests_are_up_to_date(waterfall): |
793 tests = generate_all_tests(waterfall) | 798 tests = generate_all_tests(waterfall) |
794 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), | 799 tests_data = json.dumps(tests, indent=2, separators=(',', ': '), |
795 sort_keys=True) | 800 sort_keys=True) |
796 config_file = get_json_config_file_for_waterfall(waterfall) | 801 config_file = get_json_config_file_for_waterfall(waterfall) |
797 with open(config_file, 'r') as fp: | 802 with open(config_file, 'r') as fp: |
798 config_data = fp.read().strip() | 803 config_data = fp.read().strip() |
799 return tests_data == config_data | 804 return tests_data == config_data |
800 | 805 |
801 | 806 |
802 def update_all_tests(waterfall): | 807 def update_all_tests(waterfalls): |
803 tests = generate_all_tests(waterfall) | 808 all_tests = {} |
804 config_file = get_json_config_file_for_waterfall(waterfall) | 809 for w in waterfalls: |
805 with open(config_file, 'w') as fp: | 810 tests = generate_all_tests(w) |
806 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 811 config_file = get_json_config_file_for_waterfall(w) |
807 fp.write('\n') | 812 with open(config_file, 'w') as fp: |
813 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | |
814 fp.write('\n') | |
815 all_tests.update(tests) | |
816 verify_all_tests_in_benchmark_csv(all_tests) | |
808 | 817 |
809 | 818 |
810 def src_dir(): | 819 def src_dir(): |
811 file_path = os.path.abspath(__file__) | 820 file_path = os.path.abspath(__file__) |
812 return os.path.dirname(os.path.dirname( | 821 return os.path.dirname(os.path.dirname( |
813 os.path.dirname(os.path.dirname(file_path)))) | 822 os.path.dirname(os.path.dirname(file_path)))) |
814 | 823 |
815 | 824 |
825 BenchmarkMetadata = collections.namedtuple( | |
826 'BenchmarkMetadata', 'emails component') | |
827 NON_TELEMETRY_BENCHMARKS = { | |
828 "angle_perftests": BenchmarkMetadata("jmadill@chromium.org", None), | |
829 "cc_perftests": BenchmarkMetadata("enne@chromium.org", None), | |
830 "gpu_perftests": BenchmarkMetadata("reveman@chromium.org", None), | |
831 "tracing_perftests": BenchmarkMetadata( | |
832 "kkraynov@chromium.org, primiano@chromium.org", None), | |
833 "load_library_perf_tests": BenchmarkMetadata(None, None), | |
834 "media_perftests": BenchmarkMetadata("crouleau@chromium.org", None), | |
835 "performance_browser_tests": BenchmarkMetadata( | |
836 "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None) | |
837 } | |
838 | |
839 | |
840 def get_benchmark_metadata(): | |
nednguyen 2017/03/21 19:02:04: nits: get_all_benchmarks_metadata()
Also can you
ashleymarie1 2017/03/21 19:58:55: Done.
841 metadata = NON_TELEMETRY_BENCHMARKS | |
842 benchmark_list = current_benchmarks(False) | |
843 | |
844 for benchmark in benchmark_list: | |
845 emails = decorators.GetEmails(benchmark) | |
846 if emails: | |
847 emails = ", ".join(emails) | |
848 metadata[benchmark.Name()] = BenchmarkMetadata( | |
849 emails, decorators.GetComponent(benchmark)) | |
850 return metadata | |
851 | |
852 | |
853 def verify_all_tests_in_benchmark_csv(tests): | |
nednguyen 2017/03/21 19:02:05: make this "def verify_all_tests_in_benchmark_csv(t
ashleymarie1 2017/03/21 19:58:55: Done.
854 benchmark_metadata = get_benchmark_metadata() | |
855 benchmark_names = sets.Set(benchmark_metadata) | |
856 test_names = sets.Set() | |
857 for t in tests: | |
858 scripts = [] | |
859 if 'isolated_scripts' in tests[t]: | |
860 scripts = tests[t]['isolated_scripts'] | |
861 elif 'scripts' in tests[t]: | |
862 scripts = tests[t]['scripts'] | |
863 else: | |
864 assert('Android Compile' == t | |
nednguyen 2017/03/21 19:02:05: +1 I like this assertion to make sure that people
ashleymarie1 2017/03/21 19:58:55: Done.
865 or 'Android arm64 Compile' == t | |
866 or t.startswith('AAAAA')) | |
867 for s in scripts: | |
868 name = s['name'] | |
869 name = re.sub('\\.reference$', '', name) | |
870 test_names.add(name) | |
871 | |
872 for test in benchmark_names - test_names: | |
873 print 'remove ' + test + ' from NON_TELEMETRY_BENCHMARKS' | |
nednguyen 2017/03/21 19:02:05: Instead of printing the message here, you can do:
nednguyen 2017/03/21 19:02:05: nits: "Remove"
ashleymarie1 2017/03/21 19:58:55: Done.
874 for test in test_names - benchmark_names: | |
875 print 'add ' + test + ' to NON_TELEMETRY_BENCHMARKS' | |
nednguyen 2017/03/21 19:02:05: nits "Add"
ashleymarie1 2017/03/21 19:58:55: Done.
876 | |
877 assert benchmark_names == test_names, 'Please update NON_TELEMETRY_BENCHMARKS' | |
878 | |
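One plausible reading of the truncated review suggestion above ("Instead of printing the message here, you can do:") is to fold the add/remove messages into the assertion itself. A minimal sketch, assuming that intent (illustrative only, not the reviewer's actual snippet):

  # Hypothetical alternative: accumulate the discrepancies and surface them
  # in the assertion message instead of printing beforehand.
  error_messages = []
  for test in benchmark_names - test_names:
    error_messages.append('Remove ' + test + ' from NON_TELEMETRY_BENCHMARKS')
  for test in test_names - benchmark_names:
    error_messages.append('Add ' + test + ' to NON_TELEMETRY_BENCHMARKS')
  assert benchmark_names == test_names, (
      'Please update NON_TELEMETRY_BENCHMARKS:\n' + '\n'.join(error_messages))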
879 | |
880 def update_benchmark_csv(): | |
881 """Updates go/chrome-benchmarks. | |
882 | |
883 Updates telemetry/perf/benchmark.csv containing the current benchmark names, | |
884 owners, and components. | |
885 """ | |
886 header_data = [['AUTOGENERATED FILE DO NOT EDIT'], | |
887 ['See //tools/perf/generate_perf_data.py to make changes'], | |
888 ['Benchmark name', 'Individual owners', 'Component'] | |
889 ] | |
890 | |
891 csv_data = [] | |
892 benchmark_metadata = get_benchmark_metadata() | |
893 for benchmark_name in benchmark_metadata: | |
894 csv_data.append([ | |
895 benchmark_name, | |
896 benchmark_metadata[benchmark_name].emails, | |
897 benchmark_metadata[benchmark_name].component | |
898 ]) | |
899 | |
900 csv_data = sorted(csv_data, key=lambda b: b[0]) | |
901 csv_data = header_data + csv_data | |
902 | |
903 perf_dir = os.path.join(src_dir(), 'tools', 'perf') | |
904 benchmark_file = os.path.join(perf_dir, 'benchmark.csv') | |
905 with open(benchmark_file, 'wb') as f: | |
906 writer = csv.writer(f) | |
907 writer.writerows(csv_data) | |
908 | |
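For reference, a hypothetical opening of the generated benchmark.csv under this scheme (rows drawn from NON_TELEMETRY_BENCHMARKS above; csv.writer renders a None component as an empty field, and real output would also interleave the Telemetry benchmarks):

  AUTOGENERATED FILE DO NOT EDIT
  See //tools/perf/generate_perf_data.py to make changes
  Benchmark name,Individual owners,Component
  angle_perftests,jmadill@chromium.org,
  cc_perftests,enne@chromium.org,
  gpu_perftests,reveman@chromium.org,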
909 | |
816 def main(args): | 910 def main(args): |
817 parser = argparse.ArgumentParser( | 911 parser = argparse.ArgumentParser( |
818 description=('Generate perf test\' json config. This need to be done ' | 912 description=('Generate perf tests\' json config and benchmark.csv. ' |
819 'anytime you add/remove any existing benchmarks in ' | 913 'This needs to be done anytime you add/remove any existing ' |
820 'tools/perf/benchmarks.')) | 914 'benchmarks in tools/perf/benchmarks.')) |
821 parser.add_argument( | 915 parser.add_argument( |
822 '--validate-only', action='store_true', default=False, | 916 '--validate-only', action='store_true', default=False, |
823 help=('Validate whether the perf json generated will be the same as the ' | 917 help=('Validate whether the perf json generated will be the same as the ' |
824 'existing configs. This does not change the content of existing ' | 918 'existing configs. This does not change the content of existing ' |
825 'configs')) | 919 'configs')) |
826 options = parser.parse_args(args) | 920 options = parser.parse_args(args) |
827 | 921 |
828 waterfall = get_waterfall_config() | 922 waterfall = get_waterfall_config() |
829 waterfall['name'] = 'chromium.perf' | 923 waterfall['name'] = 'chromium.perf' |
830 fyi_waterfall = get_fyi_waterfall_config() | 924 fyi_waterfall = get_fyi_waterfall_config() |
831 fyi_waterfall['name'] = 'chromium.perf.fyi' | 925 fyi_waterfall['name'] = 'chromium.perf.fyi' |
832 | 926 |
833 if options.validate_only: | 927 if options.validate_only: |
834 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): | 928 if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall): |
835 print 'All the perf JSON config files are up-to-date. \\o/' | 929 print 'All the perf JSON config files are up-to-date. \\o/' |
836 return 0 | 930 return 0 |
837 else: | 931 else: |
838 print ('The perf JSON config files are not up-to-date. Please run %s ' | 932 print ('The perf JSON config files are not up-to-date. Please run %s ' |
839 'without --validate-only flag to update the perf JSON ' | 933 'without --validate-only flag to update the perf JSON ' |
840 'configs.') % sys.argv[0] | 934 'configs and benchmark.csv.') % sys.argv[0] |
841 return 1 | 935 return 1 |
842 else: | 936 else: |
843 update_all_tests(fyi_waterfall) | 937 update_all_tests([fyi_waterfall, waterfall]) |
844 update_all_tests(waterfall) | 938 update_benchmark_csv() |
845 return 0 | 939 return 0 |
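Typical invocations, assuming a standard Chromium checkout and that main()'s return value becomes the process exit code (a sketch; wrapper scripts may differ):

  # Regenerate the perf JSON configs and tools/perf/benchmark.csv in place.
  python tools/perf/generate_perf_data.py

  # Exit 0 if the checked-in configs already match what would be generated,
  # exit 1 otherwise; this mode does not modify any files.
  python tools/perf/generate_perf_data.py --validate-only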