Chromium Code Reviews

Index: tools/perf/core/perf_json_generator.py
diff --git a/tools/perf/core/perf_json_generator.py b/tools/perf/core/perf_json_generator.py
index 5e908f6f53d8be4f045c53ba7daf1b58e89ebc84..4d4f7acaff07bb9b209158fa8d88b76f1a7e0805 100755
--- a/tools/perf/core/perf_json_generator.py
+++ b/tools/perf/core/perf_json_generator.py
@@ -4,18 +4,23 @@
 # found in the LICENSE file.
 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
-the src/testing/buildbot directory. Maintaining these files by hand is
-too unwieldy.
+the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
+directory. Maintaining these files by hand is too unwieldy.
 """

 import argparse
+import collections
+import csv
 import json
 import os
+import re
 import sys
+import sets

 from chrome_telemetry_build import chromium_config

 sys.path.append(chromium_config.GetTelemetryDir())

 from telemetry import benchmark as benchmark_module
+from telemetry import decorators
 from telemetry.core import discover
 from telemetry.util import bot_utils
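One note on the new imports: the sets module has been deprecated since Python 2.6, and the built-in set type covers everything this change needs. A minimal sketch of the equivalent spellings for the two call sites in verify_all_tests_in_benchmark_csv() further down, assuming the Python 2 runtime this script targets:

    benchmark_names = set(benchmark_metadata)  # instead of sets.Set(benchmark_metadata)
    test_names = set()                         # instead of sets.Set()

Membership tests and the set differences used below behave identically.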
@@ -799,12 +804,16 @@ def tests_are_up_to_date(waterfall):
   return tests_data == config_data


-def update_all_tests(waterfall):
-  tests = generate_all_tests(waterfall)
-  config_file = get_json_config_file_for_waterfall(waterfall)
-  with open(config_file, 'w') as fp:
-    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
-    fp.write('\n')
+def update_all_tests(waterfalls):
+  all_tests = {}
+  for w in waterfalls:
+    tests = generate_all_tests(w)
+    config_file = get_json_config_file_for_waterfall(w)
+    with open(config_file, 'w') as fp:
+      json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
+      fp.write('\n')
+    all_tests.update(tests)
+  verify_all_tests_in_benchmark_csv(all_tests)


 def src_dir():
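For context, update_all_tests() now merges every waterfall's generated tests into a single dict before handing it to verify_all_tests_in_benchmark_csv(). A hypothetical sliver of that merged dict, inferred from how the verification code below walks it (the builder and benchmark names here are illustrative only):

    all_tests = {
        'Android Nexus5 Perf': {  # one entry per builder
            'isolated_scripts': [
                {'name': 'smoothness.top_25_smooth'},
                {'name': 'smoothness.top_25_smooth.reference'},
            ],
        },
    }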
@@ -813,11 +822,96 @@ def src_dir():
       os.path.dirname(os.path.dirname(file_path))))


+BenchmarkMetadata = collections.namedtuple(
+    'BenchmarkMetadata', 'emails component')
+NON_TELEMETRY_BENCHMARKS = {
+    "angle_perftests": BenchmarkMetadata("jmadill@chromium.org", None),
+    "cc_perftests": BenchmarkMetadata("enne@chromium.org", None),
+    "gpu_perftests": BenchmarkMetadata("reveman@chromium.org", None),
+    "tracing_perftests": BenchmarkMetadata(
+        "kkraynov@chromium.org, primiano@chromium.org", None),
+    "load_library_perf_tests": BenchmarkMetadata(None, None),
+    "media_perftests": BenchmarkMetadata("crouleau@chromium.org", None),
+    "performance_browser_tests": BenchmarkMetadata(
+        "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None)
+}
+
+
+def get_benchmark_metadata():
nednguyen (2017/03/21 19:02:04):
  nits: get_all_benchmarks_metadata()
  Also can you

ashleymarie1 (2017/03/21 19:58:55):
  Done.
+  metadata = NON_TELEMETRY_BENCHMARKS
+  benchmark_list = current_benchmarks(False)
+
+  for benchmark in benchmark_list:
+    emails = decorators.GetEmails(benchmark)
+    if emails:
+      emails = ", ".join(emails)
+    metadata[benchmark.Name()] = BenchmarkMetadata(
+        emails, decorators.GetComponent(benchmark))
+  return metadata
+
+
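One gotcha in get_benchmark_metadata() as written: metadata = NON_TELEMETRY_BENCHMARKS binds the module-level dict rather than copying it, so the loop above silently mutates the global. That is harmless for this script's current call pattern, but a shallow copy would be safer; a minimal sketch:

    metadata = dict(NON_TELEMETRY_BENCHMARKS)  # copy, so the module-level dict stays pristine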
+def verify_all_tests_in_benchmark_csv(tests):
nednguyen (2017/03/21 19:02:05):
  make this "def verify_all_tests_in_benchmark_csv(t

ashleymarie1 (2017/03/21 19:58:55):
  Done.
+  benchmark_metadata = get_benchmark_metadata()
+  benchmark_names = sets.Set(benchmark_metadata)
+  test_names = sets.Set()
+  for t in tests:
+    scripts = []
+    if 'isolated_scripts' in tests[t]:
+      scripts = tests[t]['isolated_scripts']
+    elif 'scripts' in tests[t]:
+      scripts = tests[t]['scripts']
+    else:
+      assert('Android Compile' == t
nednguyen (2017/03/21 19:02:05):
  +1 I like this assertion to make sure that people

ashleymarie1 (2017/03/21 19:58:55):
  Done.
+          or 'Android arm64 Compile' == t
+          or t.startswith('AAAAA'))
+    for s in scripts:
+      name = s['name']
+      name = re.sub('\\.reference$', '', name)
+      test_names.add(name)
+
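The re.sub() call above folds a benchmark's .reference companion run into the same name, so both variants map to one benchmark.csv entry. For example (benchmark name illustrative):

    >>> import re
    >>> re.sub('\\.reference$', '', 'smoothness.top_25_smooth.reference')
    'smoothness.top_25_smooth'
    >>> re.sub('\\.reference$', '', 'smoothness.top_25_smooth')
    'smoothness.top_25_smooth'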
+  for test in benchmark_names - test_names:
+    print 'remove ' + test + ' from NON_TELEMETRY_BENCHMARKS'
nednguyen (2017/03/21 19:02:05):
  Instead of printing the message here, you can do:

nednguyen (2017/03/21 19:02:05):
  nits: "Remove"

ashleymarie1 (2017/03/21 19:58:55):
  Done.
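nednguyen's first comment is cut off above; it presumably suggests surfacing the mismatches through the assertion failure rather than printing them beforehand. One hypothetical shape of that suggestion:

    error_messages = []
    for test in benchmark_names - test_names:
      error_messages.append('Remove ' + test + ' from NON_TELEMETRY_BENCHMARKS')
    for test in test_names - benchmark_names:
      error_messages.append('Add ' + test + ' to NON_TELEMETRY_BENCHMARKS')
    assert benchmark_names == test_names, (
        'Please update NON_TELEMETRY_BENCHMARKS:\n' + '\n'.join(error_messages))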
+  for test in test_names - benchmark_names:
+    print 'add ' + test + ' to NON_TELEMETRY_BENCHMARKS'
nednguyen (2017/03/21 19:02:05):
  nits "Add"

ashleymarie1 (2017/03/21 19:58:55):
  Done.
+
+  assert benchmark_names == test_names, 'Please update NON_TELEMETRY_BENCHMARKS'
+
+
+def update_benchmark_csv():
+  """Updates go/chrome-benchmarks.
+
+  Updates tools/perf/benchmark.csv containing the current benchmark names,
+  owners, and components.
+  """
+  header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
+                 ['See //tools/perf/generate_perf_data.py to make changes'],
+                 ['Benchmark name', 'Individual owners', 'Component']
+                ]
+
+  csv_data = []
+  benchmark_metadata = get_benchmark_metadata()
+  for benchmark_name in benchmark_metadata:
+    csv_data.append([
+        benchmark_name,
+        benchmark_metadata[benchmark_name].emails,
+        benchmark_metadata[benchmark_name].component
+    ])
+
+  csv_data = sorted(csv_data, key=lambda b: b[0])
+  csv_data = header_data + csv_data
+
+  perf_dir = os.path.join(src_dir(), 'tools', 'perf')
+  benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
+  with open(benchmark_file, 'wb') as f:
+    writer = csv.writer(f)
+    writer.writerows(csv_data)
+
+
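Given the metadata above, the generated benchmark.csv starts with the two notice rows and the column header, followed by one row per benchmark in sorted order. Roughly like this (the rows shown follow from NON_TELEMETRY_BENCHMARKS, with csv.writer rendering a None component as an empty field):

    AUTOGENERATED FILE DO NOT EDIT
    See //tools/perf/generate_perf_data.py to make changes
    Benchmark name,Individual owners,Component
    angle_perftests,jmadill@chromium.org,
    cc_perftests,enne@chromium.org,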
 def main(args):
   parser = argparse.ArgumentParser(
-      description=('Generate perf test\' json config. This need to be done '
-                   'anytime you add/remove any existing benchmarks in '
-                   'tools/perf/benchmarks.'))
+      description=('Generate perf tests\' json config and benchmark.csv. '
+                   'This needs to be done anytime you add/remove any existing '
+                   'benchmarks in tools/perf/benchmarks.'))
   parser.add_argument(
       '--validate-only', action='store_true', default=False,
       help=('Validate whether the perf json generated will be the same as the '
@@ -837,9 +931,9 @@ def main(args):
     else:
       print ('The perf JSON config files are not up-to-date. Please run %s '
              'without --validate-only flag to update the perf JSON '
-             'configs.') % sys.argv[0]
+             'configs and benchmark.csv.') % sys.argv[0]
       return 1
   else:
-    update_all_tests(fyi_waterfall)
-    update_all_tests(waterfall)
+    update_all_tests([fyi_waterfall, waterfall])
+    update_benchmark_csv()
   return 0
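For reference, a typical invocation after adding or retiring a benchmark might look like this, run from the root of a Chromium checkout (a sketch; the exact entry point may differ):

    # Regenerate the JSON configs and benchmark.csv in place:
    python tools/perf/core/perf_json_generator.py

    # Only check that the checked-in files are current (returns 1 if stale):
    python tools/perf/core/perf_json_generator.py --validate-only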