| Index: tools/perf/generate_perf_json.py
|
| diff --git a/tools/perf/generate_perf_json.py b/tools/perf/generate_perf_json.py
|
| index 78a9e4b17cfb3f42f014392379253b105f175acb..49e739f0d0368e48c0aa58b9b242f8c09b1995d3 100755
|
| --- a/tools/perf/generate_perf_json.py
|
| +++ b/tools/perf/generate_perf_json.py
|
| @@ -7,7 +7,7 @@
|
| the src/testing/buildbot directory. Maintaining these files by hand is
|
| too unwieldy.
|
| """
|
| -
|
| +import argparse
|
| import json
|
| import os
|
| import sys
|
| @@ -778,26 +778,68 @@ def generate_all_tests(waterfall):
|
|
|
| tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
|
| tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {}
|
| - filename = '%s.json' % waterfall['name']
|
| + return tests
|
|
|
| +
|
| +def get_json_config_file_for_waterfall(waterfall):
|
| + filename = '%s.json' % waterfall['name']
|
| buildbot_dir = os.path.join(src_dir(), 'testing', 'buildbot')
|
| - with open(os.path.join(buildbot_dir, filename), 'w') as fp:
|
| + return os.path.join(buildbot_dir, filename)
|
| +
|
| +
|
| +def tests_are_up_to_date(waterfall):
|
| + tests = generate_all_tests(waterfall)
|
| + tests_data = json.dumps(tests, indent=2, separators=(',', ': '),
|
| + sort_keys=True)
|
| + config_file = get_json_config_file_for_waterfall(waterfall)
|
| + with open(config_file, 'r') as fp:
|
| + config_data = fp.read().strip()
|
| + return tests_data == config_data
|
| +
|
| +
|
| +def update_all_tests(waterfall):
|
| + tests = generate_all_tests(waterfall)
|
| + config_file = get_json_config_file_for_waterfall(waterfall)
|
| + with open(config_file, 'w') as fp:
|
| json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
|
| fp.write('\n')
|
|
|
| +
|
| def src_dir():
|
| file_path = os.path.abspath(__file__)
|
| return os.path.dirname(os.path.dirname(os.path.dirname(file_path)))
|
|
|
| -def main():
|
| +
|
| +def main(args):
|
| + parser = argparse.ArgumentParser(
|
| + description=('Generate perf tests\' JSON config. This needs to be done '
|
| + 'anytime you add or remove benchmarks in '
|
| + 'tools/perf/benchmarks.'))
|
| + parser.add_argument(
|
| + '--validate-only', action='store_true', default=False,
|
| + help=('Validate whether the perf json generated will be the same as the '
|
| + 'existing configs. This does not change the content of existing '
|
| + 'configs.'))
|
| + options = parser.parse_args(args)
|
| +
|
| waterfall = get_waterfall_config()
|
| waterfall['name'] = 'chromium.perf'
|
| fyi_waterfall = get_fyi_waterfall_config()
|
| fyi_waterfall['name'] = 'chromium.perf.fyi'
|
|
|
| - generate_all_tests(fyi_waterfall)
|
| - generate_all_tests(waterfall)
|
| + if options.validate_only:
|
| + if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall):
|
| + print 'All the perf JSON config files are up-to-date. \\o/'
|
| + return 0
|
| + else:
|
| + print ('The perf JSON config files are not up-to-date. Please run %s '
|
| + 'without --validate-only flag to update the perf JSON '
|
| + 'configs.') % sys.argv[0]
|
| + return 1
|
| + else:
|
| + update_all_tests(fyi_waterfall)
|
| + update_all_tests(waterfall)
|
| return 0
|
|
|
| if __name__ == '__main__':
|
| - sys.exit(main())
|
| + sys.exit(main(sys.argv[1:]))
|
|
|