| Index: tools/gen_bench_expectations_from_codereview.py |
| diff --git a/tools/gen_bench_expectations_from_codereview.py b/tools/gen_bench_expectations_from_codereview.py |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..8e77381e1ead99fc5ea2dc7f0d0248e776c913c2 |
| --- /dev/null |
| +++ b/tools/gen_bench_expectations_from_codereview.py |
| @@ -0,0 +1,141 @@ |
| +#!/usr/bin/python |
| + |
| +# Copyright (c) 2014 The Chromium Authors. All rights reserved. |
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| + |
| +"""Generate new bench expectations from results of trybots on a code review.""" |
|
epoger 2014/05/27 15:29:56: Is this tool intended to be run by humans? If so:
borenet 2014/05/27 19:36:14: For now, the primary user will be the RecreateSKPs
benchen 2014/05/27 20:51:00: Yes, that'll be cool. On 2014/05/27 19:36:14, bore
|
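Since the thread above asks whether humans will run this tool: the entry point at the bottom of the file takes the code review URL as its only command-line argument, so a human or an automated caller could invoke it roughly as follows. The issue URL shown is a made-up placeholder, not a real review.

    python tools/gen_bench_expectations_from_codereview.py https://codereview.chromium.org/123456789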
| + |
| + |
| +import collections |
| +import compare_codereview |
| +import os |
| +import re |
| +import subprocess |
| +import sys |
| + |
| + |
| +BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*' |
| + |
| + |
| +def find_all_builds(codereview_url): |
| + """Finds and returns information about trybot runs for a code review. |
| + |
| + Args: |
| + codereview_url: URL of the codereview in question. |
| + |
| + Returns: |
| + List of NamedTuples: (builder_name, build_number, is_finished) |
| + """ |
| + results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url) |
| + TryBuild = collections.namedtuple( |
| + 'TryBuild', ['builder_name', 'build_number', 'is_finished']) |
| + try_builds = [] |
| + |
| + for builder, data in results.iteritems(): |
| + if builder.startswith('Perf'): |
| + try_builds.append(TryBuild(builder, data.url.split('/')[-1], |
| + data.status != 'pending')) |
| + return try_builds |
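As a rough illustration of what find_all_builds produces, assuming compare_codereview reports a status and a builder-page URL per builder; the builder names, build numbers, and review URL below are invented placeholders:

    # Hypothetical call and result; all values are placeholders.
    try_builds = find_all_builds('https://codereview.chromium.org/123456789')
    # e.g. [TryBuild(builder_name='Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot',
    #                build_number='1234', is_finished=True),
    #       TryBuild(builder_name='Perf-Win7-ShuttleA-HD2000-x86-Debug-Trybot',
    #                build_number='1235', is_finished=False)]
    unfinished = [b.builder_name for b in try_builds if not b.is_finished]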
| + |
| + |
| +def get_bench_data(builder, build_num, dest_dir): |
| + """Download the bench data for the given builder at the given build_num. |
| + |
| + Args: |
| + builder: string; name of the builder. |
| + build_num: string; build number. |
| + dest_dir: string; destination directory for the bench data. |
| + """ |
| + url = BENCH_DATA_URL % (builder, build_num) |
| + subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir], |
| + stdout=subprocess.PIPE, |
| + stderr=subprocess.PIPE) |
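To make the download step concrete, this is what the URL expansion and the resulting gsutil invocation look like for a hypothetical builder and build number (both placeholders; real values come from the trybot results on the review):

    # Hypothetical values for illustration only.
    url = BENCH_DATA_URL % ('Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot', '1234')
    # url == 'gs://chromium-skia-gm/perfdata/Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot/1234/*'
    # get_bench_data then runs: gsutil cp -R <url> <dest_dir>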
| + |
| + |
| +def find_revision_from_downloaded_data(dest_dir): |
| + """Finds the revision at which the downloaded data was generated. |
| + |
| + Args: |
| + dest_dir: string; directory holding the downloaded data. |
| + |
| + Returns: |
| + The revision (git commit hash) at which the downloaded data was |
| + generated, or None if no revision can be found. |
| + """ |
| + for data_file in os.listdir(dest_dir): |
| + match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data*', data_file) |
|
benchen 2014/05/23 22:51:02: The files that end with _data are microbenches onl
borenet 2014/05/27 19:36:14: It doesn't really matter for this use case; all of
benchen 2014/05/27 20:51:00: That's right, thanks. On 2014/05/27 19:36:14, bore
|
| + if match: |
| + return match.group('revision') |
| + return None |
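A quick sketch of how the regex above pulls the revision out of a downloaded file name; the file name and hash here are invented for illustration:

    import re
    # Hypothetical file name; real names come from the downloaded bench data.
    name = 'bench_0123456789abcdef0123456789abcdef01234567_data_skp_multi_4_cpu.json'
    match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data*', name)
    if match:
        print match.group('revision')  # 0123456789abcdef0123456789abcdef01234567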
| + |
| + |
| +def gen_bench_expectations_from_codereview(codereview_url, |
| + error_on_unfinished=True): |
| + """Generate bench expectations from a code review. |
| + |
| + Scans the given code review for Perf trybot runs. Downloads the results of |
| + finished trybots and uses them to generate new expectations for their |
| + waterfall counterparts. |
| + |
| + Args: |
| +    codereview_url: string; URL of the code review. |
| + error_on_unfinished: bool; throw an error if any trybot has not finished. |
| + """ |
| + try_builds = find_all_builds(codereview_url) |
| + |
| + # Verify that all trybots have finished running. |
| + if error_on_unfinished: |
| + for try_build in try_builds: |
| + if not try_build.is_finished: |
| + raise Exception('%s: #%s is not finished.' % (try_build.builder_name, |
| + try_build.build_number)) |
|
borenet 2014/05/23 22:24:47: Maybe the error_on_unfinished should be pulled out
epoger 2014/05/27 15:29:56: I think either one is fine... Maybe we should thr
borenet 2014/05/27 19:36:14: Done.
|
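The thread above discusses reworking how unfinished trybots are reported. One possible shape, shown purely as a sketch and not necessarily what was landed, is to collect every unfinished trybot and raise a single error listing them all:

    # Hypothetical variant of the check above: gather all unfinished trybots
    # before raising, so the error message names every one of them at once.
    if error_on_unfinished:
        unfinished = [b for b in try_builds if not b.is_finished]
        if unfinished:
            raise Exception('Trybots still running: %s' % ', '.join(
                '%s #%s' % (b.builder_name, b.build_number) for b in unfinished))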
| + failed_data_pull = [] |
| + failed_gen_expectations = [] |
| + |
| + for try_build in try_builds: |
| + builder = try_build.builder_name |
| + |
| + # Download the data. |
| + dest_dir = os.path.join('.bench_data', builder) |
|
epoger 2014/05/27 15:29:56: please use a constant to hold '.bench_data'
borenet 2014/05/27 19:36:14: Done.
|
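Following the suggestion above, the literal would move into a module-level constant. A minimal sketch; the constant name is an assumption, not necessarily what was used in the updated patch:

    # Hypothetical constant name for the local download directory.
    BENCH_DATA_DIR = '.bench_data'

    # Then, inside the loop over try_builds:
    dest_dir = os.path.join(BENCH_DATA_DIR, builder)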
| + if not os.path.isdir(dest_dir): |
| + os.makedirs(dest_dir) |
| + try: |
| + get_bench_data(builder, try_build.build_number, dest_dir) |
| + except subprocess.CalledProcessError: |
| + failed_data_pull.append(builder) |
| + continue |
| + |
| + # Find the revision at which the data was generated. |
| + revision = find_revision_from_downloaded_data(dest_dir) |
| + if not revision: |
| + # If we can't find a revision, then something is wrong with the data we |
| + # downloaded. Skip this builder. |
| + failed_data_pull.append(builder) |
| + continue |
| + |
| + # Generate new expectations. |
| + output_file = os.path.join('expectations', 'bench', |
|
epoger 2014/05/27 15:29:56: This makes assumptions about what directory this s
borenet 2014/05/27 19:36:14: Done.
|
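The comment above concerns the script assuming it runs from the repository root. One common way to drop that assumption, shown only as a sketch and not necessarily how it was resolved in the updated patch, is to anchor paths to the script's own location (this file lives in tools/, one level below the root):

    # Hypothetical: resolve paths relative to this file instead of the CWD.
    SKIA_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    expectations_dir = os.path.join(SKIA_ROOT, 'expectations', 'bench')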
| + 'bench_expectations_%s.txt' % |
| +                             builder.replace('-Trybot', '')) |
| + try: |
| + subprocess.check_call(['python', 'bench/gen_bench_expectations.py', |
|
epoger 2014/05/27 15:29:56: Same as above (about CWD), but also / vs os.path.j
borenet 2014/05/27 19:36:14: Done.
|
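Same idea for the child-script invocation flagged above: the path to gen_bench_expectations.py can be built with os.path.join rather than a hard-coded relative path. Again only a sketch, reusing the hypothetical SKIA_ROOT from the previous note:

    # Hypothetical: locate the helper script relative to the repo root and run
    # it with the same interpreter as this script.
    gen_script = os.path.join(SKIA_ROOT, 'bench', 'gen_bench_expectations.py')
    subprocess.check_call([sys.executable, gen_script,
                           '-b', builder, '-o', output_file,
                           '-d', dest_dir, '-r', revision])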
| + '-b', builder, '-o', output_file, |
| + '-d', dest_dir, '-r', revision]) |
| + except subprocess.CalledProcessError: |
| + failed_gen_expectations.append(builder) |
| + |
| + failure = '' |
| + if failed_data_pull: |
| + failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull) |
| + if failed_gen_expectations: |
| + failure += 'Failed to generate expectations for: %s\n\n' % ','.join( |
| + failed_gen_expectations) |
| + if failure: |
| + raise Exception(failure) |
| + |
| + |
| +if __name__ == '__main__': |
| + gen_bench_expectations_from_codereview(sys.argv[1]) |
| + |