Index: tools/gen_bench_expectations_from_codereview.py
diff --git a/tools/gen_bench_expectations_from_codereview.py b/tools/gen_bench_expectations_from_codereview.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc28996fbe036e832321c3b440a591f39878bec9
--- /dev/null
+++ b/tools/gen_bench_expectations_from_codereview.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Generate new bench expectations from results of trybots on a code review."""
+
+
+import collections
+import compare_codereview
+import os
+import re
+import shutil
+import subprocess
+import sys
+
+
+BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
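+# The two '%s' slots are the builder name and the build number, e.g.
+# (values made up for illustration):
+#   BENCH_DATA_URL % ('Perf-Win7-ShuttleA-HD2000-x86-Release', '2000')
+#   -> 'gs://chromium-skia-gm/perfdata/Perf-Win7-ShuttleA-HD2000-x86-Release/2000/*'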
+CHECKOUT_PATH = os.path.realpath(os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), os.pardir))
+TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
+
+
+def find_all_builds(codereview_url):
+  """Finds and returns information about trybot runs for a code review.
+
+  Args:
+    codereview_url: URL of the code review in question.
+
+  Returns:
+    List of NamedTuples: (builder_name, build_number, is_finished)
+  """
+  results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
+  TryBuild = collections.namedtuple(
+      'TryBuild', ['builder_name', 'build_number', 'is_finished'])
+  try_builds = []
+
+  for builder, data in results.iteritems():
+    if builder.startswith('Perf'):
+      try_builds.append(TryBuild(builder, data.url.split('/')[-1],
+                                 data.status != 'pending'))
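+  # Each entry looks like, e.g. (illustrative values only):
+  #   TryBuild(builder_name='Perf-Win7-ShuttleA-HD2000-x86-Release-Trybot',
+  #            build_number='1234', is_finished=True)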
+  return try_builds
+
+
+def get_bench_data(builder, build_num, dest_dir):
+  """Downloads the bench data for the given builder at the given build_num.
+
+  Args:
+    builder: string; name of the builder.
+    build_num: string; build number.
+    dest_dir: string; destination directory for the bench data.
+  """
+  url = BENCH_DATA_URL % (builder, build_num)
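+  # Roughly equivalent to running (sketch):
+  #   gsutil cp -R gs://chromium-skia-gm/perfdata/<builder>/<build_num>/* <dest_dir>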
+  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE)
+
+
+def find_revision_from_downloaded_data(dest_dir):
+  """Finds the revision at which the downloaded data was generated.
+
+  Args:
+    dest_dir: string; directory holding the downloaded data.
+
+  Returns:
+    The revision (git commit hash) at which the downloaded data was
+    generated, or None if no revision can be found.
+  """
+  for data_file in os.listdir(dest_dir):
+    match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*', data_file)
+    if match:
+      return match.group('revision')
+  return None
+
+
+class TrybotNotFinishedError(Exception):
+  pass
+
+
+def gen_bench_expectations_from_codereview(codereview_url,
+                                           error_on_unfinished=True):
+  """Generate bench expectations from a code review.
+
+  Scans the given code review for Perf trybot runs. Downloads the results of
+  finished trybots and uses them to generate new expectations for their
+  waterfall counterparts.
+
+  Args:
+    codereview_url: string; URL of the code review.
+    error_on_unfinished: bool; throw an error if any trybot has not finished.
+  """
+  try_builds = find_all_builds(codereview_url)
+
+  # Verify that all trybots have finished running.
+  if error_on_unfinished:
+    for try_build in try_builds:
+      if not try_build.is_finished:
+        raise TrybotNotFinishedError('%s: #%s is not finished.' % (
+            try_build.builder_name,
+            try_build.build_number))
+
+  failed_data_pull = []
+  failed_gen_expectations = []
+
+  if os.path.isdir(TMP_BENCH_DATA_DIR):
+    shutil.rmtree(TMP_BENCH_DATA_DIR)
+
+  for try_build in try_builds:
+    try_builder = try_build.builder_name
+    builder = try_builder.replace('-Trybot', '')
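+    # e.g. 'Perf-Win7-ShuttleA-HD2000-x86-Release-Trybot'
+    # -> 'Perf-Win7-ShuttleA-HD2000-x86-Release' (name is illustrative).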
+
+    # Download the data.
+    dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
+    os.makedirs(dest_dir)
+    try:
+      get_bench_data(try_builder, try_build.build_number, dest_dir)
+    except subprocess.CalledProcessError:
+      failed_data_pull.append(try_builder)
+      continue
+
+    # Find the revision at which the data was generated.
+    revision = find_revision_from_downloaded_data(dest_dir)
+    if not revision:
+      # If we can't find a revision, then something is wrong with the data we
+      # downloaded. Skip this builder.
+      failed_data_pull.append(try_builder)
+      continue
+
+    # Generate new expectations.
+    output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
+                               'bench_expectations_%s.txt' % builder)
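+    # i.e. expectations/bench/bench_expectations_<builder>.txt.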
+    try:
+      subprocess.check_call(['python',
+                             os.path.join(CHECKOUT_PATH, 'bench',
+                                          'gen_bench_expectations.py'),
+                             '-b', builder, '-o', output_file,
+                             '-d', dest_dir, '-r', revision])
+    except subprocess.CalledProcessError:
+      failed_gen_expectations.append(builder)
+
+  failure = ''
+  if failed_data_pull:
+    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
+  if failed_gen_expectations:
+    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
+        failed_gen_expectations)
+  if failure:
+    raise Exception(failure)
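+
+
+# Typical invocation (the review URL below is made up):
+#   python tools/gen_bench_expectations_from_codereview.py \
+#       https://codereview.chromium.org/123456789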
+if __name__ == '__main__':
+  gen_bench_expectations_from_codereview(sys.argv[1])