Chromium Code Reviews

Unified Diff: tools/gen_bench_expectations_from_codereview.py

Issue 363833004: Fix RecreateSkps (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Add TODO
Index: tools/gen_bench_expectations_from_codereview.py
diff --git a/tools/gen_bench_expectations_from_codereview.py b/tools/gen_bench_expectations_from_codereview.py
index 3d40aa67eb04d3dd4d87a40305e9ebd4ec648c5c..4c41c9a72e3cdda6580676b23ecfc342002c7422 100644
--- a/tools/gen_bench_expectations_from_codereview.py
+++ b/tools/gen_bench_expectations_from_codereview.py
@@ -10,21 +10,27 @@
 import collections
 import compare_codereview
+import json
 import os
 import re
 import shutil
-import subprocess
 import sys
+import urllib2
 
+import fix_pythonpath  # pylint: disable=W0611
+from common.py.utils import shell_utils
 
-BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
+
+BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/bench_*_data_*'
+BUILD_STATUS_SUCCESS = 0
+BUILD_STATUS_WARNINGS = 1
 CHECKOUT_PATH = os.path.realpath(os.path.join(
     os.path.dirname(os.path.abspath(__file__)), os.pardir))
 TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
 
 
 TryBuild = collections.namedtuple(
-    'TryBuild', ['builder_name', 'build_number', 'is_finished'])
+    'TryBuild', ['builder_name', 'build_number', 'is_finished', 'json_url'])
 
 
 def find_all_builds(codereview_url):
@@ -40,12 +46,19 @@ def find_all_builds(codereview_url):
   try_builds = []
   for builder, data in results.iteritems():
     if builder.startswith('Perf'):
-      build_num = data.url.split('/')[-1] if data.url else None
+      build_num = None
+      json_url = None
+      if data.url:
+        split_url = data.url.split('/')
+        build_num = split_url[-1]
+        split_url.insert(split_url.index('builders'), 'json')
+        json_url = '/'.join(split_url)
       is_finished = (data.status not in ('pending', 'try-pending') and
                      build_num is not None)
       try_builds.append(TryBuild(builder_name=builder,
                                  build_number=build_num,
-                                 is_finished=is_finished))
+                                 is_finished=is_finished,
+                                 json_url=json_url))
   return try_builds
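
The json_url computed above splices a 'json' path segment in front of 'builders' in the builder's HTML status URL. A minimal sketch of the same string surgery; the master host and builder name below are hypothetical, only the splicing mirrors this hunk:

url = 'http://build.example.org/p/tryserver.skia/builders/Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot/builds/128'
split_url = url.split('/')
build_num = split_url[-1]                              # '128'
split_url.insert(split_url.index('builders'), 'json')
json_url = '/'.join(split_url)
# json_url is now '.../p/tryserver.skia/json/builders/<builder>/builds/128',
# which is where buildbot's web status serves the build's JSON.
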
@@ -85,9 +98,7 @@ def get_bench_data(builder, build_num, dest_dir):
     dest_dir: string; destination directory for the bench data.
   """
   url = BENCH_DATA_URL % (builder, build_num)
-  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE)
+  shell_utils.run(['gsutil', 'cp', '-R', url, dest_dir])
 
 
 def find_revision_from_downloaded_data(dest_dir):
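
With the tightened BENCH_DATA_URL glob, gsutil copies only the bench data files for a build rather than everything under its perfdata directory. A sketch of how the template expands; the builder name and build number are hypothetical:

BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/bench_*_data_*'  # module constant, as above
url = BENCH_DATA_URL % ('Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot', '128')
# -> 'gs://chromium-skia-gm/perfdata/Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot/128/bench_*_data_*'
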
@@ -111,8 +122,32 @@ class TrybotNotFinishedError(Exception):
   pass
 
 
+def _step_succeeded(try_build, step_name):
+  """Return True if the given step succeeded and False otherwise.
+
+  This function talks to the build master's JSON interface, which is slow.
+
+  TODO(borenet): There are now a few places which talk to the master's JSON
+  interface. Maybe it'd be worthwhile to create a module which does this.
+
+  Args:
+    try_build: TryBuild instance; the build we're concerned about.
+    step_name: string; name of the step we're concerned about.
+  """
+  step_url = '/'.join((try_build.json_url, 'steps', step_name))
+  step_data = json.load(urllib2.urlopen(step_url))
+  # step_data['results'] may not be present if the step succeeded. If present,
+  # it is a list whose first element is a result code, per the documentation:
+  # http://docs.buildbot.net/latest/developer/results.html
+  result = step_data.get('results', [BUILD_STATUS_SUCCESS])[0]
+  if result in (BUILD_STATUS_SUCCESS, BUILD_STATUS_WARNINGS):
+    return True
+  return False
+
+
 def gen_bench_expectations_from_codereview(codereview_url,
-                                           error_on_unfinished=True):
+                                           error_on_unfinished=True,
+                                           error_on_try_failure=True):
   """Generate bench expectations from a code review.
 
   Scans the given code review for Perf trybot runs. Downloads the results of
@@ -122,6 +157,8 @@ def gen_bench_expectations_from_codereview(codereview_url,
   Args:
     url: string; URL of the code review.
     error_on_unfinished: bool; throw an error if any trybot has not finished.
+    error_on_try_failure: bool; throw an error if any trybot failed an
+        important step.
   """
 
   try_builds = find_all_builds(codereview_url)
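
_step_succeeded, added above, leans on the buildbot convention that a step's JSON omits 'results' on success and otherwise carries a list whose first element is a result code (0 SUCCESS, 1 WARNINGS, 2 FAILURE). A self-contained sketch of the check; the payload dicts are illustrative, not captured from a real master:

BUILD_STATUS_SUCCESS = 0   # module constants, as above
BUILD_STATUS_WARNINGS = 1
ok_step = {'isFinished': True}                               # no 'results' key
bad_step = {'isFinished': True, 'results': [2, ['failed']]}  # 2 == FAILURE
for step_data in (ok_step, bad_step):
  result = step_data.get('results', [BUILD_STATUS_SUCCESS])[0]
  print result in (BUILD_STATUS_SUCCESS, BUILD_STATUS_WARNINGS)
# Prints True for ok_step, then False for bad_step.
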
@@ -129,14 +166,32 @@ def gen_bench_expectations_from_codereview(codereview_url,
   if error_on_unfinished and not _all_trybots_finished(try_builds):
     raise TrybotNotFinishedError('Not all trybots have finished.')
 
+  failed_run = []
   failed_data_pull = []
   failed_gen_expectations = []
 
+  # Don't even try to do anything if BenchPictures, PostBench, or
+  # UploadBenchResults failed.
+  for try_build in try_builds:
+    for step in ('BenchPictures', 'PostBench', 'UploadBenchResults'):
+      if not _step_succeeded(try_build, step):
+        msg = '%s failed on %s!' % (step, try_build.builder_name)
+        if error_on_try_failure:
+          raise Exception(msg)
+        print 'WARNING: %s Skipping.' % msg
+        failed_run.append(try_build.builder_name)
+
   if os.path.isdir(TMP_BENCH_DATA_DIR):
     shutil.rmtree(TMP_BENCH_DATA_DIR)
 
   for try_build in try_builds:
     try_builder = try_build.builder_name
+
+    # Even if we're not erroring out on try failures, we can't generate new
+    # expectations for failed bots.
+    if try_builder in failed_run:
+      continue
+
     builder = try_builder.replace('-Trybot', '')
 
     # Download the data.
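
Note that the skip above keys off try_builder before the '-Trybot' suffix is stripped; the expectations themselves are written for the corresponding waterfall builder. Name-mapping sketch (the builder name is hypothetical):

try_builder = 'Perf-Win7-ShuttleA-HD2000-x86-Release-Trybot'
builder = try_builder.replace('-Trybot', '')
# -> 'Perf-Win7-ShuttleA-HD2000-x86-Release'
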
@@ -144,7 +199,7 @@ def gen_bench_expectations_from_codereview(codereview_url,
     os.makedirs(dest_dir)
     try:
       get_bench_data(try_builder, try_build.build_number, dest_dir)
-    except subprocess.CalledProcessError:
+    except shell_utils.CommandFailedException:
       failed_data_pull.append(try_builder)
       continue
@@ -160,12 +215,12 @@ def gen_bench_expectations_from_codereview(codereview_url,
     output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
                                'bench_expectations_%s.txt' % builder)
     try:
-      subprocess.check_call(['python',
-                             os.path.join(CHECKOUT_PATH, 'bench',
-                                          'gen_bench_expectations.py'),
-                             '-b', builder, '-o', output_file,
-                             '-d', dest_dir, '-r', revision])
-    except subprocess.CalledProcessError:
+      shell_utils.run(['python',
+                       os.path.join(CHECKOUT_PATH, 'bench',
+                                    'gen_bench_expectations.py'),
+                       '-b', builder, '-o', output_file,
+                       '-d', dest_dir, '-r', revision])
+    except shell_utils.CommandFailedException:
       failed_gen_expectations.append(builder)
 
   failure = ''
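
Putting the new flags together, a hedged usage sketch of the updated entry point, assuming tools/ is on PYTHONPATH; the direct function call and error handling are illustrative:

import gen_bench_expectations_from_codereview as gen_bench

try:
  gen_bench.gen_bench_expectations_from_codereview(
      'https://codereview.chromium.org/363833004',
      error_on_unfinished=True,
      error_on_try_failure=False)  # warn and skip failed Perf trybots
except gen_bench.TrybotNotFinishedError:
  print 'Some Perf trybots have not finished; try again later.'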
