Chromium Code Reviews
Side by Side Diff: tools/gen_bench_expectations_from_codereview.py

Issue 297893004: Add script to rebaseline benches from codereview trybot results (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Remove accidentally-added perf baselines Created 6 years, 6 months ago
#!/usr/bin/python

# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""Generate new bench expectations from results of trybots on a code review."""


import collections
import compare_codereview
import os
import re
import shutil
import subprocess
import sys


BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
CHECKOUT_PATH = os.path.realpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')


def find_all_builds(codereview_url):
  """Finds and returns information about trybot runs for a code review.

  Args:
    codereview_url: URL of the codereview in question.

  Returns:
    List of NamedTuples: (builder_name, build_number, is_finished)
  """
  results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
  TryBuild = collections.namedtuple(
      'TryBuild', ['builder_name', 'build_number', 'is_finished'])
  try_builds = []

  for builder, data in results.iteritems():
    if builder.startswith('Perf'):
      try_builds.append(TryBuild(builder, data.url.split('/')[-1],
                                 data.status != 'pending'))
  return try_builds


def get_bench_data(builder, build_num, dest_dir):
  """Download the bench data for the given builder at the given build_num.

  Args:
    builder: string; name of the builder.
    build_num: string; build number.
    dest_dir: string; destination directory for the bench data.
  """
  url = BENCH_DATA_URL % (builder, build_num)
  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)


def find_revision_from_downloaded_data(dest_dir):
  """Finds the revision at which the downloaded data was generated.

  Args:
    dest_dir: string; directory holding the downloaded data.

  Returns:
    The revision (git commit hash) at which the downloaded data was
    generated, or None if no revision can be found.
  """
  for data_file in os.listdir(dest_dir):
    match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*', data_file)
    if match:
      return match.group('revision')
  return None


class TrybotNotFinishedError(Exception):
  pass


def gen_bench_expectations_from_codereview(codereview_url,
                                           error_on_unfinished=True):
  """Generate bench expectations from a code review.

  Scans the given code review for Perf trybot runs. Downloads the results of
  finished trybots and uses them to generate new expectations for their
  waterfall counterparts.

  Args:
    codereview_url: string; URL of the code review.
    error_on_unfinished: bool; throw an error if any trybot has not finished.
  """
  try_builds = find_all_builds(codereview_url)

  # Verify that all trybots have finished running.
  if error_on_unfinished:
    for try_build in try_builds:
      if not try_build.is_finished:
        raise TrybotNotFinishedError('%s: #%s is not finished.' % (
            try_build.builder_name,
            try_build.build_number))
  failed_data_pull = []
  failed_gen_expectations = []

  if os.path.isdir(TMP_BENCH_DATA_DIR):
    shutil.rmtree(TMP_BENCH_DATA_DIR)

  for try_build in try_builds:
    try_builder = try_build.builder_name
    builder = try_builder.replace('-Trybot', '')

    # Download the data.
    dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
    os.makedirs(dest_dir)
    try:
      get_bench_data(try_builder, try_build.build_number, dest_dir)
    except subprocess.CalledProcessError:
      failed_data_pull.append(try_builder)
      continue

    # Find the revision at which the data was generated.
    revision = find_revision_from_downloaded_data(dest_dir)
    if not revision:
      # If we can't find a revision, then something is wrong with the data we
      # downloaded. Skip this builder.
      failed_data_pull.append(try_builder)
      continue

    # Generate new expectations.
    output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
                               'bench_expectations_%s.txt' % builder)
    try:
      subprocess.check_call(['python',
                             os.path.join(CHECKOUT_PATH, 'bench',
                                          'gen_bench_expectations.py'),
                             '-b', builder, '-o', output_file,
                             '-d', dest_dir, '-r', revision])
    except subprocess.CalledProcessError:
      failed_gen_expectations.append(builder)
  failure = ''
  if failed_data_pull:
    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
  if failed_gen_expectations:
    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
        failed_gen_expectations)
  if failure:
    raise Exception(failure)


if __name__ == '__main__':
  gen_bench_expectations_from_codereview(sys.argv[1])
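
The main block above takes a single command-line argument, so the script is run from the checkout root as: python tools/gen_bench_expectations_from_codereview.py <codereview_url>. A minimal sketch of calling it from Python instead (an illustration, not part of this patch; it assumes the module is importable from tools/ and that gsutil is authenticated for gs://chromium-skia-gm, and the issue URL is a placeholder):

import gen_bench_expectations_from_codereview as gen_bench

# Placeholder issue URL. With error_on_unfinished=True this raises
# TrybotNotFinishedError if any Perf trybot is still running; otherwise it
# writes expectations/bench/bench_expectations_<builder>.txt for each builder
# whose trybot data could be downloaded.
gen_bench.gen_bench_expectations_from_codereview(
    'https://codereview.chromium.org/<issue_number>',
    error_on_unfinished=True)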