Chromium Code Reviews

Side by Side Diff: tools/gen_bench_expectations_from_codereview.py

Issue 2373803002: Remove gen_bench_expectations_from_codereview (Closed)
Patch Set: Created 4 years, 2 months ago
(Empty: this patch deletes the file; the removed contents are shown below.)
#!/usr/bin/python

# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""Generate new bench expectations from results of trybots on a code review."""


import collections
import compare_codereview
import json
import os
import re
import shutil
import subprocess
import sys
import urllib2


BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/bench_*_data_*'
BUILD_STATUS_SUCCESS = 0
BUILD_STATUS_WARNINGS = 1
CHECKOUT_PATH = os.path.realpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
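# Editor's note (illustrative; not part of the original file): with a
# hypothetical builder name and build number,
#   BENCH_DATA_URL % ('Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot', '123')
# expands to
#   gs://chromium-skia-gm/perfdata/Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot/123/bench_*_data_*
# and get_bench_data() below copies everything matching that wildcard locally
# via `gsutil cp -R`.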


TryBuild = collections.namedtuple(
    'TryBuild', ['builder_name', 'build_number', 'is_finished', 'json_url'])


def find_all_builds(codereview_url):
  """Finds and returns information about trybot runs for a code review.

  Args:
      codereview_url: URL of the codereview in question.

  Returns:
      List of NamedTuples: (builder_name, build_number, is_finished, json_url)
  """
  results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
  try_builds = []
  for builder, data in results.iteritems():
    if builder.startswith('Perf'):
      build_num = None
      json_url = None
      if data.url:
        split_url = data.url.split('/')
        build_num = split_url[-1]
        split_url.insert(split_url.index('builders'), 'json')
        json_url = '/'.join(split_url)
      is_finished = (data.status not in ('pending', 'try-pending') and
                     build_num is not None)
      try_builds.append(TryBuild(builder_name=builder,
                                 build_number=build_num,
                                 is_finished=is_finished,
                                 json_url=json_url))
  return try_builds
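# Editor's note (illustrative; not part of the original file): the loop above
# derives the buildbot JSON API location by splicing 'json' into the build
# link. For a hypothetical build URL
#   http://build-master.example.com/builders/Perf-Foo-Trybot/builds/12
# it yields build_number '12' and json_url
#   http://build-master.example.com/json/builders/Perf-Foo-Trybot/builds/12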


def _all_trybots_finished(try_builds):
  """Return True iff all of the given try jobs have finished.

  Args:
      try_builds: list of TryBuild instances.

  Returns:
      True if all of the given try jobs have finished, otherwise False.
  """
  for try_build in try_builds:
    if not try_build.is_finished:
      return False
  return True


def all_trybots_finished(codereview_url):
  """Return True iff all of the try jobs on the given codereview have finished.

  Args:
      codereview_url: string; URL of the codereview.

  Returns:
      True if all of the try jobs have finished, otherwise False.
  """
  return _all_trybots_finished(find_all_builds(codereview_url))


def get_bench_data(builder, build_num, dest_dir):
  """Download the bench data for the given builder at the given build_num.

  Args:
      builder: string; name of the builder.
      build_num: string; build number.
      dest_dir: string; destination directory for the bench data.
  """
  url = BENCH_DATA_URL % (builder, build_num)
  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir])


def find_revision_from_downloaded_data(dest_dir):
  """Finds the revision at which the downloaded data was generated.

  Args:
      dest_dir: string; directory holding the downloaded data.

  Returns:
      The revision (git commit hash) at which the downloaded data was
      generated, or None if no revision can be found.
  """
  for data_file in os.listdir(dest_dir):
    match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*', data_file)
    if match:
      return match.group('revision')
  return None
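# Editor's note (illustrative; not part of the original file): a downloaded
# file with a hypothetical name such as
#   bench_0a1b2c3d4e_data_skp_defaults.json
# matches the pattern above and yields the revision '0a1b2c3d4e'.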


class TrybotNotFinishedError(Exception):
  pass


def _step_succeeded(try_build, step_name):
  """Return True if the given step succeeded and False otherwise.

  This function talks to the build master's JSON interface, which is slow.

  TODO(borenet): There are now a few places which talk to the master's JSON
  interface. Maybe it'd be worthwhile to create a module which does this.

  Args:
      try_build: TryBuild instance; the build we're concerned about.
      step_name: string; name of the step we're concerned about.
  """
  step_url = '/'.join((try_build.json_url, 'steps', step_name))
  step_data = json.load(urllib2.urlopen(step_url))
  # step_data['results'] may not be present if the step succeeded. If present,
  # it is a list whose first element is a result code, per the documentation:
  # http://docs.buildbot.net/latest/developer/results.html
  result = step_data.get('results', [BUILD_STATUS_SUCCESS])[0]
  if result in (BUILD_STATUS_SUCCESS, BUILD_STATUS_WARNINGS):
    return True
  return False
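# Editor's note (illustrative; not part of the original file): per the buildbot
# documentation linked above, a failed step's JSON would look roughly like
#   {'name': 'BenchPictures', 'results': [2, []], ...}
# where the leading 2 (FAILURE) makes _step_succeeded() return False, while a
# missing 'results' key or a leading 0/1 (SUCCESS/WARNINGS) returns True.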


def gen_bench_expectations_from_codereview(codereview_url,
                                           error_on_unfinished=True,
                                           error_on_try_failure=True):
  """Generate bench expectations from a code review.

  Scans the given code review for Perf trybot runs. Downloads the results of
  finished trybots and uses them to generate new expectations for their
  waterfall counterparts.

  Args:
      codereview_url: string; URL of the code review.
      error_on_unfinished: bool; throw an error if any trybot has not finished.
      error_on_try_failure: bool; throw an error if any trybot failed an
          important step.
  """
  try_builds = find_all_builds(codereview_url)

  # Verify that all trybots have finished running.
  if error_on_unfinished and not _all_trybots_finished(try_builds):
    raise TrybotNotFinishedError('Not all trybots have finished.')

  failed_run = []
  failed_data_pull = []
  failed_gen_expectations = []

  # Don't even try to do anything if BenchPictures, PostBench, or
  # UploadBenchResults failed.
  for try_build in try_builds:
    for step in ('BenchPictures', 'PostBench', 'UploadBenchResults'):
      if not _step_succeeded(try_build, step):
        msg = '%s failed on %s!' % (step, try_build.builder_name)
        if error_on_try_failure:
          raise Exception(msg)
        print 'WARNING: %s Skipping.' % msg
        failed_run.append(try_build.builder_name)

  if os.path.isdir(TMP_BENCH_DATA_DIR):
    shutil.rmtree(TMP_BENCH_DATA_DIR)

  for try_build in try_builds:
    try_builder = try_build.builder_name

    # Even if we're not erroring out on try failures, we can't generate new
    # expectations for failed bots.
    if try_builder in failed_run:
      continue

    builder = try_builder.replace('-Trybot', '')

    # Download the data.
    dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
    os.makedirs(dest_dir)
    try:
      get_bench_data(try_builder, try_build.build_number, dest_dir)
    except subprocess.CalledProcessError:
      failed_data_pull.append(try_builder)
      continue

    # Find the revision at which the data was generated.
    revision = find_revision_from_downloaded_data(dest_dir)
    if not revision:
      # If we can't find a revision, then something is wrong with the data we
      # downloaded. Skip this builder.
      failed_data_pull.append(try_builder)
      continue

    # Generate new expectations.
    output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
                               'bench_expectations_%s.txt' % builder)
    try:
      subprocess.check_call(['python',
                             os.path.join(CHECKOUT_PATH, 'bench',
                                          'gen_bench_expectations.py'),
                             '-b', builder, '-o', output_file,
                             '-d', dest_dir, '-r', revision])
    except subprocess.CalledProcessError:
      failed_gen_expectations.append(builder)

  failure = ''
  if failed_data_pull:
    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
  if failed_gen_expectations:
    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
        failed_gen_expectations)
  if failure:
    raise Exception(failure)


if __name__ == '__main__':
  gen_bench_expectations_from_codereview(sys.argv[1])
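A minimal usage sketch (the issue URL below is hypothetical; the module and function names come from the file above). The script was meant to be run with a Rietveld issue URL once the Perf trybots had finished:

  python tools/gen_bench_expectations_from_codereview.py \
      https://codereview.chromium.org/123456789/

Roughly equivalent, when driven from Python with tools/ on sys.path:

  import gen_bench_expectations_from_codereview as gen_bench

  url = 'https://codereview.chromium.org/123456789/'  # hypothetical issue
  if gen_bench.all_trybots_finished(url):
    gen_bench.gen_bench_expectations_from_codereview(url)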