Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(12)

Side by Side Diff: third_party/WebKit/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py

Issue 1953463002: Empty webkitpy/common/net/buildbot/__init__.py and update imports. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove the net/buildbot/ subdirectory; move the files into net/ and fix pylint warnings Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 # Copyright (c) 2009, Google Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are
5 # met:
6 #
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer
11 # in the documentation and/or other materials provided with the
12 # distribution.
13 # * Neither the name of Google Inc. nor the names of its
14 # contributors may be used to endorse or promote products derived from
15 # this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 import json
30 import operator
31 import re
32 import urllib
33 import urllib2
34
35 import webkitpy.common.config.urls as config_urls
36 from webkitpy.common.memoized import memoized
37 from webkitpy.common.net.layouttestresults import LayoutTestResults
38 from webkitpy.common.net.networktransaction import NetworkTransaction
39 from webkitpy.common.system.logutils import get_logger
40 from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
41
42
43 _log = get_logger(__file__)
44
45
class Builder(object):
    """Represents one builder on a buildbot master.

    Knows how to fetch the builder's builds, results, and the mapping from
    SVN revision to build number (scraped from the results directory listing).
    """

    def __init__(self, name, buildbot):
        self._name = name
        self._buildbot = buildbot
        self._builds_cache = {}
        self._revision_to_build_number = None

    def name(self):
        return self._name

    def results_url(self):
        return config_urls.chromium_results_url_base_for_builder(self._name)

    def accumulated_results_url(self):
        return config_urls.chromium_accumulated_results_url_base_for_builder(self._name)

    def latest_layout_test_results_url(self):
        return self.accumulated_results_url() or self.latest_cached_build().results_url()

    @memoized
    def latest_layout_test_results(self):
        return self.fetch_layout_test_results(self.latest_layout_test_results_url())

    def _fetch_file_from_results(self, results_url, file_name):
        # It seems this can return None if the url redirects and then returns 404.
        result = urllib2.urlopen("%s/%s" % (results_url, file_name))
        if not result:
            return None
        # urlopen returns a file-like object which sometimes works fine with str()
        # but sometimes is an addinfourl object.  In either case calling read() is correct.
        return result.read()

    def fetch_layout_test_results(self, results_url):
        """Returns a LayoutTestResults parsed from failing_results.json under results_url."""
        # FIXME: This should cache that the result was a 404 and stop hitting the network.
        results_file = NetworkTransaction(convert_404_to_None=True).run(
            lambda: self._fetch_file_from_results(results_url, "failing_results.json"))
        return LayoutTestResults.results_from_string(results_file)

    def url_encoded_name(self):
        return urllib.quote(self._name)

    def url(self):
        return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())

    # This provides a single place to mock.
    def _fetch_build(self, build_number):
        build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
        if not build_dictionary:
            return None
        revision_string = build_dictionary['sourceStamp']['revision']
        return Build(self,
                     build_number=int(build_dictionary['number']),
                     # 'revision' may be None if a trunk build was started by the force-build button on the web page.
                     revision=(int(revision_string) if revision_string else None),
                     # Buildbot uses any number other than 0 to mean fail.  Since we fetch with
                     # filter=1, passing builds may contain no 'results' value.
                     is_green=(not build_dictionary.get('results')),
                     )

    def build(self, build_number):
        """Returns the (cached) Build for build_number, or None if it can't be fetched."""
        if not build_number:
            return None
        cached_build = self._builds_cache.get(build_number)
        if cached_build:
            return cached_build

        build = self._fetch_build(build_number)
        self._builds_cache[build_number] = build
        return build

    def latest_cached_build(self):
        """Returns the Build with the highest known build number."""
        revision_build_pairs = self.revision_build_pairs_with_results()
        revision_build_pairs.sort(key=lambda i: i[1])
        latest_build_number = revision_build_pairs[-1][1]
        return self.build(latest_build_number)

    file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")

    def _revision_and_build_for_filename(self, filename):
        """Returns (revision, build_number) parsed from a results directory name, or None."""
        # Example: "r47483 (1)/" or "r47483 (1).zip"
        match = self.file_name_regexp.match(filename)
        if not match:
            return None
        return (int(match.group("revision")), int(match.group("build_number")))

    def _fetch_revision_to_build_map(self):
        """Scrapes the results directory listing into a revision -> build_number dict."""
        # All _fetch requests go through _buildbot for easier mocking.
        # FIXME: This should use NetworkTransaction's 404 handling instead.
        try:
            # FIXME: This method is horribly slow due to the huge network load.
            # FIXME: This is a poor way to do revision -> build mapping.
            # Better would be to ask buildbot through some sort of API.
            print("Loading revision/build list from %s." % self.results_url())
            print("This may take a while...")
            result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
        except urllib2.HTTPError as error:
            if error.code != 404:
                raise
            _log.debug("Revision/build list failed to load.")
            result_files = []
        return dict(self._file_info_list_to_revision_to_build_list(result_files))

    def _file_info_list_to_revision_to_build_list(self, file_info_list):
        # This assumes there was only one build per revision, which is false but we don't care for now.
        revisions_and_builds = []
        for file_info in file_info_list:
            revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
            if revision_and_build:
                revisions_and_builds.append(revision_and_build)
        return revisions_and_builds

    def _revision_to_build_map(self):
        # Lazily fetched and cached; the fetch is slow (see _fetch_revision_to_build_map).
        if not self._revision_to_build_number:
            self._revision_to_build_number = self._fetch_revision_to_build_map()
        return self._revision_to_build_number

    def revision_build_pairs_with_results(self):
        return self._revision_to_build_map().items()

    # This assumes there can be only one build per revision, which is false, but we don't care for now.
    def build_for_revision(self, revision, allow_failed_lookups=False):
        # NOTE: This lookup will fail if that exact revision was never built.
        build_number = self._revision_to_build_map().get(int(revision))
        if not build_number:
            return None
        build = self.build(build_number)
        if not build and allow_failed_lookups:
            # Builds for old revisions will fail to look up via buildbot's json api.
            build = Build(self,
                          build_number=build_number,
                          revision=revision,
                          is_green=False,
                          )
        return build
181
182
class Build(object):
    """One build of a Builder: a (build number, revision, pass/fail) record.

    Also knows how to construct the URLs under which the build's status and
    results are published.
    """

    def __init__(self, builder, build_number, revision, is_green):
        self._builder = builder
        self._build_number = build_number
        self._build_revision = revision
        self._green = is_green

    @staticmethod
    def build_url(builder, build_number):
        """Returns the status page URL for the given build of builder."""
        return "%s/builds/%s" % (builder.url(), build_number)

    def builder(self):
        return self._builder

    def revision(self):
        return self._build_revision

    def is_green(self):
        return self._green

    def url(self):
        return self.build_url(self.builder(), self._build_number)

    def results_url(self):
        """Returns the URL of the directory holding this build's results."""
        results_directory = "r%s (%s)" % (self.revision(), self._build_number)
        return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))

    def results_zip_url(self):
        """Returns the URL of the zipped results archive for this build."""
        return "%s.zip" % self.results_url()

    def previous_build(self):
        # Callers use previous_build() rather than assuming build numbers are sequential;
        # numbering may restart across master changes or when non-trunk builds are made.
        return self._builder.build(self._build_number - 1)
218
219
class BuildBot(object):
    """Fetches and parses builder/build status pages from a buildbot master."""

    _builder_factory = Builder
    _default_url = config_urls.chromium_buildbot_url

    def __init__(self, url=None):
        self.buildbot_url = url if url else self._default_url
        self._builder_by_name = {}

    def _parse_last_build_cell(self, builder, cell):
        """Fills built_revision/is_green/build_number into the builder dict from one table cell."""
        status_link = cell.find('a')
        if status_link:
            # Will be either a revision number or a build number.
            revision_string = status_link.string
            # If revision_string has non-digits assume it's not a revision number.
            builder['built_revision'] = int(revision_string) \
                if not re.match(r'\D', revision_string) \
                else None

            # FIXME: We treat slave lost as green even though it is not to
            # work around the Qts bot being on a broken internet connection.
            # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
            builder['is_green'] = not re.search('fail', cell.renderContents()) or \
                not not re.search('lost', cell.renderContents())

            status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
            link_match = re.match(status_link_regexp, status_link['href'])
            builder['build_number'] = int(link_match.group("build_number"))
        else:
            # We failed to find a link in the first cell, just give up.  This
            # can happen if a builder is just-added, the first cell will just
            # be "no build".
            # Other parts of the code depend on is_green being present.
            builder['is_green'] = False
            builder['built_revision'] = None
            builder['build_number'] = None

    def _parse_current_build_cell(self, builder, cell):
        """Fills activity/pending_builds into the builder dict from one table cell."""
        activity_lines = cell.renderContents().split("<br />")
        builder["activity"] = activity_lines[0]  # normally "building" or "idle"
        # The middle lines document how long left for any current builds.
        match = re.match(r"(?P<pending_builds>\d) pending", activity_lines[-1])
        builder["pending_builds"] = int(match.group("pending_builds")) if match else 0

    def _parse_builder_status_from_row(self, status_row):
        """Parses one <tr> from /one_box_per_builder into a status dictionary."""
        status_cells = status_row.findAll('td')
        builder = {}

        # First cell is the name.
        name_link = status_cells[0].find('a')
        builder["name"] = unicode(name_link.string)

        self._parse_last_build_cell(builder, status_cells[1])
        self._parse_current_build_cell(builder, status_cells[2])
        return builder

    def _matches_regexps(self, builder_name, name_regexps):
        """Returns True if builder_name matches any of the given regexps."""
        for name_regexp in name_regexps:
            if re.match(name_regexp, builder_name):
                return True
        return False

    # FIXME: These _fetch methods should move to a networking class.
    def _fetch_build_dictionary(self, builder, build_number):
        """Fetches the JSON dictionary for one build, returning None on fetch or decode errors."""
        # Note: filter=1 will remove None and {} and '', which cuts noise but can
        # cause keys to be missing which you might otherwise expect.
        # FIXME: The bot sends a *huge* amount of data for each request, we should
        # find a way to reduce the response size further.
        json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
        try:
            return json.load(urllib2.urlopen(json_url))
        except urllib2.URLError as err:
            build_url = Build.build_url(builder, build_number)
            _log.error("Error fetching data for %s build %s (%s, json: %s): %s" %
                       (builder.name(), build_number, build_url, json_url, err))
            return None
        except ValueError as err:
            build_url = Build.build_url(builder, build_number)
            _log.error("Error decoding json data from %s: %s" % (build_url, err))
            return None

    def _fetch_one_box_per_builder(self):
        build_status_url = "%s/one_box_per_builder" % self.buildbot_url
        return urllib2.urlopen(build_status_url)

    def _file_cell_text(self, file_cell):
        """Traverses down through firstChild elements until one containing a string is found, then returns that string."""
        element = file_cell
        while element.string is None and element.contents:
            element = element.contents[0]
        return element.string

    def _parse_twisted_file_row(self, file_row):
        """Parses one row of a twisted directory listing into a dict of unicode strings."""
        string_or_empty = lambda string: unicode(string) if string else u""
        file_cells = file_row.findAll('td')
        return {
            "filename": string_or_empty(self._file_cell_text(file_cells[0])),
            "size": string_or_empty(self._file_cell_text(file_cells[1])),
            "type": string_or_empty(self._file_cell_text(file_cells[2])),
            "encoding": string_or_empty(self._file_cell_text(file_cells[3])),
        }

    def _parse_twisted_directory_listing(self, page):
        soup = BeautifulSoup(page)
        # HACK: Match only table rows with a class to ignore twisted header/footer rows.
        file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
        return [self._parse_twisted_file_row(file_row) for file_row in file_rows]

    # FIXME: There should be a better way to get this information directly from twisted.
    def _fetch_twisted_directory_listing(self, url):
        return self._parse_twisted_directory_listing(urllib2.urlopen(url))

    def builders(self):
        """Returns a Builder object for every builder listed on /one_box_per_builder."""
        return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]

    # This method pulls from /one_box_per_builder as an efficient way to get information about builders.
    def builder_statuses(self):
        soup = BeautifulSoup(self._fetch_one_box_per_builder())
        return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]

    def builder_with_name(self, name):
        """Returns the (cached) Builder with the given name, creating it if needed."""
        builder = self._builder_by_name.get(name)
        if not builder:
            builder = self._builder_factory(name, self)
            self._builder_by_name[name] = builder
        return builder

    # This makes fewer requests than calling Builder.latest_build would.  It grabs all builder
    # statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
    def _latest_builds_from_builders(self):
        builder_statuses = self.builder_statuses()
        return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]

    def _build_at_or_before_revision(self, build, revision):
        """Walks backwards from build, returning the first build at or before revision, or None."""
        while build:
            if build.revision() <= revision:
                return build
            build = build.previous_build()
        return None

    def _fetch_builder_page(self, builder):
        # Use urllib.quote for consistency with the rest of this file
        # (the previous urllib2.quote works only via an incidental re-export).
        builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib.quote(builder.name()))
        return urllib2.urlopen(builder_page_url)

    def _revisions_for_builder(self, builder):
        """Scrapes a builder page into a list of (revision, succeeded) pairs."""
        soup = BeautifulSoup(self._fetch_builder_page(builder))
        revisions = []
        for status_row in soup.find('table').findAll('tr'):
            revision_anchor = status_row.find('a')
            table_cells = status_row.findAll('td')
            if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
                continue
            if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
                revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
        return revisions

    def _find_green_revision(self, builder_revisions):
        """Returns the highest revision at which every builder succeeded, or None.

        builder_revisions maps builder -> list of (revision, succeeded) pairs,
        as produced by _revisions_for_builder.
        """
        # revision -> set of builders that succeeded there, or None once any builder failed.
        revision_statuses = {}
        for builder in builder_revisions:
            for revision, succeeded in builder_revisions[builder]:
                revision_statuses.setdefault(revision, set())
                if succeeded and revision_statuses[revision] is not None:
                    revision_statuses[revision].add(builder)
                else:
                    revision_statuses[revision] = None

        # In descending order, look for a revision X with successful builds.
        # Once we found X, check if remaining builders succeeded in the neighborhood of X.
        revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
        for i, revision in enumerate(revisions_in_order):
            if not revision_statuses[revision]:
                continue

            builders_succeeded_in_future = set()
            for future_revision in sorted(revisions_in_order[:i + 1]):
                if not revision_statuses[future_revision]:
                    break
                builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])

            builders_succeeded_in_past = set()
            for past_revision in revisions_in_order[i:]:
                if not revision_statuses[past_revision]:
                    break
                builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])

            if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
                return revision
        return None
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698