Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1364)

Side by Side Diff: tools/bisect-builds.py

Issue 7493016: Added pre-fetching of builds. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: nits scratched Created 9 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Snapshot Build Bisect Tool 6 """Snapshot Build Bisect Tool
7 7
8 This script bisects a snapshot archive using binary search. It starts at 8 This script bisects a snapshot archive using binary search. It starts at
9 a bad revision (it will try to guess HEAD) and asks for a last known-good 9 a bad revision (it will try to guess HEAD) and asks for a last known-good
10 revision. It will then binary search across this revision range by downloading, 10 revision. It will then binary search across this revision range by downloading,
11 unzipping, and opening Chromium for you. After testing the specific revision, 11 unzipping, and opening Chromium for you. After testing the specific revision,
12 it will ask you whether it is good or bad before continuing the search. 12 it will ask you whether it is good or bad before continuing the search.
13 """ 13 """
14 14
15 # The root URL for storage. 15 # The root URL for storage.
16 BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots' 16 BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'
17 BASE_URL_RECENT = 'http://build.chromium.org/f/chromium/snapshots'
18 17
19 # URL to the ViewVC commit page. 18 # URL to the ViewVC commit page.
20 BUILD_VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome?view=rev&revision=%d' 19 BUILD_VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome?view=rev&revision=%d'
21 20
22 # Changelogs URL. 21 # Changelogs URL.
23 CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \ 22 CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
24 'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d:%d' 23 'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d:%d'
25 24
26 ############################################################################### 25 ###############################################################################
27 26
28 import math 27 import math
29 import optparse 28 import optparse
30 import os 29 import os
31 import pipes 30 import pipes
32 import re 31 import re
33 import shutil 32 import shutil
33 import subprocess
34 import sys 34 import sys
35 import tempfile 35 import tempfile
36 import threading
36 import urllib 37 import urllib
37 from xml.etree import ElementTree 38 from xml.etree import ElementTree
38 import zipfile 39 import zipfile
39 40
class PathContext(object):
  """A PathContext is used to carry the information used to construct URLs and
  paths when dealing with the storage server and archives."""

  def __init__(self, platform, good_revision, bad_revision):
    super(PathContext, self).__init__()
    # Input parameters.
    self.platform = platform  # What's passed in to the '-a/--archive' option.
    self.good_revision = good_revision
    self.bad_revision = bad_revision

    # The name of the ZIP file in a revision directory on the server.
    self.archive_name = None

    # Internal members:
    #   _listing_platform_dir: directory that holds revisions; ends with '/'.
    #   _archive_extract_dir: uncompressed directory in the archive_name file.
    #   _binary_name: the name of the executable to run.
    if self.platform in ('linux', 'linux64'):
      self.archive_name = 'chrome-linux.zip'
      self._archive_extract_dir = 'chrome-linux'
      self._binary_name = 'chrome'
      # Linux and x64 share all the same path data except for the listing dir.
      if self.platform == 'linux64':
        self._listing_platform_dir = 'Linux_x64/'
      else:
        self._listing_platform_dir = 'Linux/'
    elif self.platform == 'mac':
      self._listing_platform_dir = 'Mac/'
      self.archive_name = 'chrome-mac.zip'
      self._archive_extract_dir = 'chrome-mac'
      self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
    elif self.platform == 'win':
      self._listing_platform_dir = 'Win/'
      self.archive_name = 'chrome-win32.zip'
      self._archive_extract_dir = 'chrome-win32'
      self._binary_name = 'chrome.exe'
    else:
      raise Exception('Invalid platform: %s' % self.platform)

  def GetListingURL(self, marker=None):
    """Returns the URL for a directory listing, with an optional marker."""
    marker_param = ''
    if marker:
      marker_param = '&marker=' + str(marker)
    return (BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir +
            marker_param)

  def GetDownloadURL(self, revision):
    """Gets the download URL for a build archive of a specific revision."""
    return "%s/%s%d/%s" % (
        BASE_URL, self._listing_platform_dir, revision, self.archive_name)

  def GetLastChangeURL(self):
    """Returns a URL to the LAST_CHANGE file."""
    return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'

  def GetLaunchPath(self):
    """Returns a relative path (presumably from the archive extraction location)
    that is used to run the executable."""
    return os.path.join(self._archive_extract_dir, self._binary_name)

  def ParseDirectoryIndex(self):
    """Parses the Google Storage directory listing into a list of revision
    numbers. The range starts with self.good_revision and goes until
    self.bad_revision."""

    def _FetchAndParse(url):
      """Fetches |url| and returns a 2-tuple of ([revisions], next_marker).
      If next_marker is not None, the listing is partial and another fetch
      should be performed with next_marker as the marker= GET parameter."""
      document = ElementTree.parse(urllib.urlopen(url))

      # All nodes in the tree are namespaced; etree spells that as
      # |{namespace}tag|. Recover the namespace from the root's tag name.
      root_tag = document.getroot().tag
      ns_end = root_tag.find('}')
      if ns_end == -1:
        raise Exception("Could not locate end namespace for directory index")
      namespace = root_tag[:ns_end + 1]

      # Find the prefix (_listing_platform_dir) and whether or not the
      # listing is truncated.
      prefix_len = len(document.find(namespace + 'Prefix').text)
      next_marker = None
      truncated = document.find(namespace + 'IsTruncated')
      if truncated is not None and truncated.text.lower() == 'true':
        next_marker = document.find(namespace + 'NextMarker').text

      # The <Prefix> nodes have content of the form
      # |_listing_platform_dir/revision/|. Strip off the platform dir and the
      # trailing slash to leave just a number.
      revisions = []
      for node in document.findall(namespace + 'CommonPrefixes/' +
                                   namespace + 'Prefix'):
        try:
          revisions.append(int(node.text[prefix_len:-1]))
        except ValueError:
          pass
      return (revisions, next_marker)

    # Fetch the first page of the listing, then keep following markers until
    # the entire directory listing has been retrieved.
    (revisions, next_marker) = _FetchAndParse(self.GetListingURL())
    while next_marker:
      (more_revisions, next_marker) = _FetchAndParse(
          self.GetListingURL(next_marker))
      revisions.extend(more_revisions)

    return revisions

  def GetRevList(self):
    """Gets the list of revision numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = self.good_revision
    maxrev = self.bad_revision
    revlist = [r for r in map(int, self.ParseDirectoryIndex())
               if minrev <= r <= maxrev]
    revlist.sort()
    return revlist
110 169
def UnzipFilenameToDir(filename, dir):
  """Unzip |filename| to directory |dir|.

  Extracts every archive member under |dir| (creating it if necessary) and
  restores the permission bits stored in each entry's external_attr. On any
  failure the error is printed to stderr and the process exits with status 1.
  """
  cwd = os.getcwd()
  # Resolve the archive path now, because we chdir into |dir| below and a
  # relative |filename| would no longer resolve.
  if not os.path.isabs(filename):
    filename = os.path.join(cwd, filename)
  zf = zipfile.ZipFile(filename)
  # Make base.
  try:
    if not os.path.isdir(dir):
      os.mkdir(dir)
    os.chdir(dir)
    # Extract files.
    for info in zf.infolist():
      name = info.filename
      if name.endswith('/'):  # dir
        if not os.path.isdir(name):
          os.makedirs(name)
      else:  # file
        parent = os.path.dirname(name)
        # BUG FIX: |parent| is '' for members at the archive root, and
        # os.makedirs('') raises OSError; only create non-empty parents.
        if parent and not os.path.isdir(parent):
          os.makedirs(parent)
        out = open(name, 'wb')
        out.write(zf.read(name))
        out.close()
      # Set permissions. Permission info in external_attr is shifted 16 bits.
      os.chmod(name, info.external_attr >> 16)
    os.chdir(cwd)
  except Exception as e:
    sys.stderr.write('%s\n' % e)
    sys.exit(1)
140 201
def FetchRevision(context, rev, filename, quit_event=None):
  """Downloads the build archive for revision |rev| into |filename|.
  @param context A PathContext instance.
  @param rev The Chromium revision number/tag to download.
  @param filename The destination for the downloaded file.
  @param quit_event A threading.Event which will be set by the master thread to
                    indicate that the download should be aborted.
  """
  def _AbortCheck(blocknum, blocksize, totalsize):
    # urlretrieve invokes this hook after every block; raising from it is the
    # only way to abort the transfer mid-flight.
    if quit_event and quit_event.is_set():
      raise RuntimeError("Aborting download of revision %d" % rev)

  try:
    urllib.urlretrieve(context.GetDownloadURL(rev), filename, _AbortCheck)
  except RuntimeError:
    # Raised by _AbortCheck when |quit_event| is set; swallow it so the
    # fetch thread exits quietly.
    pass
def RunRevision(context, revision, zipfile, profile, args):
  """Given a zipped revision, unzip it and run the test.

  Extracts |zipfile| into a fresh temp directory, launches the build with
  |profile| and |args|, and returns a (returncode, stdout, stderr) tuple from
  the subprocess. The temp directory is removed (best-effort) afterwards.
  """
  print("Trying revision %d..." % revision)

  # Unpack the build into a scratch directory and work from there so we don't
  # collide with user files.
  cwd = os.getcwd()
  tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
  UnzipFilenameToDir(zipfile, tempdir)
  os.chdir(tempdir)

  # Launch the build and wait for it to exit, capturing its output.
  testargs = [context.GetLaunchPath(), '--user-data-dir=%s' % profile] + args
  subproc = subprocess.Popen(testargs,
                             bufsize=-1,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  (stdout, stderr) = subproc.communicate()

  # Best-effort cleanup of the scratch directory.
  os.chdir(cwd)
  try:
    shutil.rmtree(tempdir, True)
  except Exception:
    pass

  return (subproc.returncode, stdout, stderr)
def AskIsGoodBuild(rev, status, stdout, stderr):
  """Ask the user whether build |rev| is good or bad.

  Returns True for good and False for bad; raises SystemExit when the user
  chooses to quit. |status|, |stdout| and |stderr| come from the test run and
  are unused here — they exist so this function matches the predicate
  signature expected by Bisect().
  """
  # Re-prompt until we get a response we can parse.
  while True:
    answer = raw_input('\nRevision %d is [(g)ood/(b)ad/(q)uit]: ' % int(rev))
    if answer == 'g' or answer == 'b':
      return answer == 'g'
    if answer == 'q':
      raise SystemExit()
def Bisect(platform,
           good_rev=0,
           bad_rev=0,
           try_args=(),
           profile=None,
           predicate=AskIsGoodBuild):
  """Given known good and known bad revisions, run a binary search on all
  archived revisions to determine the last known good revision.

  @param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
  @param good_rev Number/tag of the last known good revision.
  @param bad_rev Number/tag of the first known bad revision.
  @param try_args A tuple of arguments to pass to the test application.
  @param profile The name of the user profile to run with.
  @param predicate A predicate function which returns True iff the argument
                   chromium revision is good.

  Threading is used to fetch Chromium revisions in the background, speeding up
  the user's experience. For example, suppose the bounds of the search are
  good_rev=0, bad_rev=100. The first revision to check is 50. Depending on
  whether revision 50 is good or bad, the next revision to check will be
  either 25 or 75. So, while revision 50 is being checked, the script
  downloads revisions 25 and 75 in the background. Once the good/bad verdict
  on rev 50 is known:

  - If rev 50 is good, the download of rev 25 is canceled, and the next test
    is run on rev 75.

  - If rev 50 is bad, the download of rev 75 is canceled, and the next test
    is run on rev 25.
  """

  if not profile:
    profile = 'profile'

  context = PathContext(platform, good_rev, bad_rev)
  cwd = os.getcwd()

  def _GetDownloadPath(rev):
    """Returns the local filename used to stage the archive for |rev|."""
    return os.path.join(cwd, '%d-%s' % (rev, context.archive_name))

  revlist = context.GetRevList()

  # Get a list of revisions to bisect across.
  if len(revlist) < 2:  # Don't have enough builds to bisect.
    msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
    raise RuntimeError(msg)

  # Figure out our bookends and first pivot point; fetch the pivot revision.
  good = 0
  bad = len(revlist) - 1
  pivot = bad // 2
  rev = revlist[pivot]
  zipfile = _GetDownloadPath(rev)
  print("Downloading revision %d..." % rev)
  FetchRevision(context, rev, zipfile)

  # Binary search time!
  while zipfile and bad - good > 1:
    # Pre-fetch the next two possible pivots:
    # - down_pivot is the next revision to check if the current revision turns
    #   out to be bad.
    # - up_pivot is the next revision to check if the current revision turns
    #   out to be good.
    # BUG FIX: initialize all three pre-fetch slots per iteration; the
    # original left down_zipfile/up_zipfile unbound when no thread was
    # started, so the SystemExit cleanup below could raise NameError.
    down_pivot = (pivot - good) // 2 + good
    down_thread = None
    down_event = None
    down_zipfile = None
    if down_pivot != pivot and down_pivot != good:
      down_rev = revlist[down_pivot]
      down_zipfile = _GetDownloadPath(down_rev)
      down_event = threading.Event()
      down_thread = threading.Thread(target=FetchRevision,
                                     name='down_fetch',
                                     args=(context, down_rev, down_zipfile,
                                           down_event))
      down_thread.start()

    up_pivot = (bad - pivot) // 2 + pivot
    up_thread = None
    up_event = None
    up_zipfile = None
    if up_pivot != pivot and up_pivot != bad:
      up_rev = revlist[up_pivot]
      up_zipfile = _GetDownloadPath(up_rev)
      up_event = threading.Event()
      up_thread = threading.Thread(target=FetchRevision,
                                   name='up_fetch',
                                   args=(context, up_rev, up_zipfile,
                                         up_event))
      up_thread.start()

    # Run the test on the pivot revision.
    (status, stdout, stderr) = RunRevision(context,
                                           rev,
                                           zipfile,
                                           profile,
                                           try_args)
    os.unlink(zipfile)
    zipfile = None

    # Call the predicate function to see if the current revision is good or
    # bad. On that basis, kill one of the background downloads and complete
    # the other, as described in the comments above.
    try:
      if predicate(rev, status, stdout, stderr):
        good = pivot
        if down_thread:
          down_event.set()  # Kill the download of the older revision.
          down_thread.join()
          os.unlink(down_zipfile)
        if up_thread:
          print("Downloading revision %d..." % up_rev)
          up_thread.join()  # Wait for the newer revision to finish downloading.
          pivot = up_pivot
          zipfile = up_zipfile
      else:
        bad = pivot
        if up_thread:
          up_event.set()  # Kill the download of the newer revision.
          up_thread.join()
          os.unlink(up_zipfile)
        if down_thread:
          print("Downloading revision %d..." % down_rev)
          down_thread.join()  # Wait for the older revision to finish
                              # downloading.
          pivot = down_pivot
          zipfile = down_zipfile
    except SystemExit:
      # The user quit. BUG FIX: signal the in-flight fetch threads and join
      # them before unlinking, so we never delete a file that is still being
      # written (and never touch a variable that was never assigned).
      for (thread, event, staged) in ((down_thread, down_event, down_zipfile),
                                      (up_thread, up_event, up_zipfile)):
        if thread:
          event.set()
          thread.join()
        if staged:
          try:
            os.unlink(staged)
          except OSError:
            pass
      sys.exit(0)

    rev = revlist[pivot]

  return (revlist[good], revlist[bad])
321 392
322 def main(): 393 def main():
323 usage = ('%prog [options] [-- chromium-options]\n' 394 usage = ('%prog [options] [-- chromium-options]\n'
324 'Perform binary search on the snapshot builds.\n' 395 'Perform binary search on the snapshot builds.\n'
325 '\n' 396 '\n'
326 'Tip: add "-- --no-first-run" to bypass the first run prompts.') 397 'Tip: add "-- --no-first-run" to bypass the first run prompts.')
327 parser = optparse.OptionParser(usage=usage) 398 parser = optparse.OptionParser(usage=usage)
328 # Strangely, the default help output doesn't include the choice list. 399 # Strangely, the default help output doesn't include the choice list.
329 choices = ['mac', 'win', 'linux', 'linux64'] 400 choices = ['mac', 'win', 'linux', 'linux64']
(...skipping 17 matching lines...) Expand all
347 parser.print_help() 418 parser.print_help()
348 return 1 419 return 1
349 420
350 if opts.bad and opts.good and (opts.good > opts.bad): 421 if opts.bad and opts.good and (opts.good > opts.bad):
351 print ('The good revision (%d) must precede the bad revision (%d).\n' % 422 print ('The good revision (%d) must precede the bad revision (%d).\n' %
352 (opts.good, opts.bad)) 423 (opts.good, opts.bad))
353 parser.print_help() 424 parser.print_help()
354 return 1 425 return 1
355 426
356 # Create the context. Initialize 0 for the revisions as they are set below. 427 # Create the context. Initialize 0 for the revisions as they are set below.
357 context = PathContext(opts.archive, 0, 0, use_recent=False) 428 context = PathContext(opts.archive, 0, 0)
358 429
359 # Pick a starting point, try to get HEAD for this. 430 # Pick a starting point, try to get HEAD for this.
360 if opts.bad: 431 if opts.bad:
361 bad_rev = opts.bad 432 bad_rev = opts.bad
362 else: 433 else:
363 bad_rev = 0 434 bad_rev = 0
364 try: 435 try:
365 # Location of the latest build revision number 436 # Location of the latest build revision number
366 nh = urllib.urlopen(context.GetLastChangeURL()) 437 nh = urllib.urlopen(context.GetLastChangeURL())
367 latest = int(nh.read()) 438 latest = int(nh.read())
368 nh.close() 439 nh.close()
369 bad_rev = raw_input('Bad revision [HEAD:%d]: ' % latest) 440 bad_rev = raw_input('Bad revision [HEAD:%d]: ' % latest)
370 if (bad_rev == ''): 441 if (bad_rev == ''):
371 bad_rev = latest 442 bad_rev = latest
372 bad_rev = int(bad_rev) 443 bad_rev = int(bad_rev)
373 except Exception, e: 444 except Exception, e:
374 print('Could not determine latest revision. This could be bad...') 445 print('Could not determine latest revision. This could be bad...')
375 bad_rev = int(raw_input('Bad revision: ')) 446 bad_rev = int(raw_input('Bad revision: '))
376 447
377 # Find out when we were good. 448 # Find out when we were good.
378 if opts.good: 449 if opts.good:
379 good_rev = opts.good 450 good_rev = opts.good
380 else: 451 else:
381 good_rev = 0 452 good_rev = 0
382 try: 453 try:
383 good_rev = int(raw_input('Last known good [0]: ')) 454 good_rev = int(raw_input('Last known good [0]: '))
384 except Exception, e: 455 except Exception, e:
385 pass 456 pass
386 457
387 # Set the input parameters now that they've been validated.
388 context.good_revision = good_rev
389 context.bad_revision = bad_rev
390
391 # Get recent revision list and check whether it's sufficient.
392 all_revs_recent = map(int, ParseDirectoryIndexRecent(context))
393 all_revs_recent.sort()
394 # Skipping 0 since it might be deleted off the server soon:
395 all_revs_recent = all_revs_recent[1:]
396 oldest_recent_rev = all_revs_recent[0]
397 if good_rev >= oldest_recent_rev:
398 # The range is within recent builds, so switch on use_recent.
399 context.use_recent = True
400 elif bad_rev >= oldest_recent_rev:
401 # The range spans both old and recent builds.
402 # If oldest_recent_rev is good, we bisect the recent builds.
403 context.use_recent = True
404 TryRevision(context, oldest_recent_rev, opts.profile, args)
405 if AskIsGoodBuild(oldest_recent_rev):
406 # context.use_recent is True
407 context.good_revision = oldest_recent_rev
408 else:
409 context.use_recent = False
410 context.bad_revision = oldest_recent_rev
411
412 all_revs = []
413 if context.use_recent:
414 all_revs = all_revs_recent
415 else:
416 all_revs = map(int, ParseDirectoryIndex(context))
417
418 # Filter list of revisions to bisect across.
419 revlist = FilterRevList(context, all_revs)
420 if len(revlist) < 2: # Don't have enough builds to bisect
421 print 'We don\'t have enough builds to bisect. revlist: %s' % revlist
422 sys.exit(1)
423
424 (last_known_good_rev, first_known_bad_rev) = Bisect( 458 (last_known_good_rev, first_known_bad_rev) = Bisect(
425 revlist, context, args, opts.profile) 459 opts.archive, good_rev, bad_rev, args, opts.profile)
426 460
427 # We're done. Let the user know the results in an official manner. 461 # We're done. Let the user know the results in an official manner.
428 print('You are probably looking for build %d.' % first_known_bad_rev) 462 print('You are probably looking for build %d.' % first_known_bad_rev)
429 print('CHANGELOG URL:') 463 print('CHANGELOG URL:')
430 print(CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev)) 464 print(CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev))
431 print('Built at revision:') 465 print('Built at revision:')
432 print(BUILD_VIEWVC_URL % first_known_bad_rev) 466 print(BUILD_VIEWVC_URL % first_known_bad_rev)
433 467
434 if __name__ == '__main__': 468 if __name__ == '__main__':
435 sys.exit(main()) 469 sys.exit(main())
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698