OLD | NEW |
---|---|
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Snapshot Build Bisect Tool | 6 """Snapshot Build Bisect Tool |
7 | 7 |
8 This script bisects a snapshot archive using binary search. It starts at | 8 This script bisects a snapshot archive using binary search. It starts at |
9 a bad revision (it will try to guess HEAD) and asks for a last known-good | 9 a bad revision (it will try to guess HEAD) and asks for a last known-good |
10 revision. It will then binary search across this revision range by downloading, | 10 revision. It will then binary search across this revision range by downloading, |
11 unzipping, and opening Chromium for you. After testing the specific revision, | 11 unzipping, and opening Chromium for you. After testing the specific revision, |
12 it will ask you whether it is good or bad before continuing the search. | 12 it will ask you whether it is good or bad before continuing the search. |
13 """ | 13 """ |
14 | 14 |
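A minimal sketch of the bisection loop the docstring describes, for orientation only; is_good() is a hypothetical stand-in for "download, launch, ask the user" and none of the helpers defined in this file are used:

# Sketch: repeatedly test the midpoint of the remaining [good, bad] range
# and shrink the range based on the verdict.
def bisect_sketch(revisions, is_good):
  good, bad = 0, len(revisions) - 1
  while bad - good > 1:
    pivot = (good + bad) / 2        # integer division (Python 2)
    if is_good(revisions[pivot]):   # e.g. download, launch, ask the user
      good = pivot
    else:
      bad = pivot
  return revisions[good], revisions[bad]
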
15 # The root URL for storage. | 15 # The root URL for storage. |
16 BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots' | 16 BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots' |
17 BASE_URL_RECENT = 'http://build.chromium.org/f/chromium/snapshots' | |
18 | 17 |
19 # URL to the ViewVC commit page. | 18 # URL to the ViewVC commit page. |
20 BUILD_VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome?view=rev&revision=%d' | 19 BUILD_VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome?view=rev&revision=%d' |
21 | 20 |
22 # Changelogs URL. | 21 # Changelogs URL. |
23 CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \ | 22 CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \ |
24 'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d:%d' | 23 'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d:%d' |
25 | 24 |
26 ############################################################################### | 25 ############################################################################### |
27 | 26 |
28 import math | 27 import math |
29 import optparse | 28 import optparse |
30 import os | 29 import os |
31 import pipes | 30 import pipes |
32 import re | 31 import re |
33 import shutil | 32 import shutil |
33 import subprocess | |
34 import sys | 34 import sys |
35 import tempfile | 35 import tempfile |
36 import threading | |
36 import urllib | 37 import urllib |
37 from xml.etree import ElementTree | 38 from xml.etree import ElementTree |
38 import zipfile | 39 import zipfile |
39 | 40 |
40 class PathContext(object): | 41 class PathContext(object): |
41 """A PathContext is used to carry the information used to construct URLs and | 42 """A PathContext is used to carry the information used to construct URLs and |
42 paths when dealing with the storage server and archives.""" | 43 paths when dealing with the storage server and archives.""" |
43 def __init__(self, platform, good_revision, bad_revision, use_recent): | 44 def __init__(self, platform, good_revision, bad_revision): |
44 super(PathContext, self).__init__() | 45 super(PathContext, self).__init__() |
45 # Store off the input parameters. | 46 # Store off the input parameters. |
46 self.platform = platform # What's passed in to the '-a/--archive' option. | 47 self.platform = platform # What's passed in to the '-a/--archive' option. |
47 self.good_revision = good_revision | 48 self.good_revision = good_revision |
48 self.bad_revision = bad_revision | 49 self.bad_revision = bad_revision |
49 self.use_recent = use_recent | |
50 | 50 |
51 # The name of the ZIP file in a revision directory on the server. | 51 # The name of the ZIP file in a revision directory on the server. |
52 self.archive_name = None | 52 self.archive_name = None |
53 | 53 |
54 # Set some internal members: | 54 # Set some internal members: |
55 # _listing_platform_dir = Directory that holds revisions. Ends with a '/'. | 55 # _listing_platform_dir = Directory that holds revisions. Ends with a '/'. |
56 # _archive_extract_dir = Uncompressed directory in the archive_name file. | 56 # _archive_extract_dir = Uncompressed directory in the archive_name file. |
57 # _binary_name = The name of the executable to run. | 57 # _binary_name = The name of the executable to run. |
58 if self.platform == 'linux' or self.platform == 'linux64': | 58 if self.platform == 'linux' or self.platform == 'linux64': |
59 self._listing_platform_dir = 'Linux/' | 59 self._listing_platform_dir = 'Linux/' |
60 self.archive_name = 'chrome-linux.zip' | 60 self.archive_name = 'chrome-linux.zip' |
61 self._archive_extract_dir = 'chrome-linux' | 61 self._archive_extract_dir = 'chrome-linux' |
62 self._binary_name = 'chrome' | 62 self._binary_name = 'chrome' |
63 # Linux and x64 share all the same path data except for the archive dir. | 63 # Linux and x64 share all the same path data except for the archive dir. |
64 if self.platform == 'linux64': | 64 if self.platform == 'linux64': |
65 self._listing_platform_dir = 'Linux_x64/' | 65 self._listing_platform_dir = 'Linux_x64/' |
66 elif self.platform == 'mac': | 66 elif self.platform == 'mac': |
67 self._listing_platform_dir = 'Mac/' | 67 self._listing_platform_dir = 'Mac/' |
68 self.archive_name = 'chrome-mac.zip' | 68 self.archive_name = 'chrome-mac.zip' |
69 self._archive_extract_dir = 'chrome-mac' | 69 self._archive_extract_dir = 'chrome-mac' |
70 self._binary_name = 'Chromium.app/Contents/MacOS/Chromium' | 70 self._binary_name = 'Chromium.app/Contents/MacOS/Chromium' |
71 elif self.platform == 'win': | 71 elif self.platform == 'win': |
72 self._listing_platform_dir = 'Win/' | 72 self._listing_platform_dir = 'Win/' |
73 self.archive_name = 'chrome-win32.zip' | 73 self.archive_name = 'chrome-win32.zip' |
74 self._archive_extract_dir = 'chrome-win32' | 74 self._archive_extract_dir = 'chrome-win32' |
75 self._binary_name = 'chrome.exe' | 75 self._binary_name = 'chrome.exe' |
76 else: | 76 else: |
77 raise Exception("Invalid platform") | 77 raise Exception('Invalid platform: %s' % self.platform) |
78 | 78 |
79 def GetListingURL(self, marker=None): | 79 def GetListingURL(self, marker=None): |
80 """Returns the URL for a directory listing, with an optional marker.""" | 80 """Returns the URL for a directory listing, with an optional marker.""" |
81 marker_param = '' | 81 marker_param = '' |
82 if marker: | 82 if marker: |
83 marker_param = '&marker=' + str(marker) | 83 marker_param = '&marker=' + str(marker) |
84 return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \ | 84 return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \ |
85 marker_param | 85 marker_param |
86 | 86 |
87 def GetListingURLRecent(self): | |
88 """Returns the URL for a directory listing of recent builds.""" | |
89 return BASE_URL_RECENT + '/' + self._listing_platform_dir | |
90 | |
91 def GetDownloadURL(self, revision): | 87 def GetDownloadURL(self, revision): |
92 """Gets the download URL for a build archive of a specific revision.""" | 88 """Gets the download URL for a build archive of a specific revision.""" |
93 if self.use_recent: | 89 return "%s/%s%d/%s" % ( |
94 return "%s/%s%d/%s" % ( | 90 BASE_URL, self._listing_platform_dir, revision, self.archive_name) |
95 BASE_URL_RECENT, self._listing_platform_dir, revision, | |
96 self.archive_name) | |
97 else: | |
98 return "%s/%s%d/%s" % ( | |
99 BASE_URL, self._listing_platform_dir, revision, self.archive_name) | |
100 | 91 |
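For a concrete sense of the simplified URL construction above (the revision number here is made up):

ctx = PathContext('linux64', 90000, 92000)
ctx.GetDownloadURL(91500)
# -> 'http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux_x64/91500/chrome-linux.zip'
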
101 def GetLastChangeURL(self): | 92 def GetLastChangeURL(self): |
102 """Returns a URL to the LAST_CHANGE file.""" | 93 """Returns a URL to the LAST_CHANGE file.""" |
103 return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE' | 94 return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE' |
104 | 95 |
105 def GetLaunchPath(self): | 96 def GetLaunchPath(self): |
106 """Returns a relative path (presumably from the archive extraction location) | 97 """Returns a relative path (presumably from the archive extraction location) |
107 that is used to run the executable.""" | 98 that is used to run the executable.""" |
108 return os.path.join(self._archive_extract_dir, self._binary_name) | 99 return os.path.join(self._archive_extract_dir, self._binary_name) |
109 | 100 |
101 def ParseDirectoryIndex(self): | |
102 """Parses the Google Storage directory listing into a list of revision | |
103 numbers. The range starts with self.good_revision and goes until | |
104 self.bad_revision.""" | |
105 | |
106 def _FetchAndParse(url): | |
107 """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If | |
108 next-marker is not None, then the listing is a partial listing and another | |
109 fetch should be performed with next-marker being the marker= GET | |
110 parameter.""" | |
111 handle = urllib.urlopen(url) | |
112 document = ElementTree.parse(handle) | |
113 | |
114 # All nodes in the tree are namespaced. Get the root's tag name to extract | |
115 # the namespace. Etree does namespaces as |{namespace}tag|. | |
116 root_tag = document.getroot().tag | |
117 end_ns_pos = root_tag.find('}') | |
118 if end_ns_pos == -1: | |
119 raise Exception("Could not locate end namespace for directory index") | |
120 namespace = root_tag[:end_ns_pos + 1] | |
121 | |
122 # Find the prefix (_listing_platform_dir) and whether or not the list is | |
123 # truncated. | |
124 prefix_len = len(document.find(namespace + 'Prefix').text) | |
125 next_marker = None | |
126 is_truncated = document.find(namespace + 'IsTruncated') | |
127 if is_truncated is not None and is_truncated.text.lower() == 'true': | |
128 next_marker = document.find(namespace + 'NextMarker').text | |
129 | |
130 # Get a list of all the revisions. | |
131 all_prefixes = document.findall(namespace + 'CommonPrefixes/' + | |
132 namespace + 'Prefix') | |
133 # The <Prefix> nodes have content of the form of | |
134 # |_listing_platform_dir/revision/|. Strip off the platform dir and the | |
135 # trailing slash to just have a number. | |
136 revisions = [] | |
137 for prefix in all_prefixes: | |
138 revnum = prefix.text[prefix_len:-1] | |
139 try: | |
140 revnum = int(revnum) | |
141 revisions.append(revnum) | |
142 except ValueError: | |
143 pass | |
144 return (revisions, next_marker) | |
145 | |
146 # Fetch the first list of revisions. | |
147 (revisions, next_marker) = _FetchAndParse(self.GetListingURL()) | |
148 | |
149 # If the result list was truncated, refetch with the next marker. Do this | |
150 # until an entire directory listing is done. | |
151 while next_marker: | |
152 next_url = self.GetListingURL(next_marker) | |
153 (new_revisions, next_marker) = _FetchAndParse(next_url) | |
154 revisions.extend(new_revisions) | |
155 | |
156 return revisions | |
157 | |
158 def GetRevList(self): | |
159 """Gets the list of revision numbers between self.good_revision and | |
160 self.bad_revision.""" | |
161 # Download the revlist and filter for just the range between good and bad. | |
162 minrev = self.good_revision | |
163 maxrev = self.bad_revision | |
164 revlist = map(int, self.ParseDirectoryIndex()) | |
165 revlist = [x for x in revlist if x >= minrev and x <= maxrev] | |
166 revlist.sort() | |
167 return revlist | |
168 | |
110 | 169 |
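An illustrative, self-contained sketch (not the code under review) of the namespace handling and prefix stripping that _FetchAndParse performs on a Google Storage directory listing; the sample XML is made up but follows the same shape:

from xml.etree import ElementTree
import StringIO

sample = (
    "<?xml version='1.0' encoding='UTF-8'?>"
    "<ListBucketResult xmlns='http://doc.s3.amazonaws.com/2006-03-01'>"
    "<Prefix>Linux/</Prefix>"
    "<IsTruncated>false</IsTruncated>"
    "<CommonPrefixes><Prefix>Linux/92000/</Prefix></CommonPrefixes>"
    "<CommonPrefixes><Prefix>Linux/92001/</Prefix></CommonPrefixes>"
    "</ListBucketResult>")

document = ElementTree.parse(StringIO.StringIO(sample))
# Etree reports namespaced tags as '{namespace}tag'; recover the namespace
# from the root element and use it to build the find/findall paths.
root_tag = document.getroot().tag
namespace = root_tag[:root_tag.find('}') + 1]
prefix_len = len(document.find(namespace + 'Prefix').text)   # len('Linux/')
revisions = [int(p.text[prefix_len:-1]) for p in
             document.findall(namespace + 'CommonPrefixes/' +
                              namespace + 'Prefix')]
# revisions == [92000, 92001]
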
111 def UnzipFilenameToDir(filename, dir): | 170 def UnzipFilenameToDir(filename, dir): |
112 """Unzip |filename| to directory |dir|.""" | 171 """Unzip |filename| to directory |dir|.""" |
172 cwd = os.getcwd() | |
173 if not os.path.isabs(filename): | |
174 filename = os.path.join(cwd, filename) | |
113 zf = zipfile.ZipFile(filename) | 175 zf = zipfile.ZipFile(filename) |
114 # Make base. | 176 # Make base. |
115 pushd = os.getcwd() | |
116 try: | 177 try: |
117 if not os.path.isdir(dir): | 178 if not os.path.isdir(dir): |
118 os.mkdir(dir) | 179 os.mkdir(dir) |
119 os.chdir(dir) | 180 os.chdir(dir) |
120 # Extract files. | 181 # Extract files. |
121 for info in zf.infolist(): | 182 for info in zf.infolist(): |
122 name = info.filename | 183 name = info.filename |
123 if name.endswith('/'): # dir | 184 if name.endswith('/'): # dir |
124 if not os.path.isdir(name): | 185 if not os.path.isdir(name): |
125 os.makedirs(name) | 186 os.makedirs(name) |
126 else: # file | 187 else: # file |
127 dir = os.path.dirname(name) | 188 dir = os.path.dirname(name) |
128 if not os.path.isdir(dir): | 189 if not os.path.isdir(dir): |
129 os.makedirs(dir) | 190 os.makedirs(dir) |
130 out = open(name, 'wb') | 191 out = open(name, 'wb') |
131 out.write(zf.read(name)) | 192 out.write(zf.read(name)) |
132 out.close() | 193 out.close() |
133 # Set permissions. Permission info in external_attr is shifted 16 bits. | 194 # Set permissions. Permission info in external_attr is shifted 16 bits. |
134 os.chmod(name, info.external_attr >> 16L) | 195 os.chmod(name, info.external_attr >> 16L) |
135 os.chdir(pushd) | 196 os.chdir(cwd) |
136 except Exception, e: | 197 except Exception, e: |
137 print >>sys.stderr, e | 198 print >>sys.stderr, e |
138 sys.exit(1) | 199 sys.exit(1) |
139 | 200 |
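A side note on the permission handling above, shown as a small stand-alone sketch: for archives created on a Unix host, the Unix mode is stored in the high 16 bits of external_attr, which is why the extraction loop shifts by 16 before calling os.chmod.

import zipfile

def unix_mode(info):
  """Return the Unix permission bits stored in a ZipInfo entry."""
  return (info.external_attr >> 16) & 07777

# e.g.:
# for info in zipfile.ZipFile('chrome-linux.zip').infolist():
#   print '%s %s' % (oct(unix_mode(info)), info.filename)
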
140 | 201 |
141 def ParseDirectoryIndex(context): | 202 def FetchRevision(context, rev, filename, quit_event=None): |
142 """Parses the Google Storage directory listing into a list of revision | 203 """Downloads and unzips revision |rev|. |
143 numbers. The range starts with context.good_revision and goes until the latest | 204 @param context A PathContext instance. |
144 revision.""" | 205 @param rev The chromium revision number/tag to download. |
Robert Sesek 2011/07/26 21:00:26: nit: capitalization
szager1 2011/07/29 17:19:21: Done.

145 def _FetchAndParse(url): | 206 @param filename The destination for the downloaded file. |
146 """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If | 207 @param quit_event A threading.Event which will be set by the master thread to |
147 next-marker is not None, then the listing is a partial listing and another | 208 indicate that the download should be aborted. |
148 fetch should be performed with next-marker being the marker= GET | 209 """ |
149 parameter.""" | 210 def ReportHook(blocknum, blocksize, totalsize): |
150 handle = urllib.urlopen(url) | 211 if quit_event and quit_event.is_set(): |
151 document = ElementTree.parse(handle) | 212 raise RuntimeError("Aborting download of revision %d" % rev) |
152 | 213 |
153 # All nodes in the tree are namespaced. Get the root's tag name to extract | 214 download_url = context.GetDownloadURL(rev) |
154 # the namespace. Etree does namespaces as |{namespace}tag|. | 215 try: |
155 root_tag = document.getroot().tag | 216 urllib.urlretrieve(download_url, filename, ReportHook) |
156 end_ns_pos = root_tag.find('}') | 217 except RuntimeError, e: |
157 if end_ns_pos == -1: | 218 pass |
158 raise Exception("Could not locate end namespace for directory index") | |
159 namespace = root_tag[:end_ns_pos + 1] | |
160 | |
161 # Find the prefix (_listing_platform_dir) and whether or not the list is | |
162 # truncated. | |
163 prefix_len = len(document.find(namespace + 'Prefix').text) | |
164 next_marker = None | |
165 is_truncated = document.find(namespace + 'IsTruncated') | |
166 if is_truncated is not None and is_truncated.text.lower() == 'true': | |
167 next_marker = document.find(namespace + 'NextMarker').text | |
168 | |
169 # Get a list of all the revisions. | |
170 all_prefixes = document.findall(namespace + 'CommonPrefixes/' + | |
171 namespace + 'Prefix') | |
172 # The <Prefix> nodes have content of the form of | |
173 # |_listing_platform_dir/revision/|. Strip off the platform dir and the | |
174 # trailing slash to just have a number. | |
175 revisions = [] | |
176 for prefix in all_prefixes: | |
177 revnum = prefix.text[prefix_len:-1] | |
178 try: | |
179 revnum = int(revnum) | |
180 revisions.append(revnum) | |
181 except ValueError: | |
182 pass | |
183 return (revisions, next_marker) | |
184 | |
185 # Fetch the first list of revisions. | |
186 (revisions, next_marker) = _FetchAndParse(context.GetListingURL()) | |
187 # If the result list was truncated, refetch with the next marker. Do this | |
188 # until an entire directory listing is done. | |
189 while next_marker: | |
190 (new_revisions, next_marker) = _FetchAndParse( | |
191 context.GetListingURL(next_marker)) | |
192 revisions.extend(new_revisions) | |
193 | |
194 return revisions | |
195 | 219 |
196 | 220 |
197 def ParseDirectoryIndexRecent(context): | 221 def RunRevision(context, revision, zipfile, profile, args): |
198 """Parses the recent builds directory listing into a list of revision | 222 """Given a zipped revision, unzip it and run the test.""" |
199 numbers.""" | 223 print "Trying revision %d..." % revision |
200 handle = urllib.urlopen(context.GetListingURLRecent()) | |
201 document = handle.read() | |
202 | 224 |
203 # Looking for: <a href="92976/">92976/</a> | 225 # Create a temp directory and unzip the revision into it |
204 return re.findall(r"<a href=\"(\d+)/\">\1/</a>", document) | |
205 | |
206 | |
207 def FilterRevList(context, revlist): | |
208 """Filter revlist to the revisions between |good_revision| and | |
209 |bad_revision| of the |context|.""" | |
210 # Download the revlist and filter for just the range between good and bad. | |
211 rev_range = range(context.good_revision, context.bad_revision) | |
212 revlist = filter(lambda r: r in rev_range, revlist) | |
213 revlist.sort() | |
214 return revlist | |
215 | |
216 | |
217 def TryRevision(context, rev, profile, args): | |
218 """Downloads revision |rev|, unzips it, and opens it for the user to test. | |
219 |profile| is the profile to use.""" | |
220 # Do this in a temp dir so we don't collide with user files. | |
221 cwd = os.getcwd() | 226 cwd = os.getcwd() |
222 tempdir = tempfile.mkdtemp(prefix='bisect_tmp') | 227 tempdir = tempfile.mkdtemp(prefix='bisect_tmp') |
228 UnzipFilenameToDir(zipfile, tempdir) | |
223 os.chdir(tempdir) | 229 os.chdir(tempdir) |
224 | 230 |
225 # Download the file. | 231 # Run the test |
226 download_url = context.GetDownloadURL(rev) | 232 testargs = [context.GetLaunchPath(), '--user-data-dir=%s' % profile] + args |
227 def _ReportHook(blocknum, blocksize, totalsize): | 233 subproc = subprocess.Popen(testargs, |
228 size = blocknum * blocksize | 234 bufsize=-1, |
229 if totalsize == -1: # Total size not known. | 235 stdout=subprocess.PIPE, |
230 progress = "Received %d bytes" % size | 236 stderr=subprocess.PIPE) |
231 else: | 237 (stdout, stderr) = subproc.communicate() |
232 size = min(totalsize, size) | |
233 progress = "Received %d of %d bytes, %.2f%%" % ( | |
234 size, totalsize, 100.0 * size / totalsize) | |
235 # Send a \r to let all progress messages use just one line of output. | |
236 sys.stdout.write("\r" + progress) | |
237 sys.stdout.flush() | |
238 try: | |
239 print 'Fetching ' + download_url | |
240 urllib.urlretrieve(download_url, context.archive_name, _ReportHook) | |
241 print | |
242 # Throw an exception if the download was less than 1000 bytes. | |
243 if os.path.getsize(context.archive_name) < 1000: raise Exception() | |
244 except Exception, e: | |
245 print('Could not retrieve the download. Sorry.') | |
246 sys.exit(-1) | |
247 | |
248 # Unzip the file. | |
249 print 'Unzipping ...' | |
250 UnzipFilenameToDir(context.archive_name, os.curdir) | |
251 | |
252 # Tell the system to open the app. | |
253 args = ['--user-data-dir=%s' % profile] + args | |
254 flags = ' '.join(map(pipes.quote, args)) | |
255 cmd = '%s %s' % (context.GetLaunchPath(), flags) | |
256 print 'Running %s' % cmd | |
257 os.system(cmd) | |
258 | 238 |
259 os.chdir(cwd) | 239 os.chdir(cwd) |
260 print 'Cleaning temp dir ...' | |
261 try: | 240 try: |
262 shutil.rmtree(tempdir, True) | 241 shutil.rmtree(tempdir, True) |
263 except Exception, e: | 242 except Exception, e: |
264 pass | 243 pass |
265 | 244 |
245 return (subproc.returncode, stdout, stderr) | |
266 | 246 |
267 def AskIsGoodBuild(rev): | 247 def AskIsGoodBuild(rev, status, stdout, stderr): |
268 """Ask the user whether build |rev| is good or bad.""" | 248 """Ask the user whether build |rev| is good or bad.""" |
269 # Loop until we get a response that we can parse. | 249 # Loop until we get a response that we can parse. |
270 while True: | 250 while True: |
271 response = raw_input('\nBuild %d is [(g)ood/(b)ad]: ' % int(rev)) | 251 response = raw_input('\nRevision %d is [(g)ood/(b)ad/(q)uit]: ' % int(rev)) |
272 if response and response in ('g', 'b'): | 252 if response and response in ('g', 'b'): |
273 return response == 'g' | 253 return response == 'g' |
254 if response and response == 'q': | |
255 raise SystemExit() | |
274 | 256 |
257 def Bisect(platform, | |
258 good_rev=0, | |
259 bad_rev=0, | |
260 try_args=(), | |
261 profile=None, | |
262 predicate=AskIsGoodBuild): | |
263 """Given known good and known bad revisions, run a binary search on all | |
264 archived revisions to determine the last known good revision. | |
275 | 265 |
276 def Bisect(revlist, | 266 @param platform Which build to download/run ('mac', 'win', 'linux64', etc.). |
277 context, | 267 @param good_rev Number/tag of the last known good revision. |
278 try_args=(), | 268 @param bad_rev Number/tag of the first known bad revision. |
279 profile='profile', | 269 @param try_args A tuple of arguments to pass to the test application. |
280 predicate=AskIsGoodBuild): | 270 @param profile The name of the user profile to run with. |
281 """Tries to find the exact commit where a regression was introduced by | |
282 running a binary search on all archived builds in a given revision range. | |
283 | |
284 @param revlist A list of chromium revision numbers to check. | |
285 @param context A PathContext object. | |
286 @param try_args A tuple of arguments to pass to the predicate function. | |
287 @param profile The user profile with which to run chromium. | |
288 @param predicate A predicate function which returns True iff the argument | 271 @param predicate A predicate function which returns True iff the argument |
289 chromium revision is good. | 272 chromium revision is good. |
273 | |
274 Threading is used to fetch chromium revisions in the background, speeding up | |
Robert Sesek 2011/07/26 21:00:26: nit: capitalization
szager1 2011/07/29 17:19:21: Done.
275 the user's experience. For example, suppose the bounds of the search are | |
Robert Sesek 2011/07/26 21:00:26: über-nit: this file uses single spaces between sen
szager1 2011/07/29 17:19:21: Done.
276 good_rev=0, bad_rev=100. The first revision we'd like to check is 50. | |
277 Depending on whether revision 50 is good or bad, the next revision to check | |
278 will be either 25 or 75. So, while revision 50 is being checked, we download | |
279 revisions 25 and 75 in the background. Once we know the good/bad verdict on | |
280 rev 50: | |
281 | |
282 - If rev 50 is good, we cancel the download of rev 25, and run the next test | |
283 on rev 75. | |
284 | |
285 - If rev 50 is bad, we cancel the download of rev 75, and run the next test | |
286 on rev 25. | |
290 """ | 287 """ |
291 | 288 |
289 if not profile: | |
290 profile = 'profile' | |
291 | |
292 context = PathContext(platform, good_rev, bad_rev) | |
293 cwd = os.getcwd() | |
294 | |
295 GetDownloadPath = lambda rev: os.path.join(cwd, '%d-%s' % ( | |
Robert Sesek 2011/07/26 21:00:26: Can you break this after the comma? That's probabl
szager1 2011/07/29 17:19:21: Done.
296 rev, context.archive_name)) | |
297 | |
298 revlist = context.GetRevList() | |
299 | |
300 # Get a list of revisions to bisect across. | |
301 if len(revlist) < 2: # Don't have enough builds to bisect | |
302 msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist | |
303 raise RuntimeError(msg) | |
304 | |
305 # Figure out our bookends and first pivot point; fetch the pivot revision | |
292 good = 0 | 306 good = 0 |
293 bad = len(revlist) - 1 | 307 bad = len(revlist) - 1 |
294 last_known_good_rev = revlist[good] | 308 pivot = bad / 2 |
295 first_known_bad_rev = revlist[bad] | 309 rev = revlist[pivot] |
310 zipfile = GetDownloadPath(rev) | |
311 print "Downloading revision %d..." % rev | |
312 FetchRevision(context, rev, zipfile) | |
296 | 313 |
297 # Binary search time! | 314 # Binary search time! |
298 while good < bad: | 315 while zipfile and bad - good > 1: |
299 candidates = revlist[good:bad] | 316 # Pre-fetch next two possible pivots |
300 num_poss = len(candidates) | 317 # - down_pivot is the next revision to check if the current revision turns |
301 if num_poss > 10: | 318 # out to be bad. |
302 print('%d candidates. %d tries left.' % | 319 # - up_pivot is the next revision to check if the current revision turns |
303 (num_poss, round(math.log(num_poss, 2)))) | 320 # out to be good. |
304 else: | 321 down_pivot = int((pivot - good) / 2) + good |
305 print('Candidates: %s' % revlist[good:bad]) | 322 down_thread = None |
323 if down_pivot != pivot and down_pivot != good: | |
324 down_rev = revlist[down_pivot] | |
325 down_zipfile = GetDownloadPath(down_rev) | |
326 down_event = threading.Event() | |
327 fetchargs = (context, down_rev, down_zipfile, down_event) | |
328 down_thread = threading.Thread(target=FetchRevision, | |
329 name='down_fetch', | |
330 args=fetchargs) | |
331 down_thread.start() | |
306 | 332 |
307 # Cut the problem in half... | 333 up_pivot = int((bad - pivot) / 2) + pivot |
308 test = int((bad - good) / 2) + good | 334 up_thread = None |
309 test_rev = revlist[test] | 335 if up_pivot != pivot and up_pivot != bad: |
336 up_rev = revlist[up_pivot] | |
337 up_zipfile = GetDownloadPath(up_rev) | |
338 up_event = threading.Event() | |
339 fetchargs = (context, up_rev, up_zipfile, up_event) | |
340 up_thread = threading.Thread(target=FetchRevision, | |
341 name='up_fetch', | |
342 args=fetchargs) | |
343 up_thread.start() | |
310 | 344 |
311 # Let the user give this rev a spin (in her own profile, if she wants). | 345 # Run test on the pivot revision |
Robert Sesek 2011/07/26 21:00:26: nit: full-stop comments. Here and elsewhere.
szager1 2011/07/29 17:19:21: Done.
312 TryRevision(context, test_rev, profile, try_args) | 346 (status, stdout, stderr) = RunRevision(context, |
313 if predicate(test_rev): | 347 rev, |
314 last_known_good_rev = test_rev | 348 zipfile, |
315 good = test + 1 | 349 profile, |
316 else: | 350 try_args) |
317 bad = test | 351 os.unlink(zipfile) |
352 zipfile = None | |
353 try: | |
Robert Sesek 2011/07/26 21:00:26: This block needs a comment.
szager1 2011/07/29 17:19:21: Done.
354 if predicate(rev, status, stdout, stderr): | |
Robert Sesek 2011/07/26 21:00:26: Could the work in these two branches be refactored
szager1 2011/07/29 17:19:21: Perhaps, but it would have to be one of those func
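Purely for illustration, a sketch of what the refactoring floated above might look like; the helper and its parameter names are hypothetical and not part of this change:

import os

def _Advance(wanted_thread, wanted_rev, wanted_pivot, wanted_zipfile,
             unwanted_thread, unwanted_event, unwanted_zipfile):
  # Cancel and clean up the download we no longer need.
  if unwanted_thread:
    unwanted_event.set()
    unwanted_thread.join()
    os.unlink(unwanted_zipfile)
  # Wait for the download we will test next.
  if wanted_thread:
    print "Downloading revision %d..." % wanted_rev
    wanted_thread.join()
  return wanted_pivot, wanted_zipfile
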
355 good = pivot | |
356 if down_thread: | |
357 down_event.set() # Kill the download of older revision | |
358 down_thread.join() | |
Robert Sesek 2011/07/26 21:00:26: Would it make sense to reuse the threads so you do
szager1 2011/07/29 17:19:21: I don't think there's any measurable performance g
359 os.unlink(down_zipfile) | |
360 if up_thread: | |
361 print "Downloading revision %d..." % up_rev | |
362 up_thread.join() # Wait for newer revision to finish downloading | |
363 pivot = up_pivot | |
364 zipfile = up_zipfile | |
365 else: | |
366 bad = pivot | |
367 if up_thread: | |
368 up_event.set() # Kill download of newer revision | |
369 up_thread.join() | |
370 os.unlink(up_zipfile) | |
371 if down_thread: | |
372 print "Downloading revision %d..." % down_rev | |
373 down_thread.join() # Wait for older revision to finish downloading | |
374 pivot = down_pivot | |
375 zipfile = down_zipfile | |
376 except SystemExit: | |
377 for f in [down_zipfile, up_zipfile]: | |
378 try: | |
379 os.unlink(f) | |
380 except OSError: | |
381 pass | |
382 sys.exit(0) | |
318 | 383 |
319 return (last_known_good_rev, first_known_bad_rev) | 384 rev = revlist[pivot] |
385 | |
386 return (revlist[good], revlist[bad]) | |
320 | 387 |
321 | 388 |
322 def main(): | 389 def main(): |
323 usage = ('%prog [options] [-- chromium-options]\n' | 390 usage = ('%prog [options] [-- chromium-options]\n' |
324 'Perform binary search on the snapshot builds.\n' | 391 'Perform binary search on the snapshot builds.\n' |
325 '\n' | 392 '\n' |
326 'Tip: add "-- --no-first-run" to bypass the first run prompts.') | 393 'Tip: add "-- --no-first-run" to bypass the first run prompts.') |
327 parser = optparse.OptionParser(usage=usage) | 394 parser = optparse.OptionParser(usage=usage) |
328 # Strangely, the default help output doesn't include the choice list. | 395 # Strangely, the default help output doesn't include the choice list. |
329 choices = ['mac', 'win', 'linux', 'linux64'] | 396 choices = ['mac', 'win', 'linux', 'linux64'] |
(...skipping 17 matching lines...) | |
347 parser.print_help() | 414 parser.print_help() |
348 return 1 | 415 return 1 |
349 | 416 |
350 if opts.bad and opts.good and (opts.good > opts.bad): | 417 if opts.bad and opts.good and (opts.good > opts.bad): |
351 print ('The good revision (%d) must precede the bad revision (%d).\n' % | 418 print ('The good revision (%d) must precede the bad revision (%d).\n' % |
352 (opts.good, opts.bad)) | 419 (opts.good, opts.bad)) |
353 parser.print_help() | 420 parser.print_help() |
354 return 1 | 421 return 1 |
355 | 422 |
356 # Create the context. Initialize 0 for the revisions as they are set below. | 423 # Create the context. Initialize 0 for the revisions as they are set below. |
357 context = PathContext(opts.archive, 0, 0, use_recent=False) | 424 context = PathContext(opts.archive, 0, 0) |
358 | 425 |
359 # Pick a starting point, try to get HEAD for this. | 426 # Pick a starting point, try to get HEAD for this. |
360 if opts.bad: | 427 if opts.bad: |
361 bad_rev = opts.bad | 428 bad_rev = opts.bad |
362 else: | 429 else: |
363 bad_rev = 0 | 430 bad_rev = 0 |
364 try: | 431 try: |
365 # Location of the latest build revision number | 432 # Location of the latest build revision number |
366 nh = urllib.urlopen(context.GetLastChangeURL()) | 433 nh = urllib.urlopen(context.GetLastChangeURL()) |
367 latest = int(nh.read()) | 434 latest = int(nh.read()) |
368 nh.close() | 435 nh.close() |
369 bad_rev = raw_input('Bad revision [HEAD:%d]: ' % latest) | 436 bad_rev = raw_input('Bad revision [HEAD:%d]: ' % latest) |
370 if (bad_rev == ''): | 437 if (bad_rev == ''): |
371 bad_rev = latest | 438 bad_rev = latest |
372 bad_rev = int(bad_rev) | 439 bad_rev = int(bad_rev) |
373 except Exception, e: | 440 except Exception, e: |
374 print('Could not determine latest revision. This could be bad...') | 441 print('Could not determine latest revision. This could be bad...') |
375 bad_rev = int(raw_input('Bad revision: ')) | 442 bad_rev = int(raw_input('Bad revision: ')) |
376 | 443 |
377 # Find out when we were good. | 444 # Find out when we were good. |
378 if opts.good: | 445 if opts.good: |
379 good_rev = opts.good | 446 good_rev = opts.good |
380 else: | 447 else: |
381 good_rev = 0 | 448 good_rev = 0 |
382 try: | 449 try: |
383 good_rev = int(raw_input('Last known good [0]: ')) | 450 good_rev = int(raw_input('Last known good [0]: ')) |
384 except Exception, e: | 451 except Exception, e: |
385 pass | 452 pass |
386 | 453 |
387 # Set the input parameters now that they've been validated. | |
388 context.good_revision = good_rev | |
389 context.bad_revision = bad_rev | |
390 | |
391 # Get recent revision list and check whether it's sufficient. | |
392 all_revs_recent = map(int, ParseDirectoryIndexRecent(context)) | |
393 all_revs_recent.sort() | |
394 # Skipping 0 since it might be deleted off the server soon: | |
395 all_revs_recent = all_revs_recent[1:] | |
396 oldest_recent_rev = all_revs_recent[0] | |
397 if good_rev >= oldest_recent_rev: | |
398 # The range is within recent builds, so switch on use_recent. | |
399 context.use_recent = True | |
400 elif bad_rev >= oldest_recent_rev: | |
401 # The range spans both old and recent builds. | |
402 # If oldest_recent_rev is good, we bisect the recent builds. | |
403 context.use_recent = True | |
404 TryRevision(context, oldest_recent_rev, opts.profile, args) | |
405 if AskIsGoodBuild(oldest_recent_rev): | |
406 # context.use_recent is True | |
407 context.good_revision = oldest_recent_rev | |
408 else: | |
409 context.use_recent = False | |
410 context.bad_revision = oldest_recent_rev | |
411 | |
412 all_revs = [] | |
413 if context.use_recent: | |
414 all_revs = all_revs_recent | |
415 else: | |
416 all_revs = map(int, ParseDirectoryIndex(context)) | |
417 | |
418 # Filter list of revisions to bisect across. | |
419 revlist = FilterRevList(context, all_revs) | |
420 if len(revlist) < 2: # Don't have enough builds to bisect | |
421 print 'We don\'t have enough builds to bisect. revlist: %s' % revlist | |
422 sys.exit(1) | |
423 | |
424 (last_known_good_rev, first_known_bad_rev) = Bisect( | 454 (last_known_good_rev, first_known_bad_rev) = Bisect( |
425 revlist, context, args, opts.profile) | 455 opts.archive, good_rev, bad_rev, args, opts.profile) |
426 | 456 |
427 # We're done. Let the user know the results in an official manner. | 457 # We're done. Let the user know the results in an official manner. |
428 print('You are probably looking for build %d.' % first_known_bad_rev) | 458 print('You are probably looking for build %d.' % first_known_bad_rev) |
429 print('CHANGELOG URL:') | 459 print('CHANGELOG URL:') |
430 print(CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev)) | 460 print(CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev)) |
431 print('Built at revision:') | 461 print('Built at revision:') |
432 print(BUILD_VIEWVC_URL % first_known_bad_rev) | 462 print(BUILD_VIEWVC_URL % first_known_bad_rev) |
433 | 463 |
434 if __name__ == '__main__': | 464 if __name__ == '__main__': |
435 sys.exit(main()) | 465 sys.exit(main()) |