Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Snapshot Build Bisect Tool | 6 """Snapshot Build Bisect Tool |
| 7 | 7 |
| 8 This script bisects a snapshot archive using binary search. It starts at | 8 This script bisects a snapshot archive using binary search. It starts at |
| 9 a bad revision (it will try to guess HEAD) and asks for a last known-good | 9 a bad revision (it will try to guess HEAD) and asks for a last known-good |
| 10 revision. It will then binary search across this revision range by downloading, | 10 revision. It will then binary search across this revision range by downloading, |
| (...skipping 14 matching lines...) | |
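
The module docstring above describes the core idea: binary-search a range of archived builds, asking the user whether each one is good or bad. Purely as an illustration (not this script's actual control flow, which also threads downloads, handles "unknown" answers, and, with this patch, accepts a reversed good/bad order), a bare-bones version of that search might look like the sketch below, with `is_good` standing in for the user's verdict:

```python
# Minimal sketch of bisecting a sorted revision list with a good/bad oracle.
# Assumes revlist is sorted ascending, revlist[0] is good and revlist[-1] bad.
def simple_bisect(revlist, is_good):
  low, high = 0, len(revlist) - 1
  while high - low > 1:
    pivot = (low + high) // 2
    if is_good(revlist[pivot]):
      low = pivot     # the failure was introduced after this revision
    else:
      high = pivot    # the failure is already present at this revision
  return revlist[low], revlist[high]   # last good, first bad
```
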
| 25 # Official Changelogs URL. | 25 # Official Changelogs URL. |
| 26 OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\ | 26 OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\ |
| 27 'changelog?old_version=%s&new_version=%s' | 27 'changelog?old_version=%s&new_version=%s' |
| 28 | 28 |
| 29 # DEPS file URL. | 29 # DEPS file URL. |
| 30 DEPS_FILE= 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d' | 30 DEPS_FILE= 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d' |
| 31 # WebKit Changelogs URL. | 31 # WebKit Changelogs URL. |
| 32 WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \ | 32 WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \ |
| 33 'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000' | 33 'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000' |
| 34 | 34 |
| 35 DONE_MESSAGE = 'You are probably looking for a change made after ' \ | 35 DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \ |
| 36 '%s (known good), but no later than %s (first known bad).' | 36 '(known good), but no later than %s (first known bad).' |
| | 37 DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \ |
| | 38 '(known bad), but no later than %s (first known good).' |
| 37 | 39 |
| 38 ############################################################################### | 40 ############################################################################### |
| 39 | 41 |
| 40 import math | 42 import math |
| 41 import optparse | 43 import optparse |
| 42 import os | 44 import os |
| 43 import pipes | 45 import pipes |
| 44 import re | 46 import re |
| 45 import shutil | 47 import shutil |
| 46 import subprocess | 48 import subprocess |
| (...skipping 86 matching lines...) | |
| 133 """Returns a URL to the LAST_CHANGE file.""" | 135 """Returns a URL to the LAST_CHANGE file.""" |
| 134 return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE' | 136 return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE' |
| 135 | 137 |
| 136 def GetLaunchPath(self): | 138 def GetLaunchPath(self): |
| 137 """Returns a relative path (presumably from the archive extraction location) | 139 """Returns a relative path (presumably from the archive extraction location) |
| 138 that is used to run the executable.""" | 140 that is used to run the executable.""" |
| 139 return os.path.join(self._archive_extract_dir, self._binary_name) | 141 return os.path.join(self._archive_extract_dir, self._binary_name) |
| 140 | 142 |
| 141 def ParseDirectoryIndex(self): | 143 def ParseDirectoryIndex(self): |
| 142 """Parses the Google Storage directory listing into a list of revision | 144 """Parses the Google Storage directory listing into a list of revision |
| 143 numbers. The range starts with self.good_revision and goes until | 145 numbers. The range starts with self.good_revision and goes until |
Nico (2012/11/01 19:06:28): It looks like the 2nd sentence in the comment is no longer accurate.
enne (OOO) (2012/11/02 22:26:57): Done.
| 144 self.bad_revision.""" | 146 self.bad_revision.""" |
| 145 | 147 |
| 146 def _FetchAndParse(url): | 148 def _FetchAndParse(url): |
| 147 """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If | 149 """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If |
| 148 next-marker is not None, then the listing is a partial listing and another | 150 next-marker is not None, then the listing is a partial listing and another |
| 149 fetch should be performed with next-marker being the marker= GET | 151 fetch should be performed with next-marker being the marker= GET |
| 150 parameter.""" | 152 parameter.""" |
| 151 handle = urllib.urlopen(url) | 153 handle = urllib.urlopen(url) |
| 152 document = ElementTree.parse(handle) | 154 document = ElementTree.parse(handle) |
| 153 | 155 |
| (...skipping 37 matching lines...) | |
| 191 while next_marker: | 193 while next_marker: |
| 192 next_url = self.GetListingURL(next_marker) | 194 next_url = self.GetListingURL(next_marker) |
| 193 (new_revisions, next_marker) = _FetchAndParse(next_url) | 195 (new_revisions, next_marker) = _FetchAndParse(next_url) |
| 194 revisions.extend(new_revisions) | 196 revisions.extend(new_revisions) |
| 195 return revisions | 197 return revisions |
| 196 | 198 |
| 197 def GetRevList(self): | 199 def GetRevList(self): |
| 198 """Gets the list of revision numbers between self.good_revision and | 200 """Gets the list of revision numbers between self.good_revision and |
| 199 self.bad_revision.""" | 201 self.bad_revision.""" |
| 200 # Download the revlist and filter for just the range between good and bad. | 202 # Download the revlist and filter for just the range between good and bad. |
| 201 minrev = self.good_revision | 203 minrev = min(self.good_revision, self.bad_revision) |
| 202 maxrev = self.bad_revision | 204 maxrev = max(self.good_revision, self.bad_revision) |
| 203 revlist = map(int, self.ParseDirectoryIndex()) | 205 revlist = map(int, self.ParseDirectoryIndex()) |
| 204 revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)] | 206 revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)] |
| 205 revlist.sort() | 207 revlist.sort() |
| 206 return revlist | 208 return revlist |
| 207 | 209 |
| 208 def GetOfficialBuildsList(self): | 210 def GetOfficialBuildsList(self): |
| 209 """Gets the list of official build numbers between self.good_revision and | 211 """Gets the list of official build numbers between self.good_revision and |
| 210 self.bad_revision.""" | 212 self.bad_revision.""" |
| 211 # Download the revlist and filter for just the range between good and bad. | 213 # Download the revlist and filter for just the range between good and bad. |
| 212 minrev = self.good_revision | 214 minrev = min(self.good_revision, self.bad_revision) |
| 213 maxrev = self.bad_revision | 215 maxrev = max(self.good_revision, self.bad_revision) |
| 214 handle = urllib.urlopen(OFFICIAL_BASE_URL) | 216 handle = urllib.urlopen(OFFICIAL_BASE_URL) |
| 215 dirindex = handle.read() | 217 dirindex = handle.read() |
| 216 handle.close() | 218 handle.close() |
| 217 build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex) | 219 build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex) |
| 218 final_list = [] | 220 final_list = [] |
| 219 i = 0 | 221 i = 0 |
| 220 parsed_build_numbers = [LooseVersion(x) for x in build_numbers] | 222 parsed_build_numbers = [LooseVersion(x) for x in build_numbers] |
| 221 for build_number in sorted(parsed_build_numbers): | 223 for build_number in sorted(parsed_build_numbers): |
| 222 path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \ | 224 path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \ |
| 223 self._listing_platform_dir + self.archive_name | 225 self._listing_platform_dir + self.archive_name |
| (...skipping 161 matching lines...) | |
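
GetOfficialBuildsList wraps each build number in LooseVersion before sorting, so official versions compare component by component rather than as strings. A quick illustration with made-up version strings:

```python
# LooseVersion compares dotted versions numerically, not lexically.
from distutils.version import LooseVersion

assert LooseVersion('22.0.1229.0') < LooseVersion('23.0.1271.17')
assert sorted(['10.0.2', '9.0.10'], key=LooseVersion) == ['9.0.10', '10.0.2']
assert '10.0.2' < '9.0.10'   # plain string comparison gets the order wrong
```
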
| 385 bad_rev=0, | 387 bad_rev=0, |
| 386 num_runs=1, | 388 num_runs=1, |
| 387 try_args=(), | 389 try_args=(), |
| 388 profile=None, | 390 profile=None, |
| 389 evaluate=AskIsGoodBuild): | 391 evaluate=AskIsGoodBuild): |
| 390 """Given known good and known bad revisions, run a binary search on all | 392 """Given known good and known bad revisions, run a binary search on all |
| 391 archived revisions to determine the last known good revision. | 393 archived revisions to determine the last known good revision. |
| 392 | 394 |
| 393 @param platform Which build to download/run ('mac', 'win', 'linux64', etc.). | 395 @param platform Which build to download/run ('mac', 'win', 'linux64', etc.). |
| 394 @param official_builds Specify build type (Chromium or Official build). | 396 @param official_builds Specify build type (Chromium or Official build). |
| 395 @param good_rev Number/tag of the last known good revision. | 397 @param good_rev Number/tag of the known good revision. |
| 396 @param bad_rev Number/tag of the first known bad revision. | 398 @param bad_rev Number/tag of the known bad revision. |
| 397 @param num_runs Number of times to run each build for asking good/bad. | 399 @param num_runs Number of times to run each build for asking good/bad. |
| 398 @param try_args A tuple of arguments to pass to the test application. | 400 @param try_args A tuple of arguments to pass to the test application. |
| 399 @param profile The name of the user profile to run with. | 401 @param profile The name of the user profile to run with. |
| 400 @param evaluate A function which returns 'g' if the argument build is good, | 402 @param evaluate A function which returns 'g' if the argument build is good, |
| 401 'b' if it's bad or 'u' if unknown. | 403 'b' if it's bad or 'u' if unknown. |
| 402 | 404 |
| 403 Threading is used to fetch Chromium revisions in the background, speeding up | 405 Threading is used to fetch Chromium revisions in the background, speeding up |
| 404 the user's experience. For example, suppose the bounds of the search are | 406 the user's experience. For example, suppose the bounds of the search are |
| 405 good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on | 407 good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on |
| 406 whether revision 50 is good or bad, the next revision to check will be either | 408 whether revision 50 is good or bad, the next revision to check will be either |
| (...skipping 22 matching lines...) | |
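
The prefetching strategy the docstring describes (start downloads for both possible next pivots while the current pivot is being evaluated, then cancel the one that turns out to be unneeded) can be sketched roughly as below. `start_download` is a hypothetical stand-in for DownloadJob, returning an object with wait() and cancel(); the edge-case guards the real script has (not re-fetching an endpoint) are left out:

```python
# Rough sketch of bisection with background prefetch of both candidate pivots.
def bisect_with_prefetch(revlist, is_good, start_download):
  low, high = 0, len(revlist) - 1      # revlist[low] good, revlist[high] bad
  pivot = high // 2
  build = start_download(revlist[pivot]).wait()
  while high - low > 1:
    down_pivot = (low + pivot) // 2    # next pivot if revlist[pivot] is bad
    up_pivot = (pivot + high) // 2     # next pivot if revlist[pivot] is good
    down_job = start_download(revlist[down_pivot])
    up_job = start_download(revlist[up_pivot])
    if is_good(build):
      low, pivot = pivot, up_pivot
      down_job.cancel()                # older candidate is no longer needed
      build = up_job.wait()
    else:
      high, pivot = pivot, down_pivot
      up_job.cancel()                  # newer candidate is no longer needed
      build = down_job.wait()
  return revlist[low], revlist[high]
```
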
| 429 if official_builds: | 431 if official_builds: |
| 430 revlist = context.GetOfficialBuildsList() | 432 revlist = context.GetOfficialBuildsList() |
| 431 else: | 433 else: |
| 432 revlist = context.GetRevList() | 434 revlist = context.GetRevList() |
| 433 | 435 |
| 434 # Get a list of revisions to bisect across. | 436 # Get a list of revisions to bisect across. |
| 435 if len(revlist) < 2: # Don't have enough builds to bisect. | 437 if len(revlist) < 2: # Don't have enough builds to bisect. |
| 436 msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist | 438 msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist |
| 437 raise RuntimeError(msg) | 439 raise RuntimeError(msg) |
| 438 | 440 |
| 439 print 'Bisecting range [%s, %s].' % (revlist[0], revlist[-1]) | |
| 440 | |
| 441 # Figure out our bookends and first pivot point; fetch the pivot revision. | 441 # Figure out our bookends and first pivot point; fetch the pivot revision. |
| 442 good = 0 | 442 minrev = 0 |
| 443 bad = len(revlist) - 1 | 443 maxrev = len(revlist) - 1 |
| 444 pivot = bad / 2 | 444 pivot = maxrev / 2 |
| 445 rev = revlist[pivot] | 445 rev = revlist[pivot] |
| 446 zipfile = _GetDownloadPath(rev) | 446 zipfile = _GetDownloadPath(rev) |
| 447 initial_fetch = DownloadJob(context, 'initial_fetch', rev, zipfile) | 447 initial_fetch = DownloadJob(context, 'initial_fetch', rev, zipfile) |
| 448 initial_fetch.Start() | 448 initial_fetch.Start() |
| 449 initial_fetch.WaitFor() | 449 initial_fetch.WaitFor() |
| 450 | 450 |
| 451 # Binary search time! | 451 # Binary search time! |
| 452 while zipfile and bad - good > 1: | 452 while zipfile and maxrev - minrev > 1: |
| | 453 min_str, max_str = ["good", "bad"][::1 - 2 * (bad_rev < good_rev)] |
Nico (2012/11/01 19:06:28): What is this, perl? Just use an if and 4 lines.
enne (OOO) (2012/11/01 19:20:32): Haha. Will do.
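
The one-liner reverses the ["good", "bad"] pair when bad_rev < good_rev. The plain-if version Nico is asking for would presumably read something like this (the later patch set isn't shown here, so this is just a sketch of the equivalent logic):

```python
# Equivalent to the slicing trick: label the lower bound "good" and the upper
# bound "bad" for a normal bisect, and swap the labels for a reversed one.
if bad_rev < good_rev:
  min_str, max_str = 'bad', 'good'
else:
  min_str, max_str = 'good', 'bad'
```
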
| | 454 print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str, \ |
| | 455 revlist[maxrev], max_str) |
| | 456 |
| 453 # Pre-fetch next two possible pivots | 457 # Pre-fetch next two possible pivots |
| 454 # - down_pivot is the next revision to check if the current revision turns | 458 # - down_pivot is the next revision to check if the current revision turns |
| 455 # out to be bad. | 459 # out to be bad. |
| 456 # - up_pivot is the next revision to check if the current revision turns | 460 # - up_pivot is the next revision to check if the current revision turns |
| 457 # out to be good. | 461 # out to be good. |
| 458 down_pivot = int((pivot - good) / 2) + good | 462 down_pivot = int((pivot - minrev) / 2) + minrev |
| 459 down_fetch = None | 463 down_fetch = None |
| 460 if down_pivot != pivot and down_pivot != good: | 464 if down_pivot != pivot and down_pivot != minrev: |
| 461 down_rev = revlist[down_pivot] | 465 down_rev = revlist[down_pivot] |
| 462 down_fetch = DownloadJob(context, 'down_fetch', down_rev, | 466 down_fetch = DownloadJob(context, 'down_fetch', down_rev, |
| 463 _GetDownloadPath(down_rev)) | 467 _GetDownloadPath(down_rev)) |
| 464 down_fetch.Start() | 468 down_fetch.Start() |
| 465 | 469 |
| 466 up_pivot = int((bad - pivot) / 2) + pivot | 470 up_pivot = int((maxrev - pivot) / 2) + pivot |
| 467 up_fetch = None | 471 up_fetch = None |
| 468 if up_pivot != pivot and up_pivot != bad: | 472 if up_pivot != pivot and up_pivot != maxrev: |
| 469 up_rev = revlist[up_pivot] | 473 up_rev = revlist[up_pivot] |
| 470 up_fetch = DownloadJob(context, 'up_fetch', up_rev, | 474 up_fetch = DownloadJob(context, 'up_fetch', up_rev, |
| 471 _GetDownloadPath(up_rev)) | 475 _GetDownloadPath(up_rev)) |
| 472 up_fetch.Start() | 476 up_fetch.Start() |
| 473 | 477 |
| 474 # Run test on the pivot revision. | 478 # Run test on the pivot revision. |
| 475 status = None | 479 status = None |
| 476 stdout = None | 480 stdout = None |
| 477 stderr = None | 481 stderr = None |
| 478 try: | 482 try: |
| 479 (status, stdout, stderr) = RunRevision(context, | 483 (status, stdout, stderr) = RunRevision(context, |
| 480 rev, | 484 rev, |
| 481 zipfile, | 485 zipfile, |
| 482 profile, | 486 profile, |
| 483 num_runs, | 487 num_runs, |
| 484 try_args) | 488 try_args) |
| 485 except Exception, e: | 489 except Exception, e: |
| 486 print >>sys.stderr, e | 490 print >>sys.stderr, e |
| 487 os.unlink(zipfile) | 491 os.unlink(zipfile) |
| 488 zipfile = None | 492 zipfile = None |
| 489 | 493 |
| 490 # Call the evaluate function to see if the current revision is good or bad. | 494 # Call the evaluate function to see if the current revision is good or bad. |
| 491 # On that basis, kill one of the background downloads and complete the | 495 # On that basis, kill one of the background downloads and complete the |
| 492 # other, as described in the comments above. | 496 # other, as described in the comments above. |
| 493 try: | 497 try: |
| 494 answer = evaluate(rev, official_builds, status, stdout, stderr) | 498 answer = evaluate(rev, official_builds, status, stdout, stderr) |
| 495 if answer == 'g': | 499 if answer == 'g' and good_rev < bad_rev or \ |
| 496 good = pivot | 500 answer == 'b' and bad_rev < good_rev: |
| | 501 minrev = pivot |
| 497 if down_fetch: | 502 if down_fetch: |
| 498 down_fetch.Stop() # Kill the download of the older revision. | 503 down_fetch.Stop() # Kill the download of the older revision. |
| 499 if up_fetch: | 504 if up_fetch: |
| 500 up_fetch.WaitFor() | 505 up_fetch.WaitFor() |
| 501 pivot = up_pivot | 506 pivot = up_pivot |
| 502 zipfile = up_fetch.zipfile | 507 zipfile = up_fetch.zipfile |
| 503 elif answer == 'b': | 508 elif answer == 'b' and good_rev < bad_rev or \ |
| 504 bad = pivot | 509 answer == 'g' and bad_rev < good_rev: |
| | 510 maxrev = pivot |
| 505 if up_fetch: | 511 if up_fetch: |
| 506 up_fetch.Stop() # Kill the download of the newer revision. | 512 up_fetch.Stop() # Kill the download of the newer revision. |
| 507 if down_fetch: | 513 if down_fetch: |
| 508 down_fetch.WaitFor() | 514 down_fetch.WaitFor() |
| 509 pivot = down_pivot | 515 pivot = down_pivot |
| 510 zipfile = down_fetch.zipfile | 516 zipfile = down_fetch.zipfile |
| 511 elif answer == 'u': | 517 elif answer == 'u': |
| 512 # Nuke the revision from the revlist and choose a new pivot. | 518 # Nuke the revision from the revlist and choose a new pivot. |
| 513 revlist.pop(pivot) | 519 revlist.pop(pivot) |
| 514 bad -= 1 # Assumes bad >= pivot. | 520 maxrev -= 1 # Assumes maxrev >= pivot. |
| 515 | 521 |
| 516 fetch = None | 522 fetch = None |
| 517 if bad - good > 1: | 523 if maxrev - minrev > 1: |
| 518 # Alternate between using down_pivot or up_pivot for the new pivot | 524 # Alternate between using down_pivot or up_pivot for the new pivot |
| 519 # point, without affecting the range. Do this instead of setting the | 525 # point, without affecting the range. Do this instead of setting the |
| 520 # pivot to the midpoint of the new range because adjacent revisions | 526 # pivot to the midpoint of the new range because adjacent revisions |
| 521 # are likely affected by the same issue that caused the (u)nknown | 527 # are likely affected by the same issue that caused the (u)nknown |
| 522 # response. | 528 # response. |
| 523 if up_fetch and down_fetch: | 529 if up_fetch and down_fetch: |
| 524 fetch = [up_fetch, down_fetch][len(revlist) % 2] | 530 fetch = [up_fetch, down_fetch][len(revlist) % 2] |
| 525 elif up_fetch: | 531 elif up_fetch: |
| 526 fetch = up_fetch | 532 fetch = up_fetch |
| 527 else: | 533 else: |
| (...skipping 16 matching lines...) | |
| 544 for f in [_GetDownloadPath(revlist[down_pivot]), | 550 for f in [_GetDownloadPath(revlist[down_pivot]), |
| 545 _GetDownloadPath(revlist[up_pivot])]: | 551 _GetDownloadPath(revlist[up_pivot])]: |
| 546 try: | 552 try: |
| 547 os.unlink(f) | 553 os.unlink(f) |
| 548 except OSError: | 554 except OSError: |
| 549 pass | 555 pass |
| 550 sys.exit(0) | 556 sys.exit(0) |
| 551 | 557 |
| 552 rev = revlist[pivot] | 558 rev = revlist[pivot] |
| 553 | 559 |
| 554 return (revlist[good], revlist[bad]) | 560 return (revlist[minrev], revlist[maxrev]) |
| 555 | 561 |
| 556 | 562 |
| 557 def GetWebKitRevisionForChromiumRevision(rev): | 563 def GetWebKitRevisionForChromiumRevision(rev): |
| 558 """Returns the webkit revision that was in chromium's DEPS file at | 564 """Returns the webkit revision that was in chromium's DEPS file at |
| 559 chromium revision |rev|.""" | 565 chromium revision |rev|.""" |
| 560 # . doesn't match newlines without re.DOTALL, so this is safe. | 566 # . doesn't match newlines without re.DOTALL, so this is safe. |
| 561 webkit_re = re.compile(r'webkit_revision.:\D*(\d+)') | 567 webkit_re = re.compile(r'webkit_revision.:\D*(\d+)') |
| 562 url = urllib.urlopen(DEPS_FILE % rev) | 568 url = urllib.urlopen(DEPS_FILE % rev) |
| 563 m = webkit_re.search(url.read()) | 569 m = webkit_re.search(url.read()) |
| 564 url.close() | 570 url.close() |
| (...skipping 66 matching lines...) | |
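
The webkit_re pattern above is written loosely (a `.` after the key name and `\D*` before the digits) so it matches the revision regardless of how DEPS quotes the value; a quick check against a made-up DEPS fragment:

```python
# The pattern captures the digits of webkit_revision whatever the quoting.
import re

webkit_re = re.compile(r'webkit_revision.:\D*(\d+)')
sample = '  "webkit_revision": "133087",'   # hypothetical DEPS line
match = webkit_re.search(sample)
assert match and match.group(1) == '133087'
```
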
| 631 else: | 637 else: |
| 632 good_rev = '0.0.0.0' if opts.official_builds else 0 | 638 good_rev = '0.0.0.0' if opts.official_builds else 0 |
| 633 | 639 |
| 634 if opts.official_builds: | 640 if opts.official_builds: |
| 635 good_rev = LooseVersion(good_rev) | 641 good_rev = LooseVersion(good_rev) |
| 636 bad_rev = LooseVersion(bad_rev) | 642 bad_rev = LooseVersion(bad_rev) |
| 637 else: | 643 else: |
| 638 good_rev = int(good_rev) | 644 good_rev = int(good_rev) |
| 639 bad_rev = int(bad_rev) | 645 bad_rev = int(bad_rev) |
| 640 | 646 |
| 641 if good_rev > bad_rev: | |
| 642 print ('The good revision (%s) must precede the bad revision (%s).\n' % | |
| 643 (good_rev, bad_rev)) | |
| 644 parser.print_help() | |
| 645 return 1 | |
| 646 | |
| 647 if opts.times < 1: | 647 if opts.times < 1: |
| 648 print('Number of times to run (%d) must be greater than or equal to 1.' % | 648 print('Number of times to run (%d) must be greater than or equal to 1.' % |
| 649 opts.times) | 649 opts.times) |
| 650 parser.print_help() | 650 parser.print_help() |
| 651 return 1 | 651 return 1 |
| 652 | 652 |
| 653 (last_known_good_rev, first_known_bad_rev) = Bisect( | 653 (min_chromium_rev, max_chromium_rev) = Bisect( |
| 654 opts.archive, opts.official_builds, good_rev, bad_rev, opts.times, args, | 654 opts.archive, opts.official_builds, good_rev, bad_rev, opts.times, args, |
| 655 opts.profile) | 655 opts.profile) |
| 656 | 656 |
| 657 # Get corresponding webkit revisions. | 657 # Get corresponding webkit revisions. |
| 658 try: | 658 try: |
| 659 last_known_good_webkit_rev = GetWebKitRevisionForChromiumRevision( | 659 min_webkit_rev = GetWebKitRevisionForChromiumRevision(min_chromium_rev) |
| 660 last_known_good_rev) | 660 max_webkit_rev = GetWebKitRevisionForChromiumRevision(max_chromium_rev) |
| 661 first_known_bad_webkit_rev = GetWebKitRevisionForChromiumRevision( | |
| 662 first_known_bad_rev) | |
| 663 except Exception, e: | 661 except Exception, e: |
| 664 # Silently ignore the failure. | 662 # Silently ignore the failure. |
| 665 last_known_good_webkit_rev, first_known_bad_webkit_rev = 0, 0 | 663 min_webkit_rev, max_webkit_rev = 0, 0 |
| 666 | 664 |
| 667 # We're done. Let the user know the results in an official manner. | 665 # We're done. Let the user know the results in an official manner. |
| 668 print DONE_MESSAGE % (str(last_known_good_rev), str(first_known_bad_rev)) | 666 if good_rev > bad_rev: |
| 669 if last_known_good_webkit_rev != first_known_bad_webkit_rev: | 667 print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev), str(max_chromium_rev)) |
| | 668 else: |
| | 669 print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev), str(max_chromium_rev)) |
| | 670 |
| | 671 if min_webkit_rev != max_webkit_rev: |
| 670 print 'WEBKIT CHANGELOG URL:' | 672 print 'WEBKIT CHANGELOG URL:' |
| 671 print ' ' + WEBKIT_CHANGELOG_URL % (first_known_bad_webkit_rev, | 673 print ' ' + WEBKIT_CHANGELOG_URL % (max_webkit_rev, min_webkit_rev) |
Nico (2012/11/01 19:06:28): Should this be printed backwards too?
enne (OOO) (2012/11/02 22:26:57): This is correct.
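
For context on that exchange: WEBKIT_CHANGELOG_URL puts the newer revision in rev= and the older one in stop_rev=, so the new code always passes (max_webkit_rev, min_webkit_rev); assuming trac's log view walks backwards from rev to stop_rev, that order does not need to flip with the good/bad direction. With hypothetical revision numbers:

```python
# Hypothetical webkit revisions, just to show the argument order.
WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \
                       'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000'
min_webkit_rev, max_webkit_rev = 131000, 131500
print(WEBKIT_CHANGELOG_URL % (max_webkit_rev, min_webkit_rev))
# -> http://trac.webkit.org/log/trunk/?rev=131500&stop_rev=131000&verbose=on&limit=10000
```
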
| 672 last_known_good_webkit_rev) | |
| 673 print 'CHANGELOG URL:' | 674 print 'CHANGELOG URL:' |
| 674 if opts.official_builds: | 675 if opts.official_builds: |
| 675 print OFFICIAL_CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev) | 676 print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev) |
| 676 else: | 677 else: |
| 677 print ' ' + CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev) | 678 print ' ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev) |
| 678 | 679 |
| 679 if __name__ == '__main__': | 680 if __name__ == '__main__': |
| 680 sys.exit(main()) | 681 sys.exit(main()) |