Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(5)

Side by Side Diff: tools/bisect-builds.py

Issue 11266025: Make tools/bisect_builds.py handle reversed bad/good revision ranges (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Now with inf% more testing Created 8 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | tools/bisect_test.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Snapshot Build Bisect Tool 6 """Snapshot Build Bisect Tool
7 7
8 This script bisects a snapshot archive using binary search. It starts at 8 This script bisects a snapshot archive using binary search. It starts at
9 a bad revision (it will try to guess HEAD) and asks for a last known-good 9 a bad revision (it will try to guess HEAD) and asks for a last known-good
10 revision. It will then binary search across this revision range by downloading, 10 revision. It will then binary search across this revision range by downloading,
(...skipping 14 matching lines...) Expand all
25 # Official Changelogs URL. 25 # Official Changelogs URL.
26 OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\ 26 OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\
27 'changelog?old_version=%s&new_version=%s' 27 'changelog?old_version=%s&new_version=%s'
28 28
29 # DEPS file URL. 29 # DEPS file URL.
30 DEPS_FILE= 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d' 30 DEPS_FILE= 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'
31 # WebKit Changelogs URL. 31 # WebKit Changelogs URL.
32 WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \ 32 WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \
33 'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000' 33 'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000'
34 34
35 DONE_MESSAGE = 'You are probably looking for a change made after ' \ 35 DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \
36 '%s (known good), but no later than %s (first known bad).' 36 '(known good), but no later than %s (first known bad).'
37 DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \
38 '(known bad), but no later than %s (first known good).'
37 39
38 ############################################################################### 40 ###############################################################################
39 41
40 import math 42 import math
41 import optparse 43 import optparse
42 import os 44 import os
43 import pipes 45 import pipes
44 import re 46 import re
45 import shutil 47 import shutil
46 import subprocess 48 import subprocess
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
133 """Returns a URL to the LAST_CHANGE file.""" 135 """Returns a URL to the LAST_CHANGE file."""
134 return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE' 136 return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'
135 137
136 def GetLaunchPath(self): 138 def GetLaunchPath(self):
137 """Returns a relative path (presumably from the archive extraction location) 139 """Returns a relative path (presumably from the archive extraction location)
138 that is used to run the executable.""" 140 that is used to run the executable."""
139 return os.path.join(self._archive_extract_dir, self._binary_name) 141 return os.path.join(self._archive_extract_dir, self._binary_name)
140 142
141 def ParseDirectoryIndex(self): 143 def ParseDirectoryIndex(self):
142 """Parses the Google Storage directory listing into a list of revision 144 """Parses the Google Storage directory listing into a list of revision
143 numbers. The range starts with self.good_revision and goes until 145 numbers."""
144 self.bad_revision."""
145 146
146 def _FetchAndParse(url): 147 def _FetchAndParse(url):
147 """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If 148 """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
148 next-marker is not None, then the listing is a partial listing and another 149 next-marker is not None, then the listing is a partial listing and another
149 fetch should be performed with next-marker being the marker= GET 150 fetch should be performed with next-marker being the marker= GET
150 parameter.""" 151 parameter."""
151 handle = urllib.urlopen(url) 152 handle = urllib.urlopen(url)
152 document = ElementTree.parse(handle) 153 document = ElementTree.parse(handle)
153 154
154 # All nodes in the tree are namespaced. Get the root's tag name to extract 155 # All nodes in the tree are namespaced. Get the root's tag name to extract
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
191 while next_marker: 192 while next_marker:
192 next_url = self.GetListingURL(next_marker) 193 next_url = self.GetListingURL(next_marker)
193 (new_revisions, next_marker) = _FetchAndParse(next_url) 194 (new_revisions, next_marker) = _FetchAndParse(next_url)
194 revisions.extend(new_revisions) 195 revisions.extend(new_revisions)
195 return revisions 196 return revisions
196 197
197 def GetRevList(self): 198 def GetRevList(self):
198 """Gets the list of revision numbers between self.good_revision and 199 """Gets the list of revision numbers between self.good_revision and
199 self.bad_revision.""" 200 self.bad_revision."""
200 # Download the revlist and filter for just the range between good and bad. 201 # Download the revlist and filter for just the range between good and bad.
201 minrev = self.good_revision 202 minrev = min(self.good_revision, self.bad_revision)
202 maxrev = self.bad_revision 203 maxrev = max(self.good_revision, self.bad_revision)
203 revlist = map(int, self.ParseDirectoryIndex()) 204 revlist = map(int, self.ParseDirectoryIndex())
204 revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)] 205 revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)]
205 revlist.sort() 206 revlist.sort()
206 return revlist 207 return revlist
207 208
208 def GetOfficialBuildsList(self): 209 def GetOfficialBuildsList(self):
209 """Gets the list of official build numbers between self.good_revision and 210 """Gets the list of official build numbers between self.good_revision and
210 self.bad_revision.""" 211 self.bad_revision."""
211 # Download the revlist and filter for just the range between good and bad. 212 # Download the revlist and filter for just the range between good and bad.
212 minrev = self.good_revision 213 minrev = min(self.good_revision, self.bad_revision)
213 maxrev = self.bad_revision 214 maxrev = max(self.good_revision, self.bad_revision)
214 handle = urllib.urlopen(OFFICIAL_BASE_URL) 215 handle = urllib.urlopen(OFFICIAL_BASE_URL)
215 dirindex = handle.read() 216 dirindex = handle.read()
216 handle.close() 217 handle.close()
217 build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex) 218 build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
218 final_list = [] 219 final_list = []
219 i = 0 220 i = 0
220 parsed_build_numbers = [LooseVersion(x) for x in build_numbers] 221 parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
221 for build_number in sorted(parsed_build_numbers): 222 for build_number in sorted(parsed_build_numbers):
222 path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \ 223 path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \
223 self._listing_platform_dir + self.archive_name 224 self._listing_platform_dir + self.archive_name
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
385 bad_rev=0, 386 bad_rev=0,
386 num_runs=1, 387 num_runs=1,
387 try_args=(), 388 try_args=(),
388 profile=None, 389 profile=None,
389 evaluate=AskIsGoodBuild): 390 evaluate=AskIsGoodBuild):
390 """Given known good and known bad revisions, run a binary search on all 391 """Given known good and known bad revisions, run a binary search on all
391 archived revisions to determine the last known good revision. 392 archived revisions to determine the last known good revision.
392 393
393 @param platform Which build to download/run ('mac', 'win', 'linux64', etc.). 394 @param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
394 @param official_builds Specify build type (Chromium or Official build). 395 @param official_builds Specify build type (Chromium or Official build).
395 @param good_rev Number/tag of the last known good revision. 396 @param good_rev Number/tag of the known good revision.
396 @param bad_rev Number/tag of the first known bad revision. 397 @param bad_rev Number/tag of the known bad revision.
397 @param num_runs Number of times to run each build for asking good/bad. 398 @param num_runs Number of times to run each build for asking good/bad.
398 @param try_args A tuple of arguments to pass to the test application. 399 @param try_args A tuple of arguments to pass to the test application.
399 @param profile The name of the user profile to run with. 400 @param profile The name of the user profile to run with.
400 @param evaluate A function which returns 'g' if the argument build is good, 401 @param evaluate A function which returns 'g' if the argument build is good,
401 'b' if it's bad or 'u' if unknown. 402 'b' if it's bad or 'u' if unknown.
402 403
403 Threading is used to fetch Chromium revisions in the background, speeding up 404 Threading is used to fetch Chromium revisions in the background, speeding up
404 the user's experience. For example, suppose the bounds of the search are 405 the user's experience. For example, suppose the bounds of the search are
405 good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on 406 good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
406 whether revision 50 is good or bad, the next revision to check will be either 407 whether revision 50 is good or bad, the next revision to check will be either
(...skipping 22 matching lines...) Expand all
429 if official_builds: 430 if official_builds:
430 revlist = context.GetOfficialBuildsList() 431 revlist = context.GetOfficialBuildsList()
431 else: 432 else:
432 revlist = context.GetRevList() 433 revlist = context.GetRevList()
433 434
434 # Get a list of revisions to bisect across. 435 # Get a list of revisions to bisect across.
435 if len(revlist) < 2: # Don't have enough builds to bisect. 436 if len(revlist) < 2: # Don't have enough builds to bisect.
436 msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist 437 msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
437 raise RuntimeError(msg) 438 raise RuntimeError(msg)
438 439
439 print 'Bisecting range [%s, %s].' % (revlist[0], revlist[-1])
440
441 # Figure out our bookends and first pivot point; fetch the pivot revision. 440 # Figure out our bookends and first pivot point; fetch the pivot revision.
442 good = 0 441 minrev = 0
443 bad = len(revlist) - 1 442 maxrev = len(revlist) - 1
444 pivot = bad / 2 443 pivot = maxrev / 2
445 rev = revlist[pivot] 444 rev = revlist[pivot]
446 zipfile = _GetDownloadPath(rev) 445 zipfile = _GetDownloadPath(rev)
447 initial_fetch = DownloadJob(context, 'initial_fetch', rev, zipfile) 446 fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
448 initial_fetch.Start() 447 fetch.Start()
449 initial_fetch.WaitFor() 448 fetch.WaitFor()
450 449
451 # Binary search time! 450 # Binary search time!
452 while zipfile and bad - good > 1: 451 while fetch and fetch.zipfile and maxrev - minrev > 1:
452 if bad_rev < good_rev:
453 min_str, max_str = ("bad", "good")
Nico 2012/11/02 22:32:11 nit: parens on rhs not needed (but don't hurt either)
454 else:
455 min_str, max_str = ("good", "bad")
456 print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str, \
457 revlist[maxrev], max_str)
458
453 # Pre-fetch next two possible pivots 459 # Pre-fetch next two possible pivots
454 # - down_pivot is the next revision to check if the current revision turns 460 # - down_pivot is the next revision to check if the current revision turns
455 # out to be bad. 461 # out to be bad.
456 # - up_pivot is the next revision to check if the current revision turns 462 # - up_pivot is the next revision to check if the current revision turns
457 # out to be good. 463 # out to be good.
458 down_pivot = int((pivot - good) / 2) + good 464 down_pivot = int((pivot - minrev) / 2) + minrev
459 down_fetch = None 465 down_fetch = None
460 if down_pivot != pivot and down_pivot != good: 466 if down_pivot != pivot and down_pivot != minrev:
461 down_rev = revlist[down_pivot] 467 down_rev = revlist[down_pivot]
462 down_fetch = DownloadJob(context, 'down_fetch', down_rev, 468 down_fetch = DownloadJob(context, 'down_fetch', down_rev,
463 _GetDownloadPath(down_rev)) 469 _GetDownloadPath(down_rev))
464 down_fetch.Start() 470 down_fetch.Start()
465 471
466 up_pivot = int((bad - pivot) / 2) + pivot 472 up_pivot = int((maxrev - pivot) / 2) + pivot
467 up_fetch = None 473 up_fetch = None
468 if up_pivot != pivot and up_pivot != bad: 474 if up_pivot != pivot and up_pivot != maxrev:
469 up_rev = revlist[up_pivot] 475 up_rev = revlist[up_pivot]
470 up_fetch = DownloadJob(context, 'up_fetch', up_rev, 476 up_fetch = DownloadJob(context, 'up_fetch', up_rev,
471 _GetDownloadPath(up_rev)) 477 _GetDownloadPath(up_rev))
472 up_fetch.Start() 478 up_fetch.Start()
473 479
474 # Run test on the pivot revision. 480 # Run test on the pivot revision.
475 status = None 481 status = None
476 stdout = None 482 stdout = None
477 stderr = None 483 stderr = None
478 try: 484 try:
479 (status, stdout, stderr) = RunRevision(context, 485 (status, stdout, stderr) = RunRevision(context,
480 rev, 486 rev,
481 zipfile, 487 fetch.zipfile,
482 profile, 488 profile,
483 num_runs, 489 num_runs,
484 try_args) 490 try_args)
485 except Exception, e: 491 except Exception, e:
486 print >>sys.stderr, e 492 print >>sys.stderr, e
487 os.unlink(zipfile) 493 fetch.Stop()
488 zipfile = None 494 fetch = None
489 495
490 # Call the evaluate function to see if the current revision is good or bad. 496 # Call the evaluate function to see if the current revision is good or bad.
491 # On that basis, kill one of the background downloads and complete the 497 # On that basis, kill one of the background downloads and complete the
492 # other, as described in the comments above. 498 # other, as described in the comments above.
493 try: 499 try:
494 answer = evaluate(rev, official_builds, status, stdout, stderr) 500 answer = evaluate(rev, official_builds, status, stdout, stderr)
495 if answer == 'g': 501 if answer == 'g' and good_rev < bad_rev or \
496 good = pivot 502 answer == 'b' and bad_rev < good_rev:
503 minrev = pivot
497 if down_fetch: 504 if down_fetch:
498 down_fetch.Stop() # Kill the download of the older revision. 505 down_fetch.Stop() # Kill the download of the older revision.
499 if up_fetch: 506 if up_fetch:
500 up_fetch.WaitFor() 507 up_fetch.WaitFor()
501 pivot = up_pivot 508 pivot = up_pivot
502 zipfile = up_fetch.zipfile 509 fetch = up_fetch
503 elif answer == 'b': 510 elif answer == 'b' and good_rev < bad_rev or \
504 bad = pivot 511 answer == 'g' and bad_rev < good_rev:
512 maxrev = pivot
505 if up_fetch: 513 if up_fetch:
506 up_fetch.Stop() # Kill the download of the newer revision. 514 up_fetch.Stop() # Kill the download of the newer revision.
507 if down_fetch: 515 if down_fetch:
508 down_fetch.WaitFor() 516 down_fetch.WaitFor()
509 pivot = down_pivot 517 pivot = down_pivot
510 zipfile = down_fetch.zipfile 518 fetch = down_fetch
511 elif answer == 'u': 519 elif answer == 'u':
512 # Nuke the revision from the revlist and choose a new pivot. 520 # Nuke the revision from the revlist and choose a new pivot.
513 revlist.pop(pivot) 521 revlist.pop(pivot)
514 bad -= 1 # Assumes bad >= pivot. 522 maxrev -= 1 # Assumes maxrev >= pivot.
515 523
516 fetch = None 524 if maxrev - minrev > 1:
517 if bad - good > 1:
518 # Alternate between using down_pivot or up_pivot for the new pivot 525 # Alternate between using down_pivot or up_pivot for the new pivot
519 # point, without affecting the range. Do this instead of setting the 526 # point, without affecting the range. Do this instead of setting the
520 # pivot to the midpoint of the new range because adjacent revisions 527 # pivot to the midpoint of the new range because adjacent revisions
521 # are likely affected by the same issue that caused the (u)nknown 528 # are likely affected by the same issue that caused the (u)nknown
522 # response. 529 # response.
523 if up_fetch and down_fetch: 530 if up_fetch and down_fetch:
524 fetch = [up_fetch, down_fetch][len(revlist) % 2] 531 fetch = [up_fetch, down_fetch][len(revlist) % 2]
525 elif up_fetch: 532 elif up_fetch:
526 fetch = up_fetch 533 fetch = up_fetch
527 else: 534 else:
(...skipping 16 matching lines...) Expand all
544 for f in [_GetDownloadPath(revlist[down_pivot]), 551 for f in [_GetDownloadPath(revlist[down_pivot]),
545 _GetDownloadPath(revlist[up_pivot])]: 552 _GetDownloadPath(revlist[up_pivot])]:
546 try: 553 try:
547 os.unlink(f) 554 os.unlink(f)
548 except OSError: 555 except OSError:
549 pass 556 pass
550 sys.exit(0) 557 sys.exit(0)
551 558
552 rev = revlist[pivot] 559 rev = revlist[pivot]
553 560
554 return (revlist[good], revlist[bad]) 561 return (revlist[minrev], revlist[maxrev])
555 562
556 563
557 def GetWebKitRevisionForChromiumRevision(rev): 564 def GetWebKitRevisionForChromiumRevision(rev):
558 """Returns the webkit revision that was in chromium's DEPS file at 565 """Returns the webkit revision that was in chromium's DEPS file at
559 chromium revision |rev|.""" 566 chromium revision |rev|."""
560 # . doesn't match newlines without re.DOTALL, so this is safe. 567 # . doesn't match newlines without re.DOTALL, so this is safe.
561 webkit_re = re.compile(r'webkit_revision.:\D*(\d+)') 568 webkit_re = re.compile(r'webkit_revision.:\D*(\d+)')
562 url = urllib.urlopen(DEPS_FILE % rev) 569 url = urllib.urlopen(DEPS_FILE % rev)
563 m = webkit_re.search(url.read()) 570 m = webkit_re.search(url.read())
564 url.close() 571 url.close()
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
631 else: 638 else:
632 good_rev = '0.0.0.0' if opts.official_builds else 0 639 good_rev = '0.0.0.0' if opts.official_builds else 0
633 640
634 if opts.official_builds: 641 if opts.official_builds:
635 good_rev = LooseVersion(good_rev) 642 good_rev = LooseVersion(good_rev)
636 bad_rev = LooseVersion(bad_rev) 643 bad_rev = LooseVersion(bad_rev)
637 else: 644 else:
638 good_rev = int(good_rev) 645 good_rev = int(good_rev)
639 bad_rev = int(bad_rev) 646 bad_rev = int(bad_rev)
640 647
641 if good_rev > bad_rev:
642 print ('The good revision (%s) must precede the bad revision (%s).\n' %
643 (good_rev, bad_rev))
644 parser.print_help()
645 return 1
646
647 if opts.times < 1: 648 if opts.times < 1:
648 print('Number of times to run (%d) must be greater than or equal to 1.' % 649 print('Number of times to run (%d) must be greater than or equal to 1.' %
649 opts.times) 650 opts.times)
650 parser.print_help() 651 parser.print_help()
651 return 1 652 return 1
652 653
653 (last_known_good_rev, first_known_bad_rev) = Bisect( 654 (min_chromium_rev, max_chromium_rev) = Bisect(
654 opts.archive, opts.official_builds, good_rev, bad_rev, opts.times, args, 655 opts.archive, opts.official_builds, good_rev, bad_rev, opts.times, args,
655 opts.profile) 656 opts.profile)
656 657
657 # Get corresponding webkit revisions. 658 # Get corresponding webkit revisions.
658 try: 659 try:
659 last_known_good_webkit_rev = GetWebKitRevisionForChromiumRevision( 660 min_webkit_rev = GetWebKitRevisionForChromiumRevision(min_chromium_rev)
660 last_known_good_rev) 661 max_webkit_rev = GetWebKitRevisionForChromiumRevision(max_chromium_rev)
661 first_known_bad_webkit_rev = GetWebKitRevisionForChromiumRevision(
662 first_known_bad_rev)
663 except Exception, e: 662 except Exception, e:
664 # Silently ignore the failure. 663 # Silently ignore the failure.
665 last_known_good_webkit_rev, first_known_bad_webkit_rev = 0, 0 664 min_webkit_rev, max_webkit_rev = 0, 0
666 665
667 # We're done. Let the user know the results in an official manner. 666 # We're done. Let the user know the results in an official manner.
668 print DONE_MESSAGE % (str(last_known_good_rev), str(first_known_bad_rev)) 667 if good_rev > bad_rev:
669 if last_known_good_webkit_rev != first_known_bad_webkit_rev: 668 print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev), str(max_chromium_rev))
669 else:
670 print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev), str(max_chromium_rev))
671
672 if min_webkit_rev != max_webkit_rev:
670 print 'WEBKIT CHANGELOG URL:' 673 print 'WEBKIT CHANGELOG URL:'
671 print ' ' + WEBKIT_CHANGELOG_URL % (first_known_bad_webkit_rev, 674 print ' ' + WEBKIT_CHANGELOG_URL % (max_webkit_rev, min_webkit_rev)
672 last_known_good_webkit_rev)
673 print 'CHANGELOG URL:' 675 print 'CHANGELOG URL:'
674 if opts.official_builds: 676 if opts.official_builds:
675 print OFFICIAL_CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev) 677 print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
676 else: 678 else:
677 print ' ' + CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev) 679 print ' ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
678 680
679 if __name__ == '__main__': 681 if __name__ == '__main__':
680 sys.exit(main()) 682 sys.exit(main())
OLDNEW
« no previous file with comments | « no previous file | tools/bisect_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698