Chromium Code Reviews
| Index: tools/bisect-perf-regression.py |
| diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py |
| index 862221c326d728750a61edbe29e3ab3c6c950a9d..567438663134ca567b968ac814c3df25ed91e4bc 100755 |
| --- a/tools/bisect-perf-regression.py |
| +++ b/tools/bisect-perf-regression.py |
| @@ -126,6 +126,24 @@ def IsStringInt(string_to_check): |
| return False |
| +def OutputAnnotationStepStart(name): |
| + """Outputs appropriate annotation to signal the start of a step to |
| + a trybot. |
| + |
| + Args: |
| + name: The name of the step. |
| + """ |
| + print '@@@SEED_STEP %s@@@' % name |
| + print '@@@STEP_CURSOR %s@@@' % name |
| + print '@@@STEP_STARTED@@@' |
| + |
| + |
| +def OutputAnnotationStepClosed(): |
| + """Outputs appropriate annotation to signal the closing of a step to |
| + a trybot.""" |
| + print '@@@STEP_CLOSED@@@' |
| + |
| + |
| def RunProcess(command): |
| """Run an arbitrary command, returning its output and return code. |
| @@ -779,12 +797,21 @@ class BisectPerformanceMetrics(object): |
| 'sort' : i + sort + 1} |
| def PrintRevisionsToBisectMessage(self, revision_list, depot): |
| + if self.opts.output_buildbot_annotations: |
| + list_length = len(revision_list) |
|
tonyg 2013/02/14 01:21:01: inline?
shatch 2013/02/14 01:37:47: Done.
|
| + step_name = 'Bisection Range: [%s - %s]' %\ |
| + (revision_list[list_length-1], revision_list[0]) |
|
tonyg 2013/02/14 01:21:01: indent continued line by 4 spaces
shatch 2013/02/14 01:37:47: Done.
|
| + OutputAnnotationStepStart(step_name) |
| + |
| print 'Revisions to bisect on [%s]:' % depot |
| for revision_id in revision_list: |
| print ' -> %s' % (revision_id, ) |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepClosed() |
| + |
| def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): |
| """Given known good and bad revisions, run a binary search on all |
| intermediate revisions to determine the CL where the performance regression |
| @@ -846,11 +873,17 @@ class BisectPerformanceMetrics(object): |
| results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,) |
| return results |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepStart('Gathering Revisions') |
| + |
| print 'Gathering revision range for bisection.' |
| # Retrieve a list of revisions to do bisection on. |
| src_revision_list = self.GetRevisionList(bad_revision, good_revision) |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepClosed() |
| + |
| if src_revision_list: |
| # revision_data will store information about a revision such as the |
| # depot it came from, the webkit/V8 revision at that time, |
| @@ -877,6 +910,9 @@ class BisectPerformanceMetrics(object): |
| self.PrintRevisionsToBisectMessage(revision_list, 'src') |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepStart('Gathering Reference Values') |
| + |
| print 'Gathering reference values for bisection.' |
| # Perform the performance tests on the good and bad revisions, to get |
| @@ -886,6 +922,9 @@ class BisectPerformanceMetrics(object): |
| command_to_run, |
| metric) |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepClosed() |
| + |
| if bad_results[1]: |
| results['error'] = bad_results[0] |
| return results |
| @@ -985,6 +1024,10 @@ class BisectPerformanceMetrics(object): |
| self.ChangeToDepotWorkingDirectory(next_revision_depot) |
| + if self.opts.output_buildbot_annotations: |
| + step_name = 'Working on [%s]' % next_revision_id |
| + OutputAnnotationStepStart(step_name) |
| + |
| print 'Working on revision: [%s]' % next_revision_id |
| run_results = self.SyncBuildAndRunRevision(next_revision_id, |
| @@ -992,6 +1035,9 @@ class BisectPerformanceMetrics(object): |
| command_to_run, |
| metric) |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepClosed() |
| + |
| # If the build is successful, check whether or not the metric |
| # had regressed. |
| if not run_results[1]: |
| @@ -1033,6 +1079,9 @@ class BisectPerformanceMetrics(object): |
| revision_data_sorted = sorted(revision_data.iteritems(), |
| key = lambda x: x[1]['sort']) |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepStart('Results') |
| + |
| print 'Full results of bisection:' |
| for current_id, current_data in revision_data_sorted: |
| @@ -1057,7 +1106,7 @@ class BisectPerformanceMetrics(object): |
| last_broken_revision = k |
| if last_broken_revision != None and first_working_revision != None: |
| - print 'Results: Regression was detected as a result of changes on:' |
| + print 'Results: Regression may have occurred in range:' |
| print ' -> First Bad Revision: [%s] [%s]' %\ |
| (last_broken_revision, |
| revision_data[last_broken_revision]['depot']) |
| @@ -1065,6 +1114,9 @@ class BisectPerformanceMetrics(object): |
| (first_working_revision, |
| revision_data[first_working_revision]['depot']) |
| + if self.opts.output_buildbot_annotations: |
| + OutputAnnotationStepClosed() |
| + |
| def DetermineAndCreateSourceControl(): |
| """Attempts to determine the underlying source control workflow and returns |
| @@ -1112,6 +1164,9 @@ def main(): |
| parser.add_option('--use_goma', |
| action="store_true", |
| help='Add a bunch of extra threads for goma.') |
| + parser.add_option('--output_buildbot_annotations', |
| + action="store_true", |
| + help='Add extra annotation output for buildbot.') |
| parser.add_option('--debug_ignore_build', |
| action="store_true", |
| help='DEBUG: Don\'t perform builds.') |