Index: tools/bisect-perf-regression.py |
diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py |
index 14cf10c8ff61df3e8c7992101519e2e44096da99..51aeab26ee061193ad2b27029b943c90bf31e79c 100755 |
--- a/tools/bisect-perf-regression.py |
+++ b/tools/bisect-perf-regression.py |
@@ -494,6 +494,26 @@ class GitSourceControl(SourceControl): |
return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1] |
+ def QueryFileRevisionHistory(self, filename, revision_start, revision_end): |
+ """Returns a list of commits that modified this file. |
+ |
+ Args: |
+ filename: Name of file. |
+ revision_start: Start of revision range. |
+ revision_end: End of revision range. |
+ |
+ Returns: |
+ Returns a list of commits that touched this file. |
+ """ |
+ cmd = ['log', '--format=%H', '%s^1..%s' % (revision_start, revision_end), |
+ filename] |
+ (output, return_code) = RunGit(cmd) |
+ |
+ assert not return_code, 'An error occurred while running'\ |
tonyg 2013/05/28 23:16:07: We already do this assert not return_code dance in
shatch 2013/05/28 23:38:17: Done.
|
+ ' "git %s"' % ' '.join(cmd) |
+ |
+ return [o for o in output.split('\n') if o] |
+ |
class BisectPerformanceMetrics(object): |
"""BisectPerformanceMetrics performs a bisection against a list of range |
of revisions to narrow down where performance regressions may have |
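
tonyg's comment on QueryFileRevisionHistory above notes that the "assert not return_code" dance after RunGit is repeated throughout the script. The shared helper itself is not part of this patch; what follows is only a minimal sketch of what it could look like. CheckRunGit is a hypothetical name, and the RunGit stand-in simply assumes the (output, return_code) tuple shape the hunk above relies on.

import subprocess

def RunGit(command):
  """Stand-in for the script's RunGit helper, included only so the sketch is
  self-contained: runs git with the given arguments and returns a tuple of
  (output, return_code)."""
  proc = subprocess.Popen(['git'] + command,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  (output, _) = proc.communicate()
  return (output, proc.returncode)

def CheckRunGit(command):
  """Hypothetical wrapper along the lines of the review comment: runs git and
  asserts that the command succeeded, so callers don't repeat the assert."""
  (output, return_code) = RunGit(command)
  assert not return_code, 'An error occurred while running "git %s"' % (
      ' '.join(command))
  return output

# With such a helper, the body of QueryFileRevisionHistory above would
# reduce to roughly:
#   output = CheckRunGit(['log', '--format=%H',
#                         '%s^1..%s' % (revision_start, revision_end),
#                         filename])
#   return [o for o in output.split('\n') if o]
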
@@ -507,6 +527,7 @@ class BisectPerformanceMetrics(object): |
self.src_cwd = os.getcwd() |
self.depot_cwd = {} |
self.cleanup_commands = [] |
+ self.warnings = [] |
# This always starts true since the script grabs latest first. |
self.was_blink = True |
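
The new warnings list ties the later hunks of this patch together: messages are appended wherever the script detects something questionable and are printed in one block at the end of the report. A condensed, hypothetical sketch of that collect-then-report flow (the class and method names here are made up for illustration; only the warning strings and the print format come from the patch):

class _WarningsSketch(object):
  def __init__(self):
    self.warnings = []  # mirrors the new attribute added above

  def _NoteLowConfidence(self):
    # Detection sites just append a message, as the later hunks do.
    self.warnings.append('Tests were only set to run once. This '
        'may be insufficient to get meaningful results.')

  def _PrintReport(self):
    # The final report prints everything that was collected.
    if self.warnings:
      print 'The following warnings were generated:'
      for w in self.warnings:
        print ' - %s' % w
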
@@ -1044,6 +1065,59 @@ class BisectPerformanceMetrics(object): |
if self.opts.output_buildbot_annotations: |
bisect_utils.OutputAnnotationStepClosed() |
+ def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision): |
+ """Checks to see if changes to DEPS file occurred, and that the revision |
+ range also includes the change to .DEPS.git. If it doesn't, attempts to |
+ expand the revision range to include it. |
+ |
+ Args: |
+ good_revision: Last known good revision. |
+ bad_rev: First known bad revision. |
tonyg 2013/05/28 23:16:07: Nit: args are in the wrong order here
shatch 2013/05/28 23:38:17: Done.
|
+ |
+ Returns: |
+ A tuple with the new good and bad revisions. |
tonyg 2013/05/28 23:16:07: A tuple with the new bad and good revisions
shatch 2013/05/28 23:38:17: Done.
|
+ """ |
+ if self.source_control.IsGit(): |
+ changes_to_deps = self.source_control.QueryFileRevisionHistory( |
+ 'DEPS', good_revision, bad_revision) |
+ |
+ if changes_to_deps: |
+ # DEPS file was changed, search from the oldest change to DEPS file to |
+ # bad_revision to see if there are matching .DEPS.git changes. |
+ oldest_deps_change = changes_to_deps[len(changes_to_deps) - 1] |
tonyg 2013/05/28 23:16:07: Neat python trick: oldest_deps_change = changes_to
shatch 2013/05/28 23:38:17: Cool!
|
+ changes_to_gitdeps = self.source_control.QueryFileRevisionHistory( |
+ bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision) |
+ |
+ if len(changes_to_deps) != len(changes_to_gitdeps): |
+ # Grab the timestamp of the last DEPS change |
+ cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]] |
+ (output, return_code) = RunGit(cmd) |
+ |
+ assert not return_code, 'An error occurred while running'\ |
+ ' "git %s"' % ' '.join(cmd) |
+ |
+ commit_time = int(output) |
+ |
+ # Try looking for a commit that touches the .DEPS.git file in the |
+ # next 5 minutes after the DEPS file change. |
tonyg 2013/05/28 23:16:07: I didn't look how long it usually takes, but 5 min
shatch 2013/05/28 23:38:17: Done.
|
+ cmd = ['log', '--format=%H', '-1', |
+ '--before=%d' % (commit_time + 300), '--after=%d' % commit_time, |
+ 'origin/master', bisect_utils.FILE_DEPS_GIT] |
+ (output, return_code) = RunGit(cmd) |
+ |
+ assert not return_code, 'An error occurred while running'\ |
+ ' "git %s"' % ' '.join(cmd) |
+ |
+ output = output.strip() |
+ if output: |
+ self.warnings.append('Detected change to DEPS and modified ' |
+ 'revision range to include change to .DEPS.git') |
+ return (output, good_revision) |
+ else: |
+ self.warnings.append('Detected change to DEPS but couldn\'t find ' |
+ 'matching change to .DEPS.git') |
+ return (bad_revision, good_revision) |
+ |
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): |
"""Given known good and bad revisions, run a binary search on all |
intermediate revisions to determine the CL where the performance regression |
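
Two details in the NudgeRevisionsIfDEPSChange hunk above are worth spelling out. First, git log prints commits newest-first, so the oldest DEPS change is the last element of changes_to_deps; tonyg's truncated comment is presumably pointing at the negative-index spelling of the same thing. Second, the fallback query looks for a .DEPS.git commit inside a fixed time window after the newest DEPS change. Below is a minimal standalone sketch of that window query, assuming the RunGit stand-in from the earlier sketch; FindMatchingGitDepsCommit is a hypothetical name, FILE_DEPS_GIT stands in for bisect_utils.FILE_DEPS_GIT, and the 300-second default simply mirrors the value in the patch.

# Oldest entry of a newest-first list, as written in the hunk and the
# idiomatic alternative:
#   oldest_deps_change = changes_to_deps[len(changes_to_deps) - 1]
#   oldest_deps_change = changes_to_deps[-1]

FILE_DEPS_GIT = '.DEPS.git'  # stands in for bisect_utils.FILE_DEPS_GIT

def FindMatchingGitDepsCommit(deps_commit_hash, window_seconds=300):
  """Hypothetical helper mirroring the window query in the patch: returns the
  SHA1 of a commit that touched .DEPS.git within window_seconds after the
  given DEPS commit, or '' if none was found."""
  # %ct prints the committer date as a unix timestamp.
  (output, return_code) = RunGit(['log', '--format=%ct', '-1',
                                  deps_commit_hash])
  assert not return_code
  commit_time = int(output)

  # git log accepts unix timestamps for --before/--after, so this restricts
  # the search to the window right after the DEPS change.
  cmd = ['log', '--format=%H', '-1',
         '--before=%d' % (commit_time + window_seconds),
         '--after=%d' % commit_time,
         'origin/master', FILE_DEPS_GIT]
  (output, return_code) = RunGit(cmd)
  assert not return_code
  return output.strip()
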
@@ -1105,6 +1179,9 @@ class BisectPerformanceMetrics(object): |
results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,) |
return results |
+ (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange( |
+ bad_revision, good_revision) |
+ |
if self.opts.output_buildbot_annotations: |
bisect_utils.OutputAnnotationStepStart('Gathering Revisions') |
@@ -1380,13 +1457,11 @@ class BisectPerformanceMetrics(object): |
deviations = math.fabs(bad_mean - good_mean) / good_std_dev |
if deviations < 1.5: |
- print 'Warning: Regression was less than 1.5 standard deviations '\ |
- 'from "good" value. Results may not be accurate.' |
+ self.warnings.append('Regression was less than 1.5 standard ' |
+ 'deviations from "good" value. Results may not be accurate.') |
tonyg 2013/05/28 23:16:07: Something for a future CL that I'll mention while
shatch 2013/05/28 23:38:17: Yeah that sounds reasonable, I'll get a version wi
|
elif self.opts.repeat_test_count == 1: |
- print 'Warning: Tests were only set to run once. This may be '\ |
- 'insufficient to get meaningful results.' |
+ self.warnings.append('Tests were only set to run once. This ' |
+ 'may be insufficient to get meaningful results.') |
# Check for any other possible regression ranges |
prev_revision_data = revision_data_sorted[0][1] |
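
For a concrete sense of the 1.5-standard-deviation threshold guarding the first warning above, here is a quick worked example with made-up numbers (values are purely illustrative, not from a real bisect run):

import math

good_mean = 100.0
good_std_dev = 2.0
bad_mean = 102.5

deviations = math.fabs(bad_mean - good_mean) / good_std_dev
print deviations  # 1.25: below 1.5, so the 'Results may not be accurate'
                  # warning would be appended to self.warnings.
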
@@ -1447,6 +1522,14 @@ class BisectPerformanceMetrics(object): |
current_data['depot'], current_id) |
+ if self.warnings: |
+ print 'The following warnings were generated:' |
+ for w in self.warnings: |
+ print ' - %s' % w |
+ |
if self.opts.output_buildbot_annotations: |
bisect_utils.OutputAnnotationStepClosed() |