Index: tools/auto_bisect/bisect_perf_regression.py
diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
index f30ceb01d335f2483ddf0e9aae47c5e644aef7c0..db238be35aff9b8d691e22f753b0eaa0e460ecff 100755
--- a/tools/auto_bisect/bisect_perf_regression.py
+++ b/tools/auto_bisect/bisect_perf_regression.py
@@ -2480,7 +2480,13 @@ class BisectPerformanceMetrics(object):
             metric,
             test_run_multiplier=BORDER_REVISIONS_EXTRA_RUNS)
         # Is extend the right thing to do here?
-        state.value['values'].extend(run_results[0]['values'])
+        if run_results[1] != BUILD_RESULT_FAIL:
+          state.value['values'].extend(run_results[0]['values'])
+        else:
+          warning_text = 'Re-test of revision %s failed with error message: %s'
+          warning_text %= (state.revision, run_results[0])
+          if warning_text not in self.warnings:
+            self.warnings.append(warning_text)


 def _IsPlatformSupported():
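
The pattern this hunk introduces can be sketched standalone. Per the patch, RunTest returns a pair whose second slot is a build status and whose first slot holds {'values': [...]} on success or an error message on failure; BUILD_RESULT_FAIL marks a failed run. Everything in the sketch below (the RevisionState stand-in, merge_retest_results, the stub constant) is hypothetical scaffolding for illustration, not the script's actual API.

# Minimal sketch of the behavior added by this patch: merge re-test values
# only when the run succeeded, otherwise record a deduplicated warning.
# RevisionState, merge_retest_results, and the BUILD_RESULT_FAIL stub are
# stand-ins for the bisect script's real types, not its actual API.

BUILD_RESULT_FAIL = 1  # stand-in for the script's failure status constant


class RevisionState(object):
  def __init__(self, revision, values):
    self.revision = revision
    self.value = {'values': values}


def merge_retest_results(state, run_results, warnings):
  """Extends the state's sample values on success; warns once on failure.

  run_results mirrors the patch's RunTest return shape: a (payload, status)
  pair where payload holds {'values': [...]} on success and an error
  message on failure.
  """
  if run_results[1] != BUILD_RESULT_FAIL:
    state.value['values'].extend(run_results[0]['values'])
  else:
    warning_text = 'Re-test of revision %s failed with error message: %s'
    warning_text %= (state.revision, run_results[0])
    if warning_text not in warnings:  # keep each distinct warning once
      warnings.append(warning_text)


if __name__ == '__main__':
  warnings = []
  state = RevisionState('abc123', [10.0, 11.0])

  # Successful re-test: values get merged into the existing samples.
  merge_retest_results(state, ({'values': [10.5]}, 0), warnings)
  assert state.value['values'] == [10.0, 11.0, 10.5]

  # Failed re-test, twice: one warning recorded, the duplicate is dropped.
  merge_retest_results(state, ('compile failed', BUILD_RESULT_FAIL), warnings)
  merge_retest_results(state, ('compile failed', BUILD_RESULT_FAIL), warnings)
  assert len(warnings) == 1

Note that on failure the patch formats run_results[0] directly into the message, i.e. the first tuple slot is expected to carry the error text rather than a values dict, and the membership check before appending keeps repeated border-revision re-test failures from flooding self.warnings with duplicates.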