Index: tools/auto_bisect/bisect_perf_regression.py
diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
index fab182e11654c0f4d05fef3f4b3d93040d4c60e3..1f264590ab3704e5b091127a6e3fdab6c3e0ba19 100755
--- a/tools/auto_bisect/bisect_perf_regression.py
+++ b/tools/auto_bisect/bisect_perf_regression.py
@@ -50,6 +50,7 @@ sys.path.append(os.path.join(
     os.path.dirname(__file__), os.path.pardir, 'telemetry'))
 from bisect_results import BisectResults
+from bisect_results import ConfidenceScore
 import bisect_utils
 import builder
 import math_utils
@@ -2445,6 +2446,14 @@ class BisectPerformanceMetrics(object):
       known_bad_value = bad_results[0]
       known_good_value = good_results[0]
+      regression_confidence = ConfidenceScore(known_bad_value['values'],
+                                              known_good_value['values'])
+      if regression_confidence < HIGH_CONFIDENCE:
qyearsley (2014/10/12 04:07:21): I'm not 100% sure it's good practice to re-use the
RobertoCN (2014/10/17 22:44:37): Done.
+        results.error = ('The results of the performance test on the \'good\' '
+                         'and \'bad\' revisions are so close together that we '
+                         'can\'t be confident this is a regression. Try '
+                         'bisecting a different revision range.')
qyearsley (2014/10/12 04:07:22): I think that this message can be improved. We want
RobertoCN (2014/10/17 22:44:37): Done.
qyearsley (2014/10/17 23:53:52): Sometimes when the bisect can't reproduce the regr
+        return results
       # Can just mark the good and bad revisions explicitly here since we
       # already know the results.
       bad_revision_data = revision_data[revision_list[0]]