| Index: tools/auto_bisect/bisect_perf_regression.py
|
| diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
|
| index ca97d67e422649087a3561debd89bdb8f94d169c..41db6ae00ba959a7afcbea7fc901098ec4e11b70 100755
|
| --- a/tools/auto_bisect/bisect_perf_regression.py
|
| +++ b/tools/auto_bisect/bisect_perf_regression.py
|
| @@ -2307,9 +2307,18 @@ class BisectPerformanceMetrics(object):
|
|
|
| # We need these reference values to determine if later runs should be
|
| # classified as pass or fail.
|
| +
|
| known_bad_value = bad_results[0]
|
| known_good_value = good_results[0]
|
|
|
| + # Abort bisect early when the return codes for known good
|
| + # and known bad revisions are the same.
|
| + if (self._IsBisectModeReturnCode() and
|
| + known_bad_value['mean'] == known_good_value['mean']):
|
| + return BisectResults(abort_reason=('known good and known bad revisions '
|
| + 'returned same return code (return code=%s). '
|
| + 'Continuing bisect might not yield any results.' %
|
| + known_bad_value['mean']))
|
| # Check the direction of improvement only if the improvement_direction
|
| # option is set to a specific direction (1 for higher is better or -1 for
|
| # lower is better).
|
|
|