Chromium Code Reviews

Unified Diff: tools/auto_bisect/bisect_perf_regression_test.py

Issue 1525033002: Add required_initial_confidence to legacy bisect. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years ago
Index: tools/auto_bisect/bisect_perf_regression_test.py
diff --git a/tools/auto_bisect/bisect_perf_regression_test.py b/tools/auto_bisect/bisect_perf_regression_test.py
index 132044ea44d29f1db920fafb5e1876942adffddf..33f733a7332641521a58b226afa48b69f74f382c 100644
--- a/tools/auto_bisect/bisect_perf_regression_test.py
+++ b/tools/auto_bisect/bisect_perf_regression_test.py
@@ -137,13 +137,16 @@ def _GetBisectPerformanceMetricsInstance(options_dict):
  return bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
-def _GetExtendedOptions(improvement_dir, fake_first, ignore_confidence=True):
+def _GetExtendedOptions(improvement_dir, fake_first, ignore_confidence=True,
+                        **extra_opts):
"""Returns the a copy of the default options dict plus some options."""
  result = dict(DEFAULT_OPTIONS)
  result.update({
      'improvement_direction': improvement_dir,
      'debug_fake_first_test_mean': fake_first,
-      'debug_ignore_regression_confidence': ignore_confidence})
+      'debug_ignore_regression_confidence': ignore_confidence
+  })
+  result.update(extra_opts)
  return result
@@ -316,7 +319,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
    results = _GenericDryRun(_GetExtendedOptions(1, -100))
    self.assertIsNone(results.error)
-  def _CheckAbortsEarly(self, results):
+  def _CheckAbortsEarly(self, results, **extra_opts):
    """Returns True if the bisect job would abort early."""
    global _MockResultsGenerator
    _MockResultsGenerator = (r for r in results)
@@ -325,7 +328,9 @@ class BisectPerfRegressionTest(unittest.TestCase):
    bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests()
    try:
-      dry_run_results = _GenericDryRun(_GetExtendedOptions(0, 0, False))
+      dry_run_results = _GenericDryRun(_GetExtendedOptions(
+          improvement_dir=0, fake_first=0, ignore_confidence=False,
+          **extra_opts))
    except StopIteration:
      # If StopIteration was raised, that means that the next value after
      # the first two values was requested, so the job was not aborted.
@@ -354,6 +359,10 @@ class BisectPerfRegressionTest(unittest.TestCase):
  def testBisectNotAborted_MultipleValues(self):
    self.assertFalse(self._CheckAbortsEarly(MULTIPLE_VALUES))
+  def testBisectNotAbortedWhenRequiredConfidenceIsZero(self):
+    self.assertFalse(self._CheckAbortsEarly(
+        CLEAR_NON_REGRESSION, required_initial_confidence=0))
+
  def _CheckAbortsEarlyForReturnCode(self, results):
    """Returns True if the bisect job would abort early in return code mode."""
    global _MockResultsGenerator
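
For context, a minimal, self-contained sketch of the keyword-argument passthrough this patch adds. The DEFAULT_OPTIONS values below are placeholder stand-ins (the real dict is defined elsewhere in bisect_perf_regression_test.py); only _GetExtendedOptions mirrors the code in this diff.

# Stand-in for the module-level DEFAULT_OPTIONS in
# bisect_perf_regression_test.py; the real keys and values differ.
DEFAULT_OPTIONS = {
    'command': 'fake_command',
    'metric': 'fake/metric',
}


def _GetExtendedOptions(improvement_dir, fake_first, ignore_confidence=True,
                        **extra_opts):
  """Returns a copy of the default options dict plus some options."""
  result = dict(DEFAULT_OPTIONS)
  result.update({
      'improvement_direction': improvement_dir,
      'debug_fake_first_test_mean': fake_first,
      'debug_ignore_regression_confidence': ignore_confidence
  })
  # Extra keyword arguments (e.g. required_initial_confidence) are copied
  # into the options dict unchanged, so new bisect options can be exercised
  # by tests without adding a dedicated parameter for each one.
  result.update(extra_opts)
  return result


# The new test passes required_initial_confidence=0 through this path:
opts = _GetExtendedOptions(improvement_dir=0, fake_first=0,
                           ignore_confidence=False,
                           required_initial_confidence=0)
assert opts['debug_ignore_regression_confidence'] is False
assert opts['required_initial_confidence'] == 0
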