Chromium Code Reviews

Unified Diff: tools/bisect-perf-regression_test.py

Issue 463743002: Return 0 for confidence when there's only results for one "good" or one "bad" rev. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 4 months ago
Index: tools/bisect-perf-regression_test.py
diff --git a/tools/bisect-perf-regression_test.py b/tools/bisect-perf-regression_test.py
index 8a1a250d4df59d977c2617f0cb884e58289afdd2..f0d17ac020b7a1eea4b883f91cc8b997880bce97 100644
--- a/tools/bisect-perf-regression_test.py
+++ b/tools/bisect-perf-regression_test.py
@@ -25,8 +25,11 @@ class BisectPerfRegressionTest(unittest.TestCase):
       bad_values: First list of numbers.
       good_values: Second list of numbers.
     """
-    # ConfidenceScore takes a list of lists but these lists are flattened.
-    confidence = bisect_perf_module.ConfidenceScore([bad_values], [good_values])
+    # ConfidenceScore takes a list of lists but these lists are flattened
+    # inside the function.
+    confidence = bisect_perf_module.ConfidenceScore(
+        [[v] for v in bad_values],
+        [[v] for v in good_values])
     self.assertEqual(score, confidence)
 
   def testConfidenceScore_ZeroConfidence(self):
@@ -45,10 +48,9 @@ class BisectPerfRegressionTest(unittest.TestCase):
     self._AssertConfidence(99.9, [1, 1, 1, 1], [1.2, 1.2, 1.2, 1.2])
     self._AssertConfidence(99.9, [1, 1, 1, 1], [1.01, 1.01, 1.01, 1.01])
 
-  def testConfidenceScore_ImbalancedSampleSize(self):
-    # The second set of numbers only contains one number, so confidence is low.
-    self._AssertConfidence(
-        80.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3], [1.4])
+  def testConfidenceScore_UnbalancedSampleSize(self):
+    # The second set of numbers only contains one number, so confidence is 0.
+    self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2], [1.4])
 
   def testConfidenceScore_EmptySample(self):
     # Confidence is zero if either or both samples are empty.
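For context on the behavior this patch tests: the sketch below shows one way a ConfidenceScore-style function could implement the new rule. This is a hypothetical illustration assuming a Welch's-t-style score (the function name, the flattening step, and the mapping from the t-statistic to a percentage are all assumptions, not the code in tools/bisect-perf-regression.py). It takes a list of lists per sample, flattens the inner lists, and returns 0 whenever either flattened sample has fewer than two values.

# Illustrative sketch only; not the actual ConfidenceScore implementation.
import math


def confidence_score(bad_value_lists, good_value_lists):
  """Returns an illustrative confidence score in [0, 100].

  Mirrors the interface exercised by the test: each argument is a list of
  lists (one inner list of values per revision), and the inner lists are
  flattened before comparison. Returns 0.0 when either flattened sample
  has fewer than two values, since a single measurement gives no variance
  estimate; that is the behavior the patched test asserts.
  """
  bad = [v for values in bad_value_lists for v in values]
  good = [v for values in good_value_lists for v in values]
  if len(bad) < 2 or len(good) < 2:
    return 0.0

  def mean(xs):
    return sum(xs) / float(len(xs))

  def variance(xs):
    m = mean(xs)
    return sum((x - m) ** 2 for x in xs) / (len(xs) - 1)

  # Welch's t-statistic: difference of means over the combined standard error.
  std_err = math.sqrt(variance(bad) / len(bad) + variance(good) / len(good))
  if std_err == 0:
    # Two constant samples: full confidence if the means differ, else none.
    return 100.0 if mean(bad) != mean(good) else 0.0
  t = abs(mean(bad) - mean(good)) / std_err
  # Illustrative mapping of |t| to a percentage; a real implementation would
  # use the t-distribution CDF with the appropriate degrees of freedom.
  return 100.0 * (1.0 - math.exp(-t))


# One "good" measurement only: the score is forced to zero, matching the
# updated testConfidenceScore_UnbalancedSampleSize expectation above.
assert confidence_score([[1.1], [1.2], [1.0]], [[1.4]]) == 0.0
assert confidence_score([[]], [[]]) == 0.0  # Empty samples also score zero.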
