Index: tools/bisect-perf-regression.py |
diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py |
index 3ef3b5ce5ac434cff45d184bdd68a1f98a33ed78..a2abddb30dcc10d5c6c0f66dfaa557e4764b61a0 100755 |
--- a/tools/bisect-perf-regression.py |
+++ b/tools/bisect-perf-regression.py |
@@ -291,6 +291,27 @@ def CalculateStandardDeviation(values): |
return std_dev |
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
    Returns NaN when |before| is 0, since relative change with respect
    to zero is undefined (the caller must handle this sentinel).
  """
  if before == 0:
    # Division by zero: relative change w.r.t. 0 is undefined, so signal
    # that explicitly with NaN rather than picking an arbitrary value.
    return float('nan')
  # A single fabs on the quotient is sufficient: it makes the result
  # non-negative regardless of the signs of (after - before) and before.
  # (The original also wrapped the numerator in fabs, which was redundant.)
  difference = after - before
  return math.fabs(difference / before)
def CalculatePooledStandardError(work_sets): |
numerator = 0.0 |
denominator1 = 0.0 |
@@ -3137,8 +3158,8 @@ class BisectPerformanceMetrics(object): |
mean_of_bad_runs = CalculateMean(broken_mean) |
mean_of_good_runs = CalculateMean(working_mean) |
- regression_size = math.fabs(max(mean_of_good_runs, mean_of_bad_runs) / |
- max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0 - 100.0 |
+ regression_size = 100 * CalculateRelativeChange(mean_of_good_runs, |
+ mean_of_bad_runs) |
regression_std_err = math.fabs(CalculatePooledStandardError( |
[working_mean, broken_mean]) / |