Chromium Code Reviews

Index: tools/bisect-perf-regression.py
diff --git a/tools/bisect-perf-regression.py b/tools/bisect-perf-regression.py
index 682a45fba1c604f64f14a4c01e2149cfab62c765..bbdb6422e27290d585ab35b6d155c01cafffcf44 100755
--- a/tools/bisect-perf-regression.py
+++ b/tools/bisect-perf-regression.py
@@ -286,6 +286,8 @@ def ConfidenceScore(good_results_lists, bad_results_lists):
   # Flatten the lists of results lists.
   sample1 = sum(good_results_lists, [])
   sample2 = sum(bad_results_lists, [])
+  if not sample1 or not sample2:
+    return 0.0
qyearsley
2014/07/31 22:34:09
Previously ConfidenceScore was throwing an error if either sample list was empty.
   # The p-value is approximately the probability of obtaining the given set
   # of good and bad values just by chance.
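The guard above is the whole fix: with no measurements on one side there is nothing to compare, so the function now reports zero confidence instead of raising. A minimal standalone sketch of the same pattern, assuming a crude separation measure in place of the real p-value computation (confidence_score and its constants are illustrative, not the actual implementation):

import math

def confidence_score(good_results_lists, bad_results_lists):
  # Flatten the lists of results lists, mirroring the real function.
  sample1 = sum(good_results_lists, [])
  sample2 = sum(bad_results_lists, [])
  # Guard added by this CL: no data on either side means zero confidence,
  # rather than an exception from the statistics below.
  if not sample1 or not sample2:
    return 0.0
  mean1 = sum(sample1) / float(len(sample1))
  mean2 = sum(sample2) / float(len(sample2))
  var1 = sum((x - mean1) ** 2 for x in sample1) / float(len(sample1))
  var2 = sum((x - mean2) ** 2 for x in sample2) / float(len(sample2))
  # Crude separation measure standing in for the real p-value based score.
  denom = math.sqrt(var1 / len(sample1) + var2 / len(sample2)) or 1e-9
  return min(100.0, 10.0 * abs(mean1 - mean2) / denom)

print(confidence_score([[1.0, 1.1]], []))            # 0.0, no longer raises
print(confidence_score([[1.0, 1.1]], [[2.0, 2.2]]))  # some positive score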
@@ -3054,6 +3056,12 @@ class BisectPerformanceMetrics(object):
     last_broken_revision = None
     last_broken_revision_index = -1
+    culprit_revisions = []
+    other_regressions = []
+    regression_size = 0.0
+    regression_std_err = 0.0
+    confidence = 0.0
qyearsley
2014/07/31 22:34:09
Previously it was possible that these were referenced before assignment.
+
     for i in xrange(len(revision_data_sorted)):
       k, v = revision_data_sorted[i]
       if v['passed'] == 1:
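Initializing these names before the loop matters because every assignment to them happens inside conditional branches; if a bisect run never takes those branches, the reporting code that reads them afterwards hits an UnboundLocalError. A reduced, hypothetical sketch of that failure mode (summarize and summarize_fixed are illustrative names, not code from the script):

def summarize(revision_data_sorted):
  # Buggy shape: culprit_revisions is only assigned inside the branch.
  for k, v in revision_data_sorted:
    if v['passed'] == 0:
      culprit_revisions = [k]
  return culprit_revisions  # UnboundLocalError if every revision passed

def summarize_fixed(revision_data_sorted):
  culprit_revisions = []  # always defined up front, as in the CL
  for k, v in revision_data_sorted:
    if v['passed'] == 0:
      culprit_revisions = [k]
  return culprit_revisions

print(summarize_fixed([('r1', {'passed': 1})]))  # []
# summarize([('r1', {'passed': 1})]) would raise UnboundLocalError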
@@ -3233,7 +3241,7 @@ class BisectPerformanceMetrics(object):
                          results_dict['confidence'])
     _PrintStepTime(revision_data_sorted)
     self._PrintReproSteps()
-    self._PrintThankYou()
+    _PrintThankYou()
|
qyearsley
2014/07/31 22:34:09
The new test testDryRun was able to catch this :-)
     if self.opts.output_buildbot_annotations:
       bisect_utils.OutputAnnotationStepClosed()
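The one-character fix reflects that _PrintThankYou is a module-level helper rather than a method, so calling it through self raises AttributeError, and only at the very end of a bisect run. A hedged sketch of how a dry-run style test surfaces it (the class layout and PrintResults name here are illustrative; the real testDryRun lives in the bisect script's test suite):

import unittest

def _PrintThankYou():
  # Module-level helper, as in the patched script.
  print('Thank you for using the bisect tool!')

class BisectPerformanceMetrics(object):
  def PrintResults(self, use_self_call=False):
    if use_self_call:
      self._PrintThankYou()  # AttributeError: not an attribute of the class
    else:
      _PrintThankYou()       # the corrected module-level call

class BisectDryRunTest(unittest.TestCase):
  def testDryRun(self):
    # Simply exercising the reporting path end to end is enough to catch
    # the bad self._PrintThankYou() call.
    BisectPerformanceMetrics().PrintResults()

if __name__ == '__main__':
  unittest.main()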