OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2013 The Chromium Authors. All rights reserved. | 2 # Copyright 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Chromium auto-bisect tool | 6 """Chromium auto-bisect tool |
7 | 7 |
8 This script bisects a range of commits using binary search. It starts by getting | 8 This script bisects a range of commits using binary search. It starts by getting |
9 reference values for the specified "good" and "bad" commits. Then, for revisions | 9 reference values for the specified "good" and "bad" commits. Then, for revisions |
10 in between, it will get builds, run tests and classify intermediate revisions as | 10 in between, it will get builds, run tests and classify intermediate revisions as |
(...skipping 1346 matching lines...) |
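
The docstring above describes the core idea of the tool: a binary search over a revision range bounded by a known-good and a known-bad commit, classifying the revisions in between. A minimal sketch of that idea, assuming a hypothetical classify(revision) helper that builds, tests and returns 'good' or 'bad' (the real script does far more bookkeeping around builds, retries and statistics):

def _BisectSketch(revisions, classify):
  """Narrows revisions[0] (good) .. revisions[-1] (bad) to the first bad one."""
  good_index, bad_index = 0, len(revisions) - 1
  while bad_index - good_index > 1:
    middle = (good_index + bad_index) // 2
    if classify(revisions[middle]) == 'good':
      good_index = middle
    else:
      bad_index = middle
  # The first revision classified as 'bad' is the suspected culprit.
  return revisions[bad_index]
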
1357 if metric and self._IsBisectModeUsingMetric(): | 1357 if metric and self._IsBisectModeUsingMetric(): |
1358 parsed_metric = _ParseMetricValuesFromOutput(metric, output) | 1358 parsed_metric = _ParseMetricValuesFromOutput(metric, output) |
1359 if parsed_metric: | 1359 if parsed_metric: |
1360 metric_values.append(math_utils.Mean(parsed_metric)) | 1360 metric_values.append(math_utils.Mean(parsed_metric)) |
1361 # If we're bisecting on a metric (ie, changes in the mean or | 1361 # If we're bisecting on a metric (ie, changes in the mean or |
1362 # standard deviation) and no metric values are produced, bail out. | 1362 # standard deviation) and no metric values are produced, bail out. |
1363 if not metric_values: | 1363 if not metric_values: |
1364 break | 1364 break |
1365 elif self._IsBisectModeReturnCode(): | 1365 elif self._IsBisectModeReturnCode(): |
1366 metric_values.append(return_code) | 1366 metric_values.append(return_code) |
| 1367 # If there's a failed test, we can bail out early. |
| 1368 if return_code: |
| 1369 break |
1367 | 1370 |
1368 elapsed_minutes = (time.time() - start_time) / 60.0 | 1371 elapsed_minutes = (time.time() - start_time) / 60.0 |
1369 time_limit = self.opts.max_time_minutes * test_run_multiplier | 1372 time_limit = self.opts.max_time_minutes * test_run_multiplier |
1370 if elapsed_minutes >= time_limit: | 1373 if elapsed_minutes >= time_limit: |
1371 break | 1374 break |
1372 | 1375 |
1373 if metric and len(metric_values) == 0: | 1376 if metric and len(metric_values) == 0: |
1374 err_text = 'Metric %s was not found in the test output.' % metric | 1377 err_text = 'Metric %s was not found in the test output.' % metric |
1375 # TODO(qyearsley): Consider also getting and displaying a list of metrics | 1378 # TODO(qyearsley): Consider also getting and displaying a list of metrics |
1376 # that were found in the output here. | 1379 # that were found in the output here. |
(...skipping 1499 matching lines...) |
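
The added lines in the hunk above introduce an early exit for return-code bisect mode: once a single repeat fails, further repeats cannot change how the revision is classified. A minimal sketch of that behaviour, assuming a hypothetical run_test(command) helper that returns (output, return_code); it is not the actual RunPerformanceTestAndParseResults loop:

def _CollectReturnCodes(command, repeat_count):
  """Runs the test up to repeat_count times, stopping at the first failure."""
  return_codes = []
  for _ in range(repeat_count):
    _output, return_code = run_test(command)  # hypothetical helper
    return_codes.append(return_code)
    if return_code:
      # One nonzero exit status already marks the revision as bad; later
      # repeats add no information, so bail out early.
      break
  return return_codes
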
2876 # bugs. If you change this, please update the perf dashboard as well. | 2879 # bugs. If you change this, please update the perf dashboard as well. |
2877 bisect_utils.OutputAnnotationStepStart('Results') | 2880 bisect_utils.OutputAnnotationStepStart('Results') |
2878 print 'Runtime Error: %s' % e | 2881 print 'Runtime Error: %s' % e |
2879 if opts.output_buildbot_annotations: | 2882 if opts.output_buildbot_annotations: |
2880 bisect_utils.OutputAnnotationStepClosed() | 2883 bisect_utils.OutputAnnotationStepClosed() |
2881 return 1 | 2884 return 1 |
2882 | 2885 |
2883 | 2886 |
2884 if __name__ == '__main__': | 2887 if __name__ == '__main__': |
2885 sys.exit(main()) | 2888 sys.exit(main()) |
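
For context on the error path above, the annotation helpers shown in the diff (bisect_utils.OutputAnnotationStepStart and OutputAnnotationStepClosed) wrap a block of output in a named buildbot step. A rough sketch of that pattern, assuming report_results is a hypothetical callable that prints either the bisect results or the error:

import bisect_utils  # Chromium module alongside this script

def _PrintAnnotatedResults(opts, report_results):
  if opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Results')
  report_results()
  if opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
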