Chromium Code Reviews

Index: tools/auto_bisect/bisect_perf_regression.py
diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
index fab182e11654c0f4d05fef3f4b3d93040d4c60e3..0ffee91486ceb4a1efbfc29ee9e01d0ecb0ef8a0 100755
--- a/tools/auto_bisect/bisect_perf_regression.py
+++ b/tools/auto_bisect/bisect_perf_regression.py
@@ -1519,7 +1519,6 @@ class BisectPerformanceMetrics(object):
       print 'Something went wrong while updating DEPS file. [%s]' % e
       return False
-
   def CreateDEPSPatch(self, depot, revision):
     """Modifies DEPS and returns diff as text.
@@ -1665,6 +1664,14 @@ class BisectPerformanceMetrics(object):
         'std_dev': 0.0,
         'values': [0.0]
       }
+
+      # When debug_fake_first_test_mean is set, its value is returned as the
+      # mean of the first faked test, and the flag is then cleared so that
+      # further calls behave as if it wasn't set.
qyearsley 2014/10/12 03:37:11
In this comment, it might be clearer to say "set"

RobertoCN 2014/10/15 18:44:44
Done.
+      if self.opts.debug_fake_first_test_mean:
+        fake_results['mean'] = float(self.opts.debug_fake_first_test_mean)
+        self.opts.debug_fake_first_test_mean = 0
+
       return (fake_results, success_code)

     # For Windows platform set posix=False, to parse windows paths correctly.
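The one-shot reset above can be tried in isolation. Below is a minimal sketch (plain Python 2; FakeOpts and RunFakeTest stand in for self.opts and the faked-test path and are not names from the patch):

  class FakeOpts(object):
    def __init__(self, debug_fake_first_test_mean=0):
      self.debug_fake_first_test_mean = debug_fake_first_test_mean

  def RunFakeTest(opts):
    # Mirrors the faked-results path: a zeroed result dict whose mean is
    # overridden exactly once if the debug flag is set.
    fake_results = {'mean': 0.0, 'std_dev': 0.0, 'values': [0.0]}
    if opts.debug_fake_first_test_mean:
      fake_results['mean'] = float(opts.debug_fake_first_test_mean)
      opts.debug_fake_first_test_mean = 0  # Clear so later calls return 0.0.
    return fake_results

  opts = FakeOpts(debug_fake_first_test_mean=50)
  print RunFakeTest(opts)['mean']  # 50.0 -- first call uses the fake mean.
  print RunFakeTest(opts)['mean']  # 0.0 -- the flag was cleared.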
@@ -2439,12 +2446,23 @@ class BisectPerformanceMetrics(object):
                                                         good_results[0])
       return results
-
     # We need these reference values to determine if later runs should be
     # classified as pass or fail.
     known_bad_value = bad_results[0]
     known_good_value = good_results[0]
+    # Check the direction of improvement only if the direction_of_improvement
+    # flag is non-zero: 1 means higher values are better, -1 means lower
+    # values are better.
qyearsley 2014/10/12 03:37:11
What can it be if it's non-false? It can be -1 or 1.

RobertoCN 2014/10/15 18:44:44
Done.
+    improvement_dir = self.opts.direction_of_improvement
+    if improvement_dir:
+      higher_is_better = improvement_dir > 0
+      metric_increased = known_bad_value['mean'] > known_good_value['mean']
+      if ((higher_is_better and metric_increased) or
+          (not higher_is_better and not metric_increased)):
+        results.error = ('The given "good" - "bad" range represents an '
+                         'improvement, not a regression.')
+        return results
+
     # Can just mark the good and bad revisions explicitly here since we
     # already know the results.
     bad_revision_data = revision_data[revision_list[0]]
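For intuition, the new check reduces to the following standalone predicate (a sketch; is_improvement and the sample means are illustrative, not part of the patch):

  def is_improvement(direction_of_improvement, good_mean, bad_mean):
    # direction_of_improvement: 1 if higher values are better, -1 if lower.
    higher_is_better = direction_of_improvement > 0
    metric_increased = bad_mean > good_mean
    return ((higher_is_better and metric_increased) or
            (not higher_is_better and not metric_increased))

  print is_improvement(1, good_mean=10.0, bad_mean=20.0)   # True: bisect aborts.
  print is_improvement(1, good_mean=20.0, bad_mean=10.0)   # False: real regression.
  print is_improvement(-1, good_mean=10.0, bad_mean=20.0)  # False: real regression.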
@@ -2946,12 +2964,14 @@ class BisectOptions(object):
     self.debug_ignore_build = None
     self.debug_ignore_sync = None
     self.debug_ignore_perf_test = None
+    self.debug_fake_first_test_mean = 0
     self.gs_bucket = None
     self.target_arch = 'ia32'
     self.target_build_type = 'Release'
     self.builder_host = None
     self.builder_port = None
     self.bisect_mode = BISECT_MODE_MEAN
+    self.direction_of_improvement = 0

   @staticmethod
   def _CreateCommandLineParser():
@@ -2985,6 +3005,12 @@ class BisectOptions(object):
                      type='str',
                      help='The desired metric to bisect on. For example ' +
                           '"vm_rss_final_b/vm_rss_f_b"')
+    group.add_option('-d', '--direction_of_improvement',
+                     type='int',
+                     default=0,
+                     help='An integer representing the direction of ' +
+                          'improvement: 1 if higher is better, -1 if lower ' +
+                          'is better, 0 to skip the check (default).')
     group.add_option('-r', '--repeat_test_count',
                      type='int',
                      default=20,
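With this option, bisecting a metric where lower values are better might be invoked along these lines (a sketch: the command, metric, and revision values are placeholders, and the -c/-g/-b flags are assumed to be the script's usual command and good/bad revision options):

  python tools/auto_bisect/bisect_perf_regression.py \
      -c "out/Release/performance_ui_tests --gtest_filter=PageCyclerTest.*" \
      -m times/t -d -1 -g <good_revision> -b <bad_revision>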
@@ -3105,6 +3131,12 @@ class BisectOptions(object):
     group.add_option('--debug_ignore_perf_test',
                      action='store_true',
                      help='DEBUG: Don\'t perform performance tests.')
+    group.add_option('--debug_fake_first_test_mean',
+                     type='int',
+                     default=0,
+                     help=('DEBUG: When faking performance tests, return this '
+                           'value as the mean of the first performance test, '
+                           'and return a mean of 0.0 for further tests.'))

     parser.add_option_group(group)
     return parser
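Taken together, the debug flags allow a dry run of the new improvement-vs-regression check without building or testing anything, for example (a sketch; revision values are placeholders, and -g/-b are assumed to be the script's good/bad revision flags):

  python tools/auto_bisect/bisect_perf_regression.py \
      --debug_ignore_build --debug_ignore_sync --debug_ignore_perf_test \
      --debug_fake_first_test_mean 100 -d 1 -g <good_revision> -b <bad_revision>

Since debug_fake_first_test_mean only affects the first faked run, the two reference runs get means of 100.0 and 0.0; with -d 1 this should trip the new error path if the bad revision happens to be tested first (which of the two reference runs comes first is not pinned down by this patch).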