Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import os | 5 import os |
| 6 import re | 6 import re |
| 7 import shutil | 7 import shutil |
| 8 import sys | 8 import sys |
| 9 import unittest | 9 import unittest |
| 10 | 10 |
| (...skipping 93 matching lines...) | |
| 104 'debug_ignore_sync': True, | 104 'debug_ignore_sync': True, |
| 105 'debug_ignore_perf_test': True, | 105 'debug_ignore_perf_test': True, |
| 106 'debug_ignore_regression_confidence': True, | 106 'debug_ignore_regression_confidence': True, |
| 107 'command': 'fake_command', | 107 'command': 'fake_command', |
| 108 'metric': 'fake/metric', | 108 'metric': 'fake/metric', |
| 109 'good_revision': 280000, | 109 'good_revision': 280000, |
| 110 'bad_revision': 280005, | 110 'bad_revision': 280005, |
| 111 } | 111 } |
| 112 | 112 |
| 113 # This global is a placeholder for a generator to be defined by the test cases | 113 # This global is a placeholder for a generator to be defined by the test cases |
| 114 # that use _MockRunTests. | 114 # that use _MockRunTests. |
qyearsley 2015/06/23 20:47:45: This comment should be updated.
prasadv 2015/06/23 21:37:48: Now used by only _MockRunTests.
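For context on the pattern this exchange refers to: the module keeps a placeholder generator at global scope, each test rebinds it to a sequence of canned results, and the mock pulls one result per call. A minimal standalone sketch of that mechanic, with made-up values and a hypothetical `_RunExample` helper rather than anything from this patch:

```python
# Illustration only: the placeholder-generator pattern used by _MockRunTests.
_MockResultsGenerator = (x for x in [])  # placeholder, rebound by each test


def _MockRunTests(*_args, **_kwargs):
  # Each call consumes the next canned value list from the module-level
  # generator, mimicking one performance-test run.
  return {'values': next(_MockResultsGenerator)}, 0


def _RunExample():
  global _MockResultsGenerator
  fake_results = [[10, 10, 10], [100, 100, 100]]  # made-up result lists
  _MockResultsGenerator = (r for r in fake_results)
  print(_MockRunTests())  # ({'values': [10, 10, 10]}, 0)
  print(_MockRunTests())  # ({'values': [100, 100, 100]}, 0)


if __name__ == '__main__':
  _RunExample()
```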
| 115 _MockResultsGenerator = (x for x in []) | 115 _MockResultsGenerator = (x for x in []) |
| 116 | 116 |
| | 117 def _MakeMockRunTests(is_return_code=False): |
qyearsley 2015/06/23 20:47:45: Optional: Maybe it would be clearer if this argument…
prasadv 2015/06/23 21:37:48: Done.
| | 118 """Return MockRunTest based on the bisect mode.""" |
| | 119 # pylint: disable=unused-argument |
| | 120 def _MockRunTests(*args, **kwargs): |
RobertoCN 2015/06/23 21:03:03: I think we only need one of these two functions.
prasadv 2015/06/23 21:37:48: Done.
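A rough sketch of the single-closure variant this comment appears to ask for, given that the two inner functions in the patch have identical bodies; it reuses the module's `_FakeTestResult` and `_MockResultsGenerator` and is only an illustration of the suggestion, not the code that landed:

```python
def _MakeMockRunTests(is_return_code=False):
  """Returns one mock; is_return_code is simply captured by the closure."""
  def _MockRunTests(*args, **kwargs):  # pylint: disable=unused-argument
    return _FakeTestResult(_MockResultsGenerator.next(), is_return_code)
  return _MockRunTests
```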
| | 121 return _FakeTestResult(_MockResultsGenerator.next(), is_return_code) |
| 117 | 122 |
| 118 def _MockRunTests(*args, **kwargs): # pylint: disable=unused-argument | 123 def _MockRunTestsReturnCode(*args, **kwargs): |
| 119 return _FakeTestResult(_MockResultsGenerator.next()) | 124 return _FakeTestResult(_MockResultsGenerator.next(), is_return_code) |
| | 125 # pylint: enable=unused-argument |
| | 126 |
| | 127 if is_return_code: |
| | 128 return _MockRunTestsReturnCode |
| | 129 else: |
| | 130 return _MockRunTests |
| 120 | 131 |
| 121 | 132 |
| 122 def _FakeTestResult(values): | 133 def _FakeTestResult(values, is_return_code): |
| 123 result_dict = {'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': values} | 134 mean = 0.0 |
| | 135 if is_return_code: |
| | 136 mean = 0 if ( |
| | 137 all(current_value == 0 for current_value in values)) else 1 |
qyearsley 2015/06/23 20:47:45: This would fit on one line if you shortened the variable…
prasadv 2015/06/23 21:37:48: Done.
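Assuming the truncated suggestion was to shorten the loop variable name, a one-line form of the expression above (illustrative only):

```python
# Equivalent to new lines 136-137 with a shorter loop variable.
mean = 0 if all(v == 0 for v in values) else 1
```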
| | 138 result_dict = {'mean': mean, 'std_err': 0.0, 'std_dev': 0.0, 'values': values} |
| 124 success_code = 0 | 139 success_code = 0 |
| 125 return (result_dict, success_code) | 140 return (result_dict, success_code) |
| 126 | 141 |
| 127 | 142 |
| 128 def _GetBisectPerformanceMetricsInstance(options_dict): | 143 def _GetBisectPerformanceMetricsInstance(options_dict): |
| 129 """Returns an instance of the BisectPerformanceMetrics class.""" | 144 """Returns an instance of the BisectPerformanceMetrics class.""" |
| 130 opts = bisect_perf_regression.BisectOptions.FromDict(options_dict) | 145 opts = bisect_perf_regression.BisectOptions.FromDict(options_dict) |
| 131 return bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd()) | 146 return bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd()) |
| 132 | 147 |
| 133 | 148 |
| (...skipping 234 matching lines...) | |
| 368 # Test result goes from 0 to -100 where higher is better | 383 # Test result goes from 0 to -100 where higher is better |
| 369 results = _GenericDryRun(_GetExtendedOptions(1, -100)) | 384 results = _GenericDryRun(_GetExtendedOptions(1, -100)) |
| 370 self.assertIsNone(results.error) | 385 self.assertIsNone(results.error) |
| 371 | 386 |
| 372 def _CheckAbortsEarly(self, results): | 387 def _CheckAbortsEarly(self, results): |
| 373 """Returns True if the bisect job would abort early.""" | 388 """Returns True if the bisect job would abort early.""" |
| 374 global _MockResultsGenerator | 389 global _MockResultsGenerator |
| 375 _MockResultsGenerator = (r for r in results) | 390 _MockResultsGenerator = (r for r in results) |
| 376 bisect_class = bisect_perf_regression.BisectPerformanceMetrics | 391 bisect_class = bisect_perf_regression.BisectPerformanceMetrics |
| 377 original_run_tests = bisect_class.RunPerformanceTestAndParseResults | 392 original_run_tests = bisect_class.RunPerformanceTestAndParseResults |
| 378 bisect_class.RunPerformanceTestAndParseResults = _MockRunTests | 393 bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests() |
| 379 | 394 |
| 380 try: | 395 try: |
| 381 dry_run_results = _GenericDryRun(_GetExtendedOptions(0, 0, False)) | 396 dry_run_results = _GenericDryRun(_GetExtendedOptions(0, 0, False)) |
| 382 except StopIteration: | 397 except StopIteration: |
| 383 # If StopIteration was raised, that means that the next value after | 398 # If StopIteration was raised, that means that the next value after |
| 384 # the first two values was requested, so the job was not aborted. | 399 # the first two values was requested, so the job was not aborted. |
| 385 return False | 400 return False |
| 386 finally: | 401 finally: |
| 387 bisect_class.RunPerformanceTestAndParseResults = original_run_tests | 402 bisect_class.RunPerformanceTestAndParseResults = original_run_tests |
| 388 | 403 |
| (...skipping 10 matching lines...) | |
| 399 | 414 |
| 400 def testBisectNotAborted_ClearRegression(self): | 415 def testBisectNotAborted_ClearRegression(self): |
| 401 self.assertFalse(self._CheckAbortsEarly(CLEAR_REGRESSION)) | 416 self.assertFalse(self._CheckAbortsEarly(CLEAR_REGRESSION)) |
| 402 | 417 |
| 403 def testBisectNotAborted_BarelyRegression(self): | 418 def testBisectNotAborted_BarelyRegression(self): |
| 404 self.assertFalse(self._CheckAbortsEarly(BARELY_REGRESSION)) | 419 self.assertFalse(self._CheckAbortsEarly(BARELY_REGRESSION)) |
| 405 | 420 |
| 406 def testBisectNotAborted_MultipleValues(self): | 421 def testBisectNotAborted_MultipleValues(self): |
| 407 self.assertFalse(self._CheckAbortsEarly(MULTIPLE_VALUES)) | 422 self.assertFalse(self._CheckAbortsEarly(MULTIPLE_VALUES)) |
| 408 | 423 |
| | 424 def _CheckAbortsEarlyForReturnCode(self, results): |
| | 425 """Returns True if the bisect job would abort early in return code mode.""" |
| | 426 global _MockResultsGenerator |
| | 427 _MockResultsGenerator = (r for r in results) |
| | 428 bisect_class = bisect_perf_regression.BisectPerformanceMetrics |
| | 429 original_run_tests = bisect_class.RunPerformanceTestAndParseResults |
| | 430 bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests(True) |
| | 431 options = dict(DEFAULT_OPTIONS) |
| | 432 options.update({'bisect_mode': 'return_code'}) |
| | 433 try: |
| | 434 dry_run_results = _GenericDryRun(options) |
| | 435 except StopIteration: |
| | 436 # If StopIteration was raised, that means that the next value after |
| | 437 # the first two values was requested, so the job was not aborted. |
| | 438 return False |
| | 439 finally: |
| | 440 bisect_class.RunPerformanceTestAndParseResults = original_run_tests |
| | 441 |
| | 442 # If the job was aborted, there should be a warning about it. |
| | 443 if ('known good and known bad revisions returned same' in |
| | 444 dry_run_results.abort_reason): |
| | 445 return True |
| | 446 return False |
| | 447 |
| | 448 def testBisectAbortOn_SameReturnCode(self): |
| | 449 self.assertTrue (self._CheckAbortsEarlyForReturnCode([[0,0,0], [0,0,0]])) |
qyearsley 2015/06/23 20:47:45: Nit: space after assertTrue.
prasadv 2015/06/23 21:37:48: Done.
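To make concrete what these two tests exercise: in return-code mode, `_FakeTestResult` collapses each list of per-run return codes to a mean of 0 (every run passed) or 1, and the bisect aborts early when the known-good and known-bad revisions produce the same value. A simplified standalone sketch of that condition, using hypothetical helper names rather than the real bisect code:

```python
def _ReturnCodeMean(values):
  # Mirrors _FakeTestResult in return-code mode: 0 only if every run passed.
  return 0 if all(v == 0 for v in values) else 1


def _WouldAbort(rev_a_values, rev_b_values):
  # Hypothetical condensation of the early-abort check: identical return
  # codes for the two reference revisions leave nothing to bisect.
  return _ReturnCodeMean(rev_a_values) == _ReturnCodeMean(rev_b_values)


assert _WouldAbort([0, 0, 0], [0, 0, 0])      # same return code -> abort
assert not _WouldAbort([1, 1, 1], [0, 0, 0])  # codes differ -> keep bisecting
```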
| | 450 |
| | 451 def testBisectNotAbortedOn_DifferentReturnCode(self): |
| | 452 self.assertFalse(self._CheckAbortsEarlyForReturnCode([[1,1,1], [0,0,0]])) |
| | 453 |
| 409 def testGetCommitPosition(self): | 454 def testGetCommitPosition(self): |
| 410 cp_git_rev = '7017a81991de983e12ab50dfc071c70e06979531' | 455 cp_git_rev = '7017a81991de983e12ab50dfc071c70e06979531' |
| 411 self.assertEqual(291765, source_control.GetCommitPosition(cp_git_rev)) | 456 self.assertEqual(291765, source_control.GetCommitPosition(cp_git_rev)) |
| 412 | 457 |
| 413 svn_git_rev = 'e6db23a037cad47299a94b155b95eebd1ee61a58' | 458 svn_git_rev = 'e6db23a037cad47299a94b155b95eebd1ee61a58' |
| 414 self.assertEqual(291467, source_control.GetCommitPosition(svn_git_rev)) | 459 self.assertEqual(291467, source_control.GetCommitPosition(svn_git_rev)) |
| 415 | 460 |
| 416 def testGetCommitPositionForV8(self): | 461 def testGetCommitPositionForV8(self): |
| 417 bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS) | 462 bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS) |
| 418 v8_rev = '21d700eedcdd6570eff22ece724b63a5eefe78cb' | 463 v8_rev = '21d700eedcdd6570eff22ece724b63a5eefe78cb' |
| (...skipping 288 matching lines...) | |
| 707 (None, 0)), | 752 (None, 0)), |
| 708 ] | 753 ] |
| 709 self._SetupRunGitMock(try_cmd) | 754 self._SetupRunGitMock(try_cmd) |
| 710 bisect_perf_regression._StartBuilderTryJob( | 755 bisect_perf_regression._StartBuilderTryJob( |
| 711 fetch_build.PERF_BUILDER, git_revision, bot_name, bisect_job_name, | 756 fetch_build.PERF_BUILDER, git_revision, bot_name, bisect_job_name, |
| 712 patch) | 757 patch) |
| 713 | 758 |
| 714 | 759 |
| 715 if __name__ == '__main__': | 760 if __name__ == '__main__': |
| 716 unittest.main() | 761 unittest.main() |