OLD | NEW |
1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import unittest | 5 import unittest |
6 | 6 |
7 from auto_bisect import source_control as source_control_module | 7 from auto_bisect import source_control as source_control_module |
8 | 8 |
9 # Special import necessary because filename contains dash characters. | 9 # Special import necessary because filename contains dash characters. |
10 bisect_perf_module = __import__('bisect-perf-regression') | 10 bisect_perf_module = __import__('bisect-perf-regression') |
11 | 11 |
12 # Sample output for a performance test used in the results parsing tests below. | |
13 RESULTS_OUTPUT = """RESULT write_operations: write_operations= 23089 count | |
14 RESULT read_bytes_gpu: read_bytes_gpu= 35201 kb | |
15 RESULT write_bytes_gpu: write_bytes_gpu= 542 kb | |
16 RESULT telemetry_page_measurement_results: num_failed= 0 count | |
17 RESULT telemetry_page_measurement_results: num_errored= 0 count | |
18 *RESULT Total: Total_ref= %(value)s | |
19 """ | |
20 | 12 |
| 13 class BisectPerfRegressionTest(unittest.TestCase): |
| 14 """Test case for other functions and classes in bisect-perf-regression.py.""" |
21 | 15 |
22 # Some private methods of the bisect-perf-regression module are tested below. | 16 def _AssertConfidence(self, score, bad_values, good_values): |
23 # pylint: disable=W0212 | 17 """Checks whether the given sets of values have a given confidence score. |
24 class BisectPerfRegressionTest(unittest.TestCase): | |
25 """Test case for top-level functions in the bisect-perf-regrssion module.""" | |
26 | 18 |
27 def setUp(self): | 19 The score represents our confidence that the two sets of values wouldn't |
28 """Sets up the test environment before each test method.""" | 20 be as different as they are just by chance; that is, that some real change |
29 pass | 21 occurred between the two sets of values. |
30 | 22 |
31 def tearDown(self): | 23 Args: |
32 """Cleans up the test environment after each test method.""" | 24 score: Expected confidence score. |
33 pass | 25 bad_values: First list of numbers. |
| 26 good_values: Second list of numbers. |
| 27 """ |
| 28 # ConfidenceScore takes a list of lists but these lists are flattened. |
| 29 confidence = bisect_perf_module.ConfidenceScore([bad_values], [good_values]) |
| 30 self.assertEqual(score, confidence) |
34 | 31 |
35 def testConfidenceScoreHigh(self): | 32 def testConfidenceScore_ZeroConfidence(self): |
36 """Tests the confidence calculation.""" | |
37 bad_values = [[0, 1, 1], [1, 2, 2]] | |
38 good_values = [[1, 2, 2], [3, 3, 4]] | |
39 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | |
40 self.assertEqual(95.0, confidence) | |
41 | |
42 def testConfidenceScoreNotSoHigh(self): | |
43 """Tests the confidence calculation.""" | |
44 bad_values = [[0, 1, 1], [1, 2, 2]] | |
45 good_values = [[1, 1, 1], [3, 3, 4]] | |
46 # The good and bad groups are closer together than in the above test, | |
47 # so the confidence that they're different is a little lower. | |
48 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | |
49 self.assertEqual(80.0, confidence) | |
50 | |
51 def testConfidenceScoreZero(self): | |
52 """Tests the confidence calculation when it's expected to be 0.""" | |
53 bad_values = [[4, 5], [7, 6], [8, 7]] | |
54 good_values = [[8, 7], [6, 7], [5, 4]] | |
55 # The good and bad sets contain the same values, so the confidence that | 33 # The good and bad sets contain the same values, so the confidence that |
56 # they're different should be zero. | 34 # they're different should be zero. |
57 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | 35 self._AssertConfidence(0.0, [4, 5, 7, 6, 8, 7], [8, 7, 6, 7, 5, 4]) |
58 self.assertEqual(0.0, confidence) | |
59 | 36 |
60 def testConfidenceScoreVeryHigh(self): | 37 def testConfidenceScore_MediumConfidence(self): |
61 """Tests the confidence calculation when it's expected to be high.""" | 38 self._AssertConfidence(80.0, [0, 1, 1, 1, 2, 2], [1, 1, 1, 3, 3, 4]) |
62 bad_values = [[1, 1], [1, 1]] | |
63 good_values = [[1.2, 1.2], [1.2, 1.2]] | |
64 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | |
65 self.assertEqual(99.9, confidence) | |
66 | 39 |
67 def testConfidenceScoreImbalance(self): | 40 def testConfidenceScore_HighConfidence(self): |
68 """Tests the confidence calculation one set of numbers is small.""" | 41 self._AssertConfidence(95.0, [0, 1, 1, 1, 2, 2], [1, 2, 2, 3, 3, 4]) |
69 bad_values = [[1.1, 1.2], [1.1, 1.2], [1.0, 1.3], [1.2, 1.3]] | |
70 good_values = [[1.4]] | |
71 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | |
72 self.assertEqual(80.0, confidence) | |
73 | 42 |
74 def testConfidenceScoreImbalance(self): | 43 def testConfidenceScore_VeryHighConfidence(self): |
75 """Tests the confidence calculation one set of numbers is empty.""" | 44 # Confidence is high if the two sets of values have no internal variance. |
76 bad_values = [[1.1, 1.2], [1.1, 1.2], [1.0, 1.3], [1.2, 1.3]] | 45 self._AssertConfidence(99.9, [1, 1, 1, 1], [1.2, 1.2, 1.2, 1.2]) |
77 good_values = [] | 46 self._AssertConfidence(99.9, [1, 1, 1, 1], [1.01, 1.01, 1.01, 1.01]) |
78 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | |
79 self.assertEqual(0.0, confidence) | |
80 | 47 |
81 def testConfidenceScoreFunctionalTestResultsInconsistent(self): | 48 def testConfidenceScore_ImbalancedSampleSize(self): |
82 """Tests the confidence calculation when the numbers are just 0 and 1.""" | 49 # The second set of numbers only contains one number, so confidence is low. |
83 bad_values = [[1], [1], [0], [1], [1], [1], [0], [1]] | 50 self._AssertConfidence( |
84 good_values = [[0], [0], [1], [0], [1], [0]] | 51 80.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3], [1.4]) |
85 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | |
86 self.assertEqual(80.0, confidence) | |
87 | 52 |
88 def testConfidenceScoreFunctionalTestResultsConsistent(self): | 53 def testConfidenceScore_EmptySample(self): |
89 """Tests the confidence calculation when the numbers are 0 and 1.""" | 54 # Confidence is zero if either or both samples are empty. |
90 bad_values = [[1], [1], [1], [1], [1], [1], [1], [1]] | 55 self._AssertConfidence(0.0, [], []) |
91 good_values = [[0], [0], [0], [0], [0], [0]] | 56 self._AssertConfidence(0.0, [], [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3]) |
92 confidence = bisect_perf_module.ConfidenceScore(bad_values, good_values) | 57 self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3], []) |
93 self.assertEqual(99.9, confidence) | 58 |
| 59 def testConfidenceScore_FunctionalTestResults(self): |
| 60 self._AssertConfidence(80.0, [1, 1, 0, 1, 1, 1, 0, 1], [0, 0, 1, 0, 1, 0]) |
| 61 self._AssertConfidence(99.9, [1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]) |
94 | 62 |
95 def testParseDEPSStringManually(self): | 63 def testParseDEPSStringManually(self): |
96 """Tests DEPS parsing.""" | 64 """Tests DEPS parsing.""" |
97 deps_file_contents = """ | 65 deps_file_contents = """ |
98 vars = { | 66 vars = { |
99 'ffmpeg_hash': | 67 'ffmpeg_hash': |
100 '@ac4a9f31fe2610bd146857bbd55d7a260003a888', | 68 '@ac4a9f31fe2610bd146857bbd55d7a260003a888', |
101 'webkit_url': | 69 'webkit_url': |
102 'https://chromium.googlesource.com/chromium/blink.git', | 70 'https://chromium.googlesource.com/chromium/blink.git', |
103 'git_url': | 71 'git_url': |
104 'https://chromium.googlesource.com', | 72 'https://chromium.googlesource.com', |
105 'webkit_rev': | 73 'webkit_rev': |
106 '@e01ac0a267d1017288bc67fa3c366b10469d8a24', | 74 '@e01ac0a267d1017288bc67fa3c366b10469d8a24', |
107 'angle_revision': | 75 'angle_revision': |
108 '74697cf2064c0a2c0d7e1b1b28db439286766a05' | 76 '74697cf2064c0a2c0d7e1b1b28db439286766a05' |
109 }""" | 77 }""" |
110 | 78 |
111 # Should only expect svn/git revisions to come through, and urls should be | 79 # Should only expect SVN/git revisions to come through, and URLs should be |
112 # filtered out. | 80 # filtered out. |
113 expected_vars_dict = { | 81 expected_vars_dict = { |
114 'ffmpeg_hash': '@ac4a9f31fe2610bd146857bbd55d7a260003a888', | 82 'ffmpeg_hash': '@ac4a9f31fe2610bd146857bbd55d7a260003a888', |
115 'webkit_rev': '@e01ac0a267d1017288bc67fa3c366b10469d8a24', | 83 'webkit_rev': '@e01ac0a267d1017288bc67fa3c366b10469d8a24', |
116 'angle_revision': '74697cf2064c0a2c0d7e1b1b28db439286766a05' | 84 'angle_revision': '74697cf2064c0a2c0d7e1b1b28db439286766a05' |
117 } | 85 } |
| 86 # Testing private function. |
| 87 # pylint: disable=W0212 |
118 vars_dict = bisect_perf_module._ParseRevisionsFromDEPSFileManually( | 88 vars_dict = bisect_perf_module._ParseRevisionsFromDEPSFileManually( |
119 deps_file_contents) | 89 deps_file_contents) |
120 self.assertEqual(vars_dict, expected_vars_dict) | 90 self.assertEqual(vars_dict, expected_vars_dict) |
121 | 91 |
122 def testTryParseResultValuesFromOutputWithSingleValue(self): | 92 def _AssertParseResult(self, expected_values, result_string): |
| 93 """Asserts some values are parsed from a RESULT line.""" |
| 94 results_template = ('RESULT other_chart: other_trace= 123 count\n' |
| 95 'RESULT my_chart: my_trace= %(value)s\n') |
| 96 results = results_template % {'value': result_string} |
| 97 metric = ['my_chart', 'my_trace'] |
| 98 # Testing private function. |
| 99 # pylint: disable=W0212 |
| 100 values = bisect_perf_module._TryParseResultValuesFromOutput(metric, results) |
| 101 self.assertEqual(expected_values, values) |
| 102 |
| 103 def testTryParseResultValuesFromOutput_WithSingleValue(self): |
123 """Tests result pattern <*>RESULT <graph>: <trace>= <value>""" | 104 """Tests result pattern <*>RESULT <graph>: <trace>= <value>""" |
124 metrics = ['Total', 'Total_ref'] | 105 self._AssertParseResult([66.88], '66.88 kb') |
125 self.assertEqual( | 106 self._AssertParseResult([66.88], '66.88 ') |
126 [66.88], bisect_perf_module._TryParseResultValuesFromOutput( | 107 self._AssertParseResult([-66.88], '-66.88 kb') |
127 metrics, RESULTS_OUTPUT % {'value': '66.88 kb'})) | 108 self._AssertParseResult([66], '66 kb') |
128 self.assertEqual( | 109 self._AssertParseResult([0.66], '.66 kb') |
129 [66.88], bisect_perf_module._TryParseResultValuesFromOutput( | 110 self._AssertParseResult([], '. kb') |
130 metrics, RESULTS_OUTPUT % {'value': '66.88kb'})) | 111 self._AssertParseResult([], 'aaa kb') |
131 self.assertEqual( | |
132 [66.88], bisect_perf_module._TryParseResultValuesFromOutput( | |
133 metrics, RESULTS_OUTPUT % {'value': ' 66.88 '})) | |
134 self.assertEqual( | |
135 [-66.88], bisect_perf_module._TryParseResultValuesFromOutput( | |
136 metrics, RESULTS_OUTPUT % {'value': ' -66.88 kb'})) | |
137 self.assertEqual( | |
138 [66], bisect_perf_module._TryParseResultValuesFromOutput( | |
139 metrics, RESULTS_OUTPUT % {'value': '66 kb'})) | |
140 self.assertEqual( | |
141 [.66], bisect_perf_module._TryParseResultValuesFromOutput( | |
142 metrics, RESULTS_OUTPUT % {'value': '.66 kb'})) | |
143 self.assertEqual( | |
144 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
145 metrics, RESULTS_OUTPUT % {'value': '. kb'})) | |
146 self.assertEqual( | |
147 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
148 metrics, RESULTS_OUTPUT % {'value': 'aaa kb'})) | |
149 | 112 |
150 def testTryParseResultValuesFromOutputWithMulitValue(self): | 113 def testTryParseResultValuesFromOutput_WithMultiValue(self): |
151 """Tests result pattern <*>RESULT <graph>: <trace>= [<value>,<value>, ..]""" | 114 """Tests result pattern <*>RESULT <graph>: <trace>= [<value>,<value>, ..]""" |
152 metrics = ['Total', 'Total_ref'] | 115 self._AssertParseResult([66.88], '[66.88] kb') |
153 self.assertEqual( | 116 self._AssertParseResult([66.88, 99.44], '[66.88, 99.44]kb') |
154 [66.88], bisect_perf_module._TryParseResultValuesFromOutput( | 117 self._AssertParseResult([66.88, 99.44], '[ 66.88, 99.44 ]') |
155 metrics, RESULTS_OUTPUT % {'value': '[66.88] kb'})) | 118 self._AssertParseResult([-66.88, 99.44], '[-66.88, 99.44] kb') |
156 self.assertEqual( | 119 self._AssertParseResult([-66, 99], '[-66,99] kb') |
157 [66.88, 99.44], bisect_perf_module._TryParseResultValuesFromOutput( | 120 self._AssertParseResult([-66, 99], '[-66,99,] kb') |
158 metrics, RESULTS_OUTPUT % {'value': '[66.88, 99.44]kb'})) | 121 self._AssertParseResult([-66, 0.99], '[-66,.99] kb') |
159 self.assertEqual( | 122 self._AssertParseResult([], '[] kb') |
160 [66.88, 99.44], bisect_perf_module._TryParseResultValuesFromOutput( | 123 self._AssertParseResult([], '[-66,abc] kb') |
161 metrics, RESULTS_OUTPUT % {'value': '[ 66.88, 99.44 ]'})) | |
162 self.assertEqual( | |
163 [-66.88, 99.44], bisect_perf_module._TryParseResultValuesFromOutput( | |
164 metrics, RESULTS_OUTPUT % {'value': '[-66.88,99.44] kb'})) | |
165 self.assertEqual( | |
166 [-66, 99], bisect_perf_module._TryParseResultValuesFromOutput( | |
167 metrics, RESULTS_OUTPUT % {'value': '[-66,99] kb'})) | |
168 self.assertEqual( | |
169 [-66, 99], bisect_perf_module._TryParseResultValuesFromOutput( | |
170 metrics, RESULTS_OUTPUT % {'value': '[-66,99,] kb'})) | |
171 self.assertEqual( | |
172 [.66, .99], bisect_perf_module._TryParseResultValuesFromOutput( | |
173 metrics, RESULTS_OUTPUT % {'value': '[.66,.99] kb'})) | |
174 self.assertEqual( | |
175 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
176 metrics, RESULTS_OUTPUT % {'value': '[] kb'})) | |
177 self.assertEqual( | |
178 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
179 metrics, RESULTS_OUTPUT % {'value': '[-66,abc] kb'})) | |
180 | 124 |
181 def testTryParseResultValuesFromOutputWithMeanStd(self): | 125 def testTryParseResultValuesFromOutputWithMeanStd(self): |
182 """Tests result pattern <*>RESULT <graph>: <trace>= {<mean, std}""" | 126 """Tests result pattern <*>RESULT <graph>: <trace>= {<mean>, <std>}""" |
183 metrics = ['Total', 'Total_ref'] | 127 self._AssertParseResult([33.22], '{33.22, 3.6} kb') |
184 self.assertEqual( | 128 self._AssertParseResult([33.22], '{33.22, 3.6} kb') |
185 [33.22], bisect_perf_module._TryParseResultValuesFromOutput( | 129 self._AssertParseResult([33.22], '{33.22,3.6}kb') |
186 metrics, RESULTS_OUTPUT % {'value': '{33.22, 3.6} kb'})) | 130 self._AssertParseResult([33.22], '{33.22,3.6} kb') |
187 self.assertEqual( | 131 self._AssertParseResult([33.22], '{ 33.22,3.6 }kb') |
188 [33.22], bisect_perf_module._TryParseResultValuesFromOutput( | 132 self._AssertParseResult([-33.22], '{-33.22,3.6}kb') |
189 metrics, RESULTS_OUTPUT % {'value': '{33.22,3.6}kb'})) | 133 self._AssertParseResult([22], '{22,6}kb') |
190 self.assertEqual( | 134 self._AssertParseResult([.22], '{.22,6}kb') |
191 [33.22], bisect_perf_module._TryParseResultValuesFromOutput( | 135 self._AssertParseResult([], '{.22,6, 44}kb') |
192 metrics, RESULTS_OUTPUT % {'value': '{33.22,3.6} kb'})) | 136 self._AssertParseResult([], '{}kb') |
193 self.assertEqual( | 137 self._AssertParseResult([], '{XYZ}kb') |
194 [33.22], bisect_perf_module._TryParseResultValuesFromOutput( | |
195 metrics, RESULTS_OUTPUT % {'value': '{ 33.22,3.6 }kb'})) | |
196 self.assertEqual( | |
197 [-33.22], bisect_perf_module._TryParseResultValuesFromOutput( | |
198 metrics, RESULTS_OUTPUT % {'value': '{-33.22,3.6}kb'})) | |
199 self.assertEqual( | |
200 [22], bisect_perf_module._TryParseResultValuesFromOutput( | |
201 metrics, RESULTS_OUTPUT % {'value': '{22,6}kb'})) | |
202 self.assertEqual( | |
203 [.22], bisect_perf_module._TryParseResultValuesFromOutput( | |
204 metrics, RESULTS_OUTPUT % {'value': '{.22,6}kb'})) | |
205 self.assertEqual( | |
206 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
207 metrics, RESULTS_OUTPUT % {'value': '{.22,6, 44}kb'})) | |
208 self.assertEqual( | |
209 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
210 metrics, RESULTS_OUTPUT % {'value': '{}kb'})) | |
211 self.assertEqual( | |
212 [], bisect_perf_module._TryParseResultValuesFromOutput( | |
213 metrics, RESULTS_OUTPUT % {'value': '{XYZ}kb'})) | |
214 | 138 |
215 def testGetCompatibleCommand(self): | 139 def _AssertCompatibleCommand( |
| 140 self, expected_command, original_command, revision, target_platform): |
| 141 """Tests the modification of the command that might be done. |
| 142 |
| 143 This modification to the command is done in order to get a Telemetry |
| 144 command that works; before some revisions, the browser name that Telemetry |
| 145 expects is different in some cases, but we want it to work anyway. |
| 146 |
| 147 Specifically, only for android: |
| 148 After r276628, only android-chrome-shell works. |
| 149 Prior to r274857, only android-chromium-testshell works. |
| 150 In the range [274857, 276628], both work. |
| 151 """ |
216 bisect_options = bisect_perf_module.BisectOptions() | 152 bisect_options = bisect_perf_module.BisectOptions() |
217 bisect_options.output_buildbot_annotations = None | 153 bisect_options.output_buildbot_annotations = None |
218 source_control = source_control_module.DetermineAndCreateSourceControl( | 154 source_control = source_control_module.DetermineAndCreateSourceControl( |
219 bisect_options) | 155 bisect_options) |
220 bisect_instance = bisect_perf_module.BisectPerformanceMetrics( | 156 bisect_instance = bisect_perf_module.BisectPerformanceMetrics( |
221 source_control, bisect_options) | 157 source_control, bisect_options) |
222 bisect_instance.opts.target_platform = 'android' | 158 bisect_instance.opts.target_platform = target_platform |
223 depot = 'chromium' | |
224 # android-chrome-shell -> android-chromium-testshell | |
225 revision = 274857 | |
226 git_revision = bisect_instance.source_control.ResolveToRevision( | 159 git_revision = bisect_instance.source_control.ResolveToRevision( |
227 revision, 'chromium', bisect_perf_module.DEPOT_DEPS_NAME, 100) | 160 revision, 'chromium', bisect_perf_module.DEPOT_DEPS_NAME, 100) |
228 command = ('tools/perf/run_benchmark -v ' | 161 depot = 'chromium' |
229 '--browser=android-chrome-shell page_cycler.intl_ja_zh') | 162 command = bisect_instance.GetCompatibleCommand( |
230 expected_command = ('tools/perf/run_benchmark -v --browser=' | 163 original_command, git_revision, depot) |
231 'android-chromium-testshell page_cycler.intl_ja_zh') | 164 self.assertEqual(expected_command, command) |
232 self.assertEqual( | |
233 bisect_instance.GetCompatibleCommand(command, git_revision, depot), | |
234 expected_command) | |
235 | 165 |
236 # android-chromium-testshell -> android-chromium-testshell | 166 def testGetCompatibleCommand_ChangeToTestShell(self): |
237 revision = 274858 | 167 # For revisions <= r274857, only android-chromium-testshell is used. |
238 git_revision = bisect_instance.source_control.ResolveToRevision( | 168 self._AssertCompatibleCommand( |
239 revision, 'chromium', bisect_perf_module.DEPOT_DEPS_NAME, 100) | 169 'tools/perf/run_benchmark -v --browser=android-chromium-testshell foo', |
240 command = ('tools/perf/run_benchmark -v ' | 170 'tools/perf/run_benchmark -v --browser=android-chrome-shell foo', |
241 '--browser=android-chromium-testshell page_cycler.intl_ja_zh') | 171 274857, 'android') |
242 expected_command = ('tools/perf/run_benchmark -v --browser=' | |
243 'android-chromium-testshell page_cycler.intl_ja_zh') | |
244 self.assertEqual( | |
245 bisect_instance.GetCompatibleCommand(command, git_revision, depot), | |
246 expected_command) | |
247 | 172 |
248 # android-chromium-testshell -> android-chrome-shell | 173 def testGetCompatibleCommand_ChangeToShell(self): |
249 revision = 276628 | 174 # For revisions >= r276728, only android-chrome-shell can be used. |
250 git_revision = bisect_instance.source_control.ResolveToRevision( | 175 self._AssertCompatibleCommand( |
251 revision, 'chromium', bisect_perf_module.DEPOT_DEPS_NAME, 100) | 176 'tools/perf/run_benchmark -v --browser=android-chrome-shell foo', |
252 command = ('tools/perf/run_benchmark -v ' | 177 'tools/perf/run_benchmark -v --browser=android-chromium-testshell foo', |
253 '--browser=android-chromium-testshell page_cycler.intl_ja_zh') | 178 276628, 'android') |
254 expected_command = ('tools/perf/run_benchmark -v --browser=' | 179 |
255 'android-chrome-shell page_cycler.intl_ja_zh') | 180 def testGetCompatibleCommand_NoChange(self): |
256 self.assertEqual( | 181 # For revisions < r276628, android-chromium-testshell can be used. |
257 bisect_instance.GetCompatibleCommand(command, git_revision, depot), | 182 self._AssertCompatibleCommand( |
258 expected_command) | 183 'tools/perf/run_benchmark -v --browser=android-chromium-testshell foo', |
259 # android-chrome-shell -> android-chrome-shell | 184 'tools/perf/run_benchmark -v --browser=android-chromium-testshell foo', |
260 command = ('tools/perf/run_benchmark -v ' | 185 274858, 'android') |
261 '--browser=android-chrome-shell page_cycler.intl_ja_zh') | 186 # For revisions > r274857, android-chrome-shell can be used. |
262 expected_command = ('tools/perf/run_benchmark -v --browser=' | 187 self._AssertCompatibleCommand( |
263 'android-chrome-shell page_cycler.intl_ja_zh') | 188 'tools/perf/run_benchmark -v --browser=android-chrome-shell foo', |
264 self.assertEqual( | 189 'tools/perf/run_benchmark -v --browser=android-chrome-shell foo', |
265 bisect_instance.GetCompatibleCommand(command, git_revision, depot), | 190 274858, 'android') |
266 expected_command) | 191 |
267 # Not android platform | 192 def testGetCompatibleCommand_NonAndroidPlatform(self): |
268 bisect_instance.opts.target_platform = 'chromium' | 193 # In most cases, there's no need to change Telemetry command. |
269 command = ('tools/perf/run_benchmark -v ' | 194 # For revisions >= r276628, only android-chrome-shell can be used. |
270 '--browser=release page_cycler.intl_ja_zh') | 195 self._AssertCompatibleCommand( |
271 expected_command = ('tools/perf/run_benchmark -v --browser=' | 196 'tools/perf/run_benchmark -v --browser=release foo', |
272 'release page_cycler.intl_ja_zh') | 197 'tools/perf/run_benchmark -v --browser=release foo', |
273 self.assertEqual( | 198 276628, 'chromium') |
274 bisect_instance.GetCompatibleCommand(command, git_revision, depot), | 199 |
275 expected_command) | 200 # This method doesn't reference self; it fails if an error is thrown. |
| 201 # pylint: disable=R0201 |
| 202 def testDryRun(self): |
| 203 """Does a dry run of the bisect script. |
| 204 |
| 205 This serves as a smoke test to catch errors in the basic execution of the |
| 206 script. |
| 207 """ |
| 208 options_dict = { |
| 209 'debug_ignore_build': True, |
| 210 'debug_ignore_sync': True, |
| 211 'debug_ignore_perf_test': True, |
| 212 'command': 'fake_command', |
| 213 'metric': 'fake/metric', |
| 214 'good_revision': 280000, |
| 215 'bad_revision': 280005, |
| 216 } |
| 217 bisect_options = bisect_perf_module.BisectOptions.FromDict(options_dict) |
| 218 source_control = source_control_module.DetermineAndCreateSourceControl( |
| 219 bisect_options) |
| 220 bisect_instance = bisect_perf_module.BisectPerformanceMetrics( |
| 221 source_control, bisect_options) |
| 222 results = bisect_instance.Run(bisect_options.command, |
| 223 bisect_options.bad_revision, |
| 224 bisect_options.good_revision, |
| 225 bisect_options.metric) |
| 226 bisect_instance.FormatAndPrintResults(results) |
276 | 227 |
277 | 228 |
278 if __name__ == '__main__': | 229 if __name__ == '__main__': |
279 unittest.main() | 230 unittest.main() |
OLD | NEW |