OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Performance Test Bisect Tool | 6 """Performance Test Bisect Tool |
7 | 7 |
8 This script bisects a series of changelists using binary search. It starts at | 8 This script bisects a series of changelists using binary search. It starts at |
9 a bad revision where a performance metric has regressed, and asks for a last | 9 a bad revision where a performance metric has regressed, and asks for a last |
10 known-good revision. It will then binary search across this revision range by | 10 known-good revision. It will then binary search across this revision range by |
11 syncing, building, and running a performance test. If the change is | 11 syncing, building, and running a performance test. If the change is |
12 suspected to occur as a result of WebKit/V8 changes, the script will | 12 suspected to occur as a result of WebKit/V8 changes, the script will |
13 further bisect changes to those depots and attempt to narrow down the revision | 13 further bisect changes to those depots and attempt to narrow down the revision |
14 range. | 14 range. |
15 | 15 |
16 | 16 |
17 An example usage (using svn cl's): | 17 An example usage (using svn cl's): |
18 | 18 |
19 ./tools/bisect-perf-regression.py -c\ | 19 ./tools/bisect-perf-regression.py -c\ |
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | 20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ |
21 -g 168222 -b 168232 -m shutdown/simple-user-quit | 21 -g 168222 -b 168232 -m shutdown/simple-user-quit |
22 | 22 |
23 Be aware that if you're using the git workflow and specify an svn revision, | 23 Be aware that if you're using the git workflow and specify an svn revision, |
24 the script will attempt to find the git SHA1 where svn changes up to that | 24 the script will attempt to find the git SHA1 where svn changes up to that |
25 revision were merged in. | 25 revision were merged in. |
26 | 26 |
27 | 27 |
28 An example usage (using git hashes): | 28 An example usage (using git hashes): |
29 | 29 |
30 ./tools/bisect-perf-regression.py -c\ | 30 ./tools/bisect-perf-regression.py -c\ |
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | 31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ |
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ | 32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ |
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ | 33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ |
34 -m shutdown/simple-user-quit | 34 -m shutdown/simple-user-quit |
35 """ | 35 """ |
36 | 36 |
37 import copy | 37 import copy |
38 import datetime | 38 import datetime |
39 import errno | 39 import errno |
40 import hashlib | 40 import hashlib |
41 import math | 41 import math |
42 import optparse | 42 import optparse |
43 import os | 43 import os |
44 import re | 44 import re |
45 import shlex | 45 import shlex |
46 import shutil | 46 import shutil |
47 import StringIO | 47 import StringIO |
48 import sys | 48 import sys |
49 import time | 49 import time |
50 import zipfile | 50 import zipfile |
51 | 51 |
52 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry')) | 52 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry')) |
53 | 53 |
54 from auto_bisect import bisect_utils | 54 from auto_bisect import bisect_utils |
55 from auto_bisect import math_utils | 55 from auto_bisect import math_utils |
56 from auto_bisect import post_perf_builder_job as bisect_builder | 56 from auto_bisect import post_perf_builder_job as bisect_builder |
57 from auto_bisect import source_control as source_control_module | 57 from auto_bisect import source_control as source_control_module |
58 from auto_bisect import ttest | 58 from auto_bisect import ttest |
59 from telemetry.util import cloud_storage | 59 from telemetry.util import cloud_storage |
60 | 60 |
61 # The additional repositories that might need to be bisected. | 61 # Below is the map of "depot" names to information about each depot. Each depot |
62 # If the repository has any dependant repositories (such as skia/src needs | 62 # is a repository, and in the process of bisecting, revision ranges in these |
63 # skia/include and skia/gyp to be updated), specify them in the 'depends' | 63 # repositories may also be bisected. |
64 # so that they're synced appropriately. | 64 # |
65 # Format is: | 65 # Each depot information dictionary may contain: |
66 # src: path to the working directory. | 66 # src: Path to the working directory. |
67 # recurse: True if this repositry will get bisected. | 67 # recurse: True if this repository will get bisected. |
68 # depends: A list of other repositories that are actually part of the same | 68 # depends: A list of other repositories that are actually part of the same |
69 # repository in svn. | 69 # repository in svn. If the repository has any dependent repositories |
70 # svn: Needed for git workflow to resolve hashes to svn revisions. | 70 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then |
71 # from: Parent depot that must be bisected before this is bisected. | 71 # they are specified here. |
72 # deps_var: Key name in vars varible in DEPS file that has revision information. | 72 # svn: URL of SVN repository. Needed for git workflow to resolve hashes to |
| 73 # SVN revisions. |
| 74 # from: Parent depot that must be bisected before this is bisected. |
| 75 # deps_var: Key name in vars variable in DEPS file that has revision |
| 76 # information. |
73 DEPOT_DEPS_NAME = { | 77 DEPOT_DEPS_NAME = { |
74 'chromium' : { | 78 'chromium': { |
75 "src" : "src", | 79 'src': 'src', |
76 "recurse" : True, | 80 'recurse': True, |
77 "depends" : None, | 81 'depends': None, |
78 "from" : ['cros', 'android-chrome'], | 82 'from': ['cros', 'android-chrome'], |
79 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=', | 83 'viewvc': |
80 'deps_var': 'chromium_rev' | 84 'http://src.chromium.org/viewvc/chrome?view=revision&revision=', |
81 }, | 85 'deps_var': 'chromium_rev' |
82 'webkit' : { | 86 }, |
83 "src" : "src/third_party/WebKit", | 87 'webkit': { |
84 "recurse" : True, | 88 'src': 'src/third_party/WebKit', |
85 "depends" : None, | 89 'recurse': True, |
86 "from" : ['chromium'], | 90 'depends': None, |
87 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=', | 91 'from': ['chromium'], |
88 'deps_var': 'webkit_revision' | 92 'viewvc': |
89 }, | 93 'http://src.chromium.org/viewvc/blink?view=revision&revision=', |
90 'angle' : { | 94 'deps_var': 'webkit_revision' |
91 "src" : "src/third_party/angle", | 95 }, |
92 "src_old" : "src/third_party/angle_dx11", | 96 'angle': { |
93 "recurse" : True, | 97 'src': 'src/third_party/angle', |
94 "depends" : None, | 98 'src_old': 'src/third_party/angle_dx11', |
95 "from" : ['chromium'], | 99 'recurse': True, |
96 "platform": 'nt', | 100 'depends': None, |
97 'deps_var': 'angle_revision' | 101 'from': ['chromium'], |
98 }, | 102 'platform': 'nt', |
99 'v8' : { | 103 'deps_var': 'angle_revision' |
100 "src" : "src/v8", | 104 }, |
101 "recurse" : True, | 105 'v8': { |
102 "depends" : None, | 106 'src': 'src/v8', |
103 "from" : ['chromium'], | 107 'recurse': True, |
104 "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8, | 108 'depends': None, |
105 'viewvc': 'https://code.google.com/p/v8/source/detail?r=', | 109 'from': ['chromium'], |
106 'deps_var': 'v8_revision' | 110 'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8, |
107 }, | 111 'viewvc': 'https://code.google.com/p/v8/source/detail?r=', |
108 'v8_bleeding_edge' : { | 112 'deps_var': 'v8_revision' |
109 "src" : "src/v8_bleeding_edge", | 113 }, |
110 "recurse" : True, | 114 'v8_bleeding_edge': { |
111 "depends" : None, | 115 'src': 'src/v8_bleeding_edge', |
112 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge", | 116 'recurse': True, |
113 "from" : ['v8'], | 117 'depends': None, |
114 'viewvc': 'https://code.google.com/p/v8/source/detail?r=', | 118 'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge', |
115 'deps_var': 'v8_revision' | 119 'from': ['v8'], |
116 }, | 120 'viewvc': 'https://code.google.com/p/v8/source/detail?r=', |
117 'skia/src' : { | 121 'deps_var': 'v8_revision' |
118 "src" : "src/third_party/skia/src", | 122 }, |
119 "recurse" : True, | 123 'skia/src': { |
120 "svn" : "http://skia.googlecode.com/svn/trunk/src", | 124 'src': 'src/third_party/skia/src', |
121 "depends" : ['skia/include', 'skia/gyp'], | 125 'recurse': True, |
122 "from" : ['chromium'], | 126 'svn': 'http://skia.googlecode.com/svn/trunk/src', |
123 'viewvc': 'https://code.google.com/p/skia/source/detail?r=', | 127 'depends': ['skia/include', 'skia/gyp'], |
124 'deps_var': 'skia_revision' | 128 'from': ['chromium'], |
125 }, | 129 'viewvc': 'https://code.google.com/p/skia/source/detail?r=', |
126 'skia/include' : { | 130 'deps_var': 'skia_revision' |
127 "src" : "src/third_party/skia/include", | 131 }, |
128 "recurse" : False, | 132 'skia/include': { |
129 "svn" : "http://skia.googlecode.com/svn/trunk/include", | 133 'src': 'src/third_party/skia/include', |
130 "depends" : None, | 134 'recurse': False, |
131 "from" : ['chromium'], | 135 'svn': 'http://skia.googlecode.com/svn/trunk/include', |
132 'viewvc': 'https://code.google.com/p/skia/source/detail?r=', | 136 'depends': None, |
133 'deps_var': 'None' | 137 'from': ['chromium'], |
134 }, | 138 'viewvc': 'https://code.google.com/p/skia/source/detail?r=', |
135 'skia/gyp' : { | 139 'deps_var': 'None' |
136 "src" : "src/third_party/skia/gyp", | 140 }, |
137 "recurse" : False, | 141 'skia/gyp': { |
138 "svn" : "http://skia.googlecode.com/svn/trunk/gyp", | 142 'src': 'src/third_party/skia/gyp', |
139 "depends" : None, | 143 'recurse': False, |
140 "from" : ['chromium'], | 144 'svn': 'http://skia.googlecode.com/svn/trunk/gyp', |
141 'viewvc': 'https://code.google.com/p/skia/source/detail?r=', | 145 'depends': None, |
142 'deps_var': 'None' | 146 'from': ['chromium'], |
143 }, | 147 'viewvc': 'https://code.google.com/p/skia/source/detail?r=', |
| 148 'deps_var': 'None' |
| 149 } |
144 } | 150 } |
145 | 151 |
146 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() | 152 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() |
147 | 153 |
148 CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk') | 154 CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk') |
149 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome' | 155 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome' |
150 CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys', | 156 CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys', |
151 'testing_rsa') | 157 'testing_rsa') |
152 CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts', | 158 CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts', |
153 'mod_for_test_scripts', 'ssh_keys', | 159 'mod_for_test_scripts', 'ssh_keys', |
154 'testing_rsa') | 160 'testing_rsa') |
155 | 161 |
| 162 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision. |
156 BUILD_RESULT_SUCCEED = 0 | 163 BUILD_RESULT_SUCCEED = 0 |
157 BUILD_RESULT_FAIL = 1 | 164 BUILD_RESULT_FAIL = 1 |
158 BUILD_RESULT_SKIPPED = 2 | 165 BUILD_RESULT_SKIPPED = 2 |
159 | 166 |
160 # Maximum time in seconds to wait after posting build request to tryserver. | 167 # Maximum time in seconds to wait after posting build request to tryserver. |
161 # TODO: Change these values based on the actual time taken by buildbots on | 168 # TODO: Change these values based on the actual time taken by buildbots on |
162 # the tryserver. | 169 # the tryserver. |
163 MAX_MAC_BUILD_TIME = 14400 | 170 MAX_MAC_BUILD_TIME = 14400 |
164 MAX_WIN_BUILD_TIME = 14400 | 171 MAX_WIN_BUILD_TIME = 14400 |
165 MAX_LINUX_BUILD_TIME = 14400 | 172 MAX_LINUX_BUILD_TIME = 14400 |
(...skipping 12 matching lines...) |
178 +%(deps_sha)s | 185 +%(deps_sha)s |
179 """ | 186 """ |
180 | 187 |
181 # The possible values of the --bisect_mode flag, which determines what to | 188 # The possible values of the --bisect_mode flag, which determines what to |
182 # use when classifying a revision as "good" or "bad". | 189 # use when classifying a revision as "good" or "bad". |
183 BISECT_MODE_MEAN = 'mean' | 190 BISECT_MODE_MEAN = 'mean' |
184 BISECT_MODE_STD_DEV = 'std_dev' | 191 BISECT_MODE_STD_DEV = 'std_dev' |
185 BISECT_MODE_RETURN_CODE = 'return_code' | 192 BISECT_MODE_RETURN_CODE = 'return_code' |
186 | 193 |
187 # The perf dashboard specifically looks for the string | 194 # The perf dashboard specifically looks for the string |
188 # "Estimated Confidence: 95%" to decide whether or not | 195 # "Estimated Confidence: 95%" to decide whether or not to cc the author(s). |
189 # to cc the author(s). If you change this, please update the perf | 196 # If you change this, please update the perf dashboard as well. |
190 # dashboard as well. | |
191 RESULTS_BANNER = """ | 197 RESULTS_BANNER = """ |
192 ===== BISECT JOB RESULTS ===== | 198 ===== BISECT JOB RESULTS ===== |
193 Status: %(status)s | 199 Status: %(status)s |
194 | 200 |
195 Test Command: %(command)s | 201 Test Command: %(command)s |
196 Test Metric: %(metrics)s | 202 Test Metric: %(metrics)s |
197 Relative Change: %(change)s | 203 Relative Change: %(change)s |
198 Estimated Confidence: %(confidence)d%%""" | 204 Estimated Confidence: %(confidence)d%%""" |
199 | 205 |
200 # The perf dashboard specifically looks for the string | 206 # The perf dashboard specifically looks for the string |
(...skipping 22 matching lines...) |
223 --browser=android-chromium-testshell. | 229 --browser=android-chromium-testshell. |
224 c) Test command to use: %(command)s | 230 c) Test command to use: %(command)s |
225 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \ | 231 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \ |
226 committed locally to run-perf-test.cfg. | 232 committed locally to run-perf-test.cfg. |
227 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository. | 233 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository. |
228 $ git cl upload --bypass-hooks | 234 $ git cl upload --bypass-hooks |
229 4. Send your try job to the tryserver. \ | 235 4. Send your try job to the tryserver. \ |
230 [Please make sure to use appropriate bot to reproduce] | 236 [Please make sure to use appropriate bot to reproduce] |
231 $ git cl try -m tryserver.chromium.perf -b <bot> | 237 $ git cl try -m tryserver.chromium.perf -b <bot> |
232 | 238 |
233 For more details please visit \nhttps://sites.google.com/a/chromium.org/dev/\ | 239 For more details please visit |
234 developers/performance-try-bots""" | 240 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots""" |
235 | 241 |
236 RESULTS_THANKYOU = """ | 242 RESULTS_THANKYOU = """ |
237 ===== THANK YOU FOR CHOOSING BISECT AIRLINES ===== | 243 ===== THANK YOU FOR CHOOSING BISECT AIRLINES ===== |
238 Visit http://www.chromium.org/developers/core-principles for Chrome's policy | 244 Visit http://www.chromium.org/developers/core-principles for Chrome's policy |
239 on perf regressions. | 245 on perf regressions. |
240 Contact chrome-perf-dashboard-team with any questions or suggestions about | 246 Contact chrome-perf-dashboard-team with any questions or suggestions about |
241 bisecting. | 247 bisecting. |
242 . .------. | 248 . .------. |
243 . .---. \ \==) | 249 . .---. \ \==) |
244 . |PERF\ \ \\ | 250 . |PERF\ \ \\ |
245 . | ---------'-------'-----------. | 251 . | ---------'-------'-----------. |
246 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-. | 252 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-. |
247 . \______________.-------._______________) | 253 . \______________.-------._______________) |
248 . / / | 254 . / / |
249 . / / | 255 . / / |
250 . / /==) | 256 . / /==) |
251 . ._______.""" | 257 . ._______.""" |
252 | 258 |
253 | 259 |
254 def _AddAdditionalDepotInfo(depot_info): | 260 def _AddAdditionalDepotInfo(depot_info): |
255 """Adds additional depot info to the global depot variables.""" | 261 """Adds additional depot info to the global depot variables.""" |
256 global DEPOT_DEPS_NAME | 262 global DEPOT_DEPS_NAME |
257 global DEPOT_NAMES | 263 global DEPOT_NAMES |
258 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + | 264 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items()) |
259 depot_info.items()) | |
260 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() | 265 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() |
261 | 266 |
262 | 267 |
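Note: as an illustration of the depot-info format documented above, a hypothetical call to _AddAdditionalDepotInfo might look like the sketch below. The 'pdfium' entry, its path, and its deps_var name are invented for illustration; only the key names come from the format comment above DEPOT_DEPS_NAME.

# Hypothetical usage sketch -- the 'pdfium' entry and its values are made up;
# only the key names are taken from the depot-info format comment.
_AddAdditionalDepotInfo({
  'pdfium': {
    'src': 'src/third_party/pdfium',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'deps_var': 'pdfium_revision',
  },
})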
263 def ConfidenceScore(good_results_lists, bad_results_lists): | 268 def ConfidenceScore(good_results_lists, bad_results_lists): |
264 """Calculates a confidence score. | 269 """Calculates a confidence score. |
265 | 270 |
266 This score is a percentage which represents our degree of confidence in the | 271 This score is a percentage which represents our degree of confidence in the |
267 proposition that the good results and bad results are distinct groups, and | 272 proposition that the good results and bad results are distinct groups, and |
268 their differences aren't due to chance alone. | 273 their differences aren't due to chance alone. |
269 | 274 |
(...skipping 12 matching lines...) |
282 sample1 = sum(good_results_lists, []) | 287 sample1 = sum(good_results_lists, []) |
283 sample2 = sum(bad_results_lists, []) | 288 sample2 = sum(bad_results_lists, []) |
284 | 289 |
285 # The p-value is approximately the probability of obtaining the given set | 290 # The p-value is approximately the probability of obtaining the given set |
286 # of good and bad values just by chance. | 291 # of good and bad values just by chance. |
287 _, _, p_value = ttest.WelchsTTest(sample1, sample2) | 292 _, _, p_value = ttest.WelchsTTest(sample1, sample2) |
288 return 100.0 * (1.0 - p_value) | 293 return 100.0 * (1.0 - p_value) |
289 | 294 |
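Note: a minimal standalone sketch of the confidence calculation above, using scipy's Welch's t-test as a stand-in for auto_bisect's ttest.WelchsTTest (an assumption made only so the sketch runs on its own):

# Standalone sketch of ConfidenceScore; scipy's Welch's t-test stands in for
# the script's ttest.WelchsTTest.
from scipy import stats

def confidence_score_sketch(good_results_lists, bad_results_lists):
  # Flatten the lists of result lists into two flat samples.
  sample1 = sum(good_results_lists, [])
  sample2 = sum(bad_results_lists, [])
  # p-value ~ probability that the difference between groups is due to chance.
  _, p_value = stats.ttest_ind(sample1, sample2, equal_var=False)
  return 100.0 * (1.0 - p_value)

# Example: clearly separated groups yield a high confidence.
print confidence_score_sketch([[21.0, 22.1]], [[30.2, 29.8]])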
290 | 295 |
291 def GetSHA1HexDigest(contents): | 296 def GetSHA1HexDigest(contents): |
292 """Returns secured hash containing hexadecimal for the given contents.""" | 297 """Returns SHA1 hex digest of the given string.""" |
293 return hashlib.sha1(contents).hexdigest() | 298 return hashlib.sha1(contents).hexdigest() |
294 | 299 |
295 | 300 |
296 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None): | 301 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None): |
297 """Gets the archive file name for the given revision.""" | 302 """Gets the archive file name for the given revision.""" |
298 def PlatformName(): | 303 def PlatformName(): |
299 """Return a string to be used in paths for the platform.""" | 304 """Return a string to be used in paths for the platform.""" |
300 if bisect_utils.IsWindowsHost(): | 305 if bisect_utils.IsWindowsHost(): |
301 # Build archive for x64 is still stored with 'win32'suffix | 306 # Build archive for x64 is still stored with 'win32'suffix |
302 # (chromium_utils.PlatformName()). | 307 # (chromium_utils.PlatformName()). |
(...skipping 65 matching lines...) |
368 os.remove(target_file) | 373 os.remove(target_file) |
369 return None | 374 return None |
370 | 375 |
371 | 376 |
372 # This is copied from Chromium's project build/scripts/common/chromium_utils.py. | 377 # This is copied from Chromium's project build/scripts/common/chromium_utils.py. |
373 def MaybeMakeDirectory(*path): | 378 def MaybeMakeDirectory(*path): |
374 """Creates an entire path, if it doesn't already exist.""" | 379 """Creates an entire path, if it doesn't already exist.""" |
375 file_path = os.path.join(*path) | 380 file_path = os.path.join(*path) |
376 try: | 381 try: |
377 os.makedirs(file_path) | 382 os.makedirs(file_path) |
378 except OSError, e: | 383 except OSError as e: |
379 if e.errno != errno.EEXIST: | 384 if e.errno != errno.EEXIST: |
380 return False | 385 return False |
381 return True | 386 return True |
382 | 387 |
383 | 388 |
384 # This is copied from Chromium's project build/scripts/common/chromium_utils.py. | 389 # This is copied from Chromium's project build/scripts/common/chromium_utils.py. |
385 def ExtractZip(filename, output_dir, verbose=True): | 390 def ExtractZip(filename, output_dir, verbose=True): |
386 """ Extract the zip archive in the output directory.""" | 391 """ Extract the zip archive in the output directory.""" |
387 MaybeMakeDirectory(output_dir) | 392 MaybeMakeDirectory(output_dir) |
388 | 393 |
(...skipping 32 matching lines...) |
421 for name in zf.namelist(): | 426 for name in zf.namelist(): |
422 if verbose: | 427 if verbose: |
423 print 'Extracting %s' % name | 428 print 'Extracting %s' % name |
424 zf.extract(name, output_dir) | 429 zf.extract(name, output_dir) |
425 if bisect_utils.IsMacHost(): | 430 if bisect_utils.IsMacHost(): |
426 # Restore permission bits. | 431 # Restore permission bits. |
427 os.chmod(os.path.join(output_dir, name), | 432 os.chmod(os.path.join(output_dir, name), |
428 zf.getinfo(name).external_attr >> 16L) | 433 zf.getinfo(name).external_attr >> 16L) |
429 | 434 |
430 | 435 |
431 | |
432 | |
433 def SetBuildSystemDefault(build_system, use_goma, goma_dir): | 436 def SetBuildSystemDefault(build_system, use_goma, goma_dir): |
434 """Sets up any environment variables needed to build with the specified build | 437 """Sets up any environment variables needed to build with the specified build |
435 system. | 438 system. |
436 | 439 |
437 Args: | 440 Args: |
438 build_system: A string specifying build system. Currently only 'ninja' or | 441 build_system: A string specifying build system. Currently only 'ninja' or |
439 'make' are supported.""" | 442 'make' are supported. |
| 443 """ |
440 if build_system == 'ninja': | 444 if build_system == 'ninja': |
441 gyp_var = os.getenv('GYP_GENERATORS', default='') | 445 gyp_var = os.getenv('GYP_GENERATORS', default='') |
442 | 446 |
443 if not gyp_var or not 'ninja' in gyp_var: | 447 if not gyp_var or not 'ninja' in gyp_var: |
444 if gyp_var: | 448 if gyp_var: |
445 os.environ['GYP_GENERATORS'] = gyp_var + ',ninja' | 449 os.environ['GYP_GENERATORS'] = gyp_var + ',ninja' |
446 else: | 450 else: |
447 os.environ['GYP_GENERATORS'] = 'ninja' | 451 os.environ['GYP_GENERATORS'] = 'ninja' |
448 | 452 |
449 if bisect_utils.IsWindowsHost(): | 453 if bisect_utils.IsWindowsHost(): |
450 os.environ['GYP_DEFINES'] = 'component=shared_library '\ | 454 os.environ['GYP_DEFINES'] = ('component=shared_library ' |
451 'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\ | 455 'incremental_chrome_dll=1 ' |
452 'chromium_win_pch=0' | 456 'disable_nacl=1 fastbuild=1 ' |
| 457 'chromium_win_pch=0') |
453 | 458 |
454 elif build_system == 'make': | 459 elif build_system == 'make': |
455 os.environ['GYP_GENERATORS'] = 'make' | 460 os.environ['GYP_GENERATORS'] = 'make' |
456 else: | 461 else: |
457 raise RuntimeError('%s build not supported.' % build_system) | 462 raise RuntimeError('%s build not supported.' % build_system) |
458 | 463 |
459 if use_goma: | 464 if use_goma: |
460 os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''), | 465 os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''), |
461 'use_goma=1') | 466 'use_goma=1') |
462 if goma_dir: | 467 if goma_dir: |
(...skipping 35 matching lines...) |
498 for t in targets: | 503 for t in targets: |
499 cmd.extend(['/Project', t]) | 504 cmd.extend(['/Project', t]) |
500 | 505 |
501 return_code = bisect_utils.RunProcess(cmd) | 506 return_code = bisect_utils.RunProcess(cmd) |
502 | 507 |
503 return not return_code | 508 return not return_code |
504 | 509 |
505 | 510 |
506 def WriteStringToFile(text, file_name): | 511 def WriteStringToFile(text, file_name): |
507 try: | 512 try: |
508 with open(file_name, "wb") as f: | 513 with open(file_name, 'wb') as f: |
509 f.write(text) | 514 f.write(text) |
510 except IOError as e: | 515 except IOError: |
511 raise RuntimeError('Error writing to file [%s]' % file_name ) | 516 raise RuntimeError('Error writing to file [%s]' % file_name ) |
512 | 517 |
513 | 518 |
514 def ReadStringFromFile(file_name): | 519 def ReadStringFromFile(file_name): |
515 try: | 520 try: |
516 with open(file_name) as f: | 521 with open(file_name) as f: |
517 return f.read() | 522 return f.read() |
518 except IOError as e: | 523 except IOError: |
519 raise RuntimeError('Error reading file [%s]' % file_name ) | 524 raise RuntimeError('Error reading file [%s]' % file_name ) |
520 | 525 |
521 | 526 |
522 def ChangeBackslashToSlashInPatch(diff_text): | 527 def ChangeBackslashToSlashInPatch(diff_text): |
523 """Formats file paths in the given text to unix-style paths.""" | 528 """Formats file paths in the given text to unix-style paths.""" |
524 if diff_text: | 529 if diff_text: |
525 diff_lines = diff_text.split('\n') | 530 diff_lines = diff_text.split('\n') |
526 for i in range(len(diff_lines)): | 531 for i in range(len(diff_lines)): |
527 if (diff_lines[i].startswith('--- ') or | 532 if (diff_lines[i].startswith('--- ') or |
528 diff_lines[i].startswith('+++ ')): | 533 diff_lines[i].startswith('+++ ')): |
(...skipping 160 matching lines...) |
689 try: | 694 try: |
690 # Keys will most likely be set to 0640 after wiping the chroot. | 695 # Keys will most likely be set to 0640 after wiping the chroot. |
691 os.chmod(CROS_SCRIPT_KEY_PATH, 0600) | 696 os.chmod(CROS_SCRIPT_KEY_PATH, 0600) |
692 os.chmod(CROS_TEST_KEY_PATH, 0600) | 697 os.chmod(CROS_TEST_KEY_PATH, 0600) |
693 cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py', | 698 cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py', |
694 '--remote=%s' % opts.cros_remote_ip, | 699 '--remote=%s' % opts.cros_remote_ip, |
695 '--board=%s' % opts.cros_board, '--test', '--verbose'] | 700 '--board=%s' % opts.cros_board, '--test', '--verbose'] |
696 | 701 |
697 return_code = bisect_utils.RunProcess(cmd) | 702 return_code = bisect_utils.RunProcess(cmd) |
698 return not return_code | 703 return not return_code |
699 except OSError, e: | 704 except OSError: |
700 return False | 705 return False |
701 | 706 |
702 def BuildPackages(self, opts, depot): | 707 def BuildPackages(self, opts, depot): |
703 """Builds packages for cros. | 708 """Builds packages for cros. |
704 | 709 |
705 Args: | 710 Args: |
706 opts: Program options containing cros_board. | 711 opts: Program options containing cros_board. |
707 depot: The depot being bisected. | 712 depot: The depot being bisected. |
708 | 713 |
709 Returns: | 714 Returns: |
(...skipping 53 matching lines...) |
763 | 768 |
764 Returns: | 769 Returns: |
765 True if build was successful. | 770 True if build was successful. |
766 """ | 771 """ |
767 if self.BuildPackages(opts, depot): | 772 if self.BuildPackages(opts, depot): |
768 if self.BuildImage(opts, depot): | 773 if self.BuildImage(opts, depot): |
769 return self.ImageToTarget(opts) | 774 return self.ImageToTarget(opts) |
770 return False | 775 return False |
771 | 776 |
772 | 777 |
773 | 778 def _ParseRevisionsFromDEPSFileManually(deps_file_contents): |
| 779 """Parses the vars section of the DEPS file with regex. |
| 780 |
| 781 Args: |
| 782 deps_file_contents: The DEPS file contents as a string. |
| 783 |
| 784 Returns: |
| 785 A dict in the format {depot:revision} if successful, otherwise None. |
| 786 """ |
| 787 # We'll parse the "vars" section of the DEPS file. |
| 788 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE) |
| 789 re_results = rxp.search(deps_file_contents) |
| 790 |
| 791 if not re_results: |
| 792 return None |
| 793 |
| 794 # We should be left with a series of entries in the vars component of |
| 795 # the DEPS file with the following format: |
| 796 # 'depot_name': 'revision', |
| 797 vars_body = re_results.group('vars_body') |
| 798 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'", |
| 799 re.MULTILINE) |
| 800 re_results = rxp.findall(vars_body) |
| 801 |
| 802 return dict(re_results) |
| 803 |
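Note: a self-contained illustration of the regexes in _ParseRevisionsFromDEPSFileManually, applied to a made-up DEPS "vars" snippet (the revision values are placeholders):

# The DEPS snippet and revision values below are made up for illustration.
import re

SAMPLE_DEPS = """
vars = {
  'webkit_revision': '178100',
  'v8_revision': 'c16d4a8d07e34a0e',
}
"""
vars_body = re.search(
    'vars = {(?P<vars_body>[^}]+)', SAMPLE_DEPS).group('vars_body')
revisions = dict(re.findall(
    "'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'", vars_body))
# revisions == {'webkit_revision': '178100', 'v8_revision': 'c16d4a8d07e34a0e'}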
| 804 |
| 805 def _WaitUntilBuildIsReady( |
| 806 fetch_build, bot_name, builder_host, builder_port, build_request_id, |
| 807 max_timeout): |
| 808 """Waits until build is produced by bisect builder on tryserver. |
| 809 |
| 810 Args: |
| 811 fetch_build: Function to check and download build from cloud storage. |
| 812 bot_name: Builder bot name on tryserver. |
| 813 builder_host: Tryserver hostname. |
| 814 builder_port: Tryserver port. |
| 815 build_request_id: A unique ID of the build request posted to tryserver. |
| 816 max_timeout: Maximum time to wait for the build. |
| 817 |
| 818 Returns: |
| 819 A (downloaded archive file path, message) pair; the path is None on failure. |
| 820 """ |
| 821 # Build number on the tryserver. |
| 822 build_num = None |
| 823 # Interval to check build on cloud storage. |
| 824 poll_interval = 60 |
| 825 # Interval to check build status on tryserver. |
| 826 status_check_interval = 600 |
| 827 last_status_check = time.time() |
| 828 start_time = time.time() |
| 829 while True: |
| 830 # Check for the build on gs://chrome-perf and download it if it exists. |
| 831 res = fetch_build() |
| 832 if res: |
| 833 return (res, 'Build successfully found') |
| 834 elapsed_status_check = time.time() - last_status_check |
| 835 # To avoid overloading tryserver with status check requests, we check |
| 836 # build status only every 10 minutes. |
| 837 if elapsed_status_check > status_check_interval: |
| 838 last_status_check = time.time() |
| 839 if not build_num: |
| 840 # Get the build number on tryserver for the current build. |
| 841 build_num = bisect_builder.GetBuildNumFromBuilder( |
| 842 build_request_id, bot_name, builder_host, builder_port) |
| 843 # Check the status of build using the build number. |
| 844 # Note: Build is treated as PENDING if build number is not found |
| 845 # on the tryserver. |
| 846 build_status, status_link = bisect_builder.GetBuildStatus( |
| 847 build_num, bot_name, builder_host, builder_port) |
| 848 if build_status == bisect_builder.FAILED: |
| 849 return (None, 'Failed to produce build, log: %s' % status_link) |
| 850 elapsed_time = time.time() - start_time |
| 851 if elapsed_time > max_timeout: |
| 852 return (None, 'Timed out: %ss without build' % max_timeout) |
| 853 |
| 854 print 'Time elapsed: %ss without build.' % elapsed_time |
| 855 time.sleep(poll_interval) |
| 856 # For some reason, mac bisect bots were not flushing stdout periodically. |
| 857 # As a result, the buildbot command timed out. Flush stdout on all platforms |
| 858 # while waiting for build. |
| 859 sys.stdout.flush() |
| 860 |
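Note: the waiting logic above boils down to a generic two-tier polling loop (a cheap check every minute, a costlier status check every ten minutes, and a hard timeout). A minimal sketch of that pattern, with hypothetical check callables standing in for the cloud-storage fetch and the tryserver status request:

import time

def poll_until_ready(cheap_check, expensive_check,
                     poll_interval=60, status_check_interval=600,
                     max_timeout=14400):
  """Generic sketch of the two-tier polling loop used above."""
  start_time = last_status_check = time.time()
  while True:
    result = cheap_check()  # e.g. look for the archive in cloud storage.
    if result:
      return result
    if time.time() - last_status_check > status_check_interval:
      last_status_check = time.time()
      if expensive_check() == 'FAILED':  # e.g. ask the tryserver for status.
        return None
    if time.time() - start_time > max_timeout:
      return None  # Give up once the hard timeout is exceeded.
    time.sleep(poll_interval)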
| 861 |
| 862 def _UpdateV8Branch(deps_content): |
| 863 """Updates V8 branch in DEPS file to process v8_bleeding_edge. |
| 864 |
| 865 Check for "v8_branch" in DEPS file if exists update its value |
| 866 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS |
| 867 variable from DEPS revision 254916, therefore check for "src/v8": |
| 868 <v8 source path> in DEPS in order to support prior DEPS revisions |
| 869 and update it. |
| 870 |
| 871 Args: |
| 872 deps_content: DEPS file contents to be modified. |
| 873 |
| 874 Returns: |
| 875 Modified DEPS file contents as a string. |
| 876 """ |
| 877 new_branch = r'branches/bleeding_edge' |
| 878 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")') |
| 879 if re.search(v8_branch_pattern, deps_content): |
| 880 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content) |
| 881 else: |
| 882 # Replaces the branch assigned to "src/v8" key in DEPS file. |
| 883 # Format of "src/v8" in DEPS: |
| 884 # "src/v8": |
| 885 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"), |
| 886 # So, "/trunk@" is replace with "/branches/bleeding_edge@" |
| 887 v8_src_pattern = re.compile( |
| 888 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE) |
| 889 if re.search(v8_src_pattern, deps_content): |
| 890 deps_content = re.sub(v8_src_pattern, new_branch, deps_content) |
| 891 return deps_content |
| 892 |
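Note: to make the "src/v8" rewrite above concrete, here is a self-contained example on a made-up pre-254916 DEPS line; only the regex comes from the code above.

import re

# Made-up example of the pre-"v8_branch" DEPS format.
deps_line = ('"src/v8":\n'
             '    (Var("googlecode_url") % "v8") + "/trunk@" + '
             'Var("v8_revision"),')
v8_src_pattern = re.compile(
    r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
print re.sub(v8_src_pattern, 'branches/bleeding_edge', deps_line)
# "/trunk@" in the line is rewritten to "/branches/bleeding_edge@".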
| 893 |
| 894 def _UpdateDEPSForAngle(revision, depot, deps_file): |
| 895 """Updates DEPS file with new revision for Angle repository. |
| 896 |
| 897 This is a hack for the Angle depot because, in the DEPS file, the "vars" |
| 898 dictionary variable contains an "angle_revision" key that holds a git |
| 899 hash instead of an SVN revision. |
| 900 |
| 901 Sometimes the "angle_revision" key is not specified in the "vars" |
| 902 variable at all; in such cases, check the "deps" dictionary variable for |
| 903 an entry matching angle.git@[a-fA-F0-9]{40}$ and replace the git hash. |
| 904 """ |
| 905 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] |
| 906 try: |
| 907 deps_contents = ReadStringFromFile(deps_file) |
| 908 # Check whether the depot and revision pattern is in the DEPS file vars |
| 909 # variable, e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa". |
| 910 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' % |
| 911 deps_var, re.MULTILINE) |
| 912 match = re.search(angle_rev_pattern, deps_contents) |
| 913 if match: |
| 914 # Update the revision information for the given depot |
| 915 new_data = re.sub(angle_rev_pattern, revision, deps_contents) |
| 916 else: |
| 917 # Check whether the depot and revision pattern is in the DEPS file deps |
| 918 # variable, e.g., |
| 919 # "src/third_party/angle": Var("chromium_git") + |
| 920 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",. |
| 921 angle_rev_pattern = re.compile( |
| 922 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE) |
| 923 match = re.search(angle_rev_pattern, deps_contents) |
| 924 if not match: |
| 925 print 'Could not find angle revision information in DEPS file.' |
| 926 return False |
| 927 new_data = re.sub(angle_rev_pattern, revision, deps_contents) |
| 928 # Write changes to DEPS file |
| 929 WriteStringToFile(new_data, deps_file) |
| 930 return True |
| 931 except IOError as e: |
| 932 print 'Something went wrong while updating DEPS file, %s' % e |
| 933 return False |
| 934 |
| 935 |
| 936 def _TryParseHistogramValuesFromOutput(metric, text): |
| 937 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>. |
| 938 |
| 939 Args: |
| 940 metric: The metric as a list of [<trace>, <value>] strings. |
| 941 text: The text to parse the metric values from. |
| 942 |
| 943 Returns: |
| 944 A list of floating point numbers found, [] if none were found. |
| 945 """ |
| 946 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1]) |
| 947 |
| 948 text_lines = text.split('\n') |
| 949 values_list = [] |
| 950 |
| 951 for current_line in text_lines: |
| 952 if metric_formatted in current_line: |
| 953 current_line = current_line[len(metric_formatted):] |
| 954 |
| 955 try: |
| 956 histogram_values = eval(current_line) |
| 957 |
| 958 for b in histogram_values['buckets']: |
| 959 average_for_bucket = float(b['high'] + b['low']) * 0.5 |
| 960 # Extends the list with N-elements with the average for that bucket. |
| 961 values_list.extend([average_for_bucket] * b['count']) |
| 962 except Exception: |
| 963 pass |
| 964 |
| 965 return values_list |
| 966 |
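Note: a self-contained example of the bucket expansion above, on a made-up HISTOGRAM log line; ast.literal_eval is used here purely as a stand-in for the eval() call in the function, and the graph/trace names are invented.

import ast

prefix = 'HISTOGRAM my_graph: my_trace= '  # Graph/trace names are made up.
line = prefix + ("{'buckets': [{'low': 1, 'high': 3, 'count': 2}, "
                 "{'low': 3, 'high': 7, 'count': 1}]}")
histogram = ast.literal_eval(line[len(prefix):])
values = []
for b in histogram['buckets']:
  # Each bucket contributes its midpoint, repeated 'count' times.
  values.extend([float(b['high'] + b['low']) * 0.5] * b['count'])
print values  # [2.0, 2.0, 5.0]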
| 967 |
| 968 def _TryParseResultValuesFromOutput(metric, text): |
| 969 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ... |
| 970 |
| 971 Args: |
| 972 metric: The metric as a list of [<trace>, <value>] string pairs. |
| 973 text: The text to parse the metric values from. |
| 974 |
| 975 Returns: |
| 976 A list of floating point numbers found. |
| 977 """ |
| 978 # Format is: RESULT <graph>: <trace>= <value> <units> |
| 979 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1])) |
| 980 |
| 981 # The log will be parsed looking for format: |
| 982 # <*>RESULT <graph_name>: <trace_name>= <value> |
| 983 single_result_re = re.compile( |
| 984 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)') |
| 985 |
| 986 # The log will be parsed looking for format: |
| 987 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] |
| 988 multi_results_re = re.compile( |
| 989 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]') |
| 990 |
| 991 # The log will be parsed looking for format: |
| 992 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} |
| 993 mean_stddev_re = re.compile( |
| 994 metric_re + |
| 995 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}') |
| 996 |
| 997 text_lines = text.split('\n') |
| 998 values_list = [] |
| 999 for current_line in text_lines: |
| 1000 # Parse the output from the performance test for the metric we're |
| 1001 # interested in. |
| 1002 single_result_match = single_result_re.search(current_line) |
| 1003 multi_results_match = multi_results_re.search(current_line) |
| 1004 mean_stddev_match = mean_stddev_re.search(current_line) |
| 1005 if (not single_result_match is None and |
| 1006 single_result_match.group('VALUE')): |
| 1007 values_list += [single_result_match.group('VALUE')] |
| 1008 elif (not multi_results_match is None and |
| 1009 multi_results_match.group('VALUES')): |
| 1010 metric_values = multi_results_match.group('VALUES') |
| 1011 values_list += metric_values.split(',') |
| 1012 elif (not mean_stddev_match is None and |
| 1013 mean_stddev_match.group('MEAN')): |
| 1014 values_list += [mean_stddev_match.group('MEAN')] |
| 1015 |
| 1016 values_list = [float(v) for v in values_list |
| 1017 if bisect_utils.IsStringFloat(v)] |
| 1018 |
| 1019 # If the metric is times/t, we need to sum the timings in order to get |
| 1020 # regression results similar to those of the try-bots. |
| 1021 metrics_to_sum = [ |
| 1022 ['times', 't'], |
| 1023 ['times', 'page_load_time'], |
| 1024 ['cold_times', 'page_load_time'], |
| 1025 ['warm_times', 'page_load_time'], |
| 1026 ] |
| 1027 |
| 1028 if metric in metrics_to_sum: |
| 1029 if values_list: |
| 1030 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)] |
| 1031 |
| 1032 return values_list |
| 1033 |
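Note: a self-contained example of the three RESULT line formats matched above, using made-up values for the times/t metric:

import re

# Sample log lines and values below are made up for illustration.
metric_re = re.escape('RESULT %s: %s=' % ('times', 't'))
single_re = re.compile(metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
multi_re = re.compile(metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
mean_stddev_re = re.compile(
    metric_re +
    '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

print single_re.search('RESULT times: t= 11.5 ms').group('VALUE')    # 11.5
print multi_re.search('RESULT times: t= [1.0,2.0,3.0]').group('VALUES')
print mean_stddev_re.search('RESULT times: t= {2.0, 0.5}').group('MEAN')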
| 1034 |
| 1035 def _ParseMetricValuesFromOutput(metric, text): |
| 1036 """Parses output from performance_ui_tests and retrieves the results for |
| 1037 a given metric. |
| 1038 |
| 1039 Args: |
| 1040 metric: The metric as a list of [<trace>, <value>] strings. |
| 1041 text: The text to parse the metric values from. |
| 1042 |
| 1043 Returns: |
| 1044 A list of floating point numbers found. |
| 1045 """ |
| 1046 metric_values = _TryParseResultValuesFromOutput(metric, text) |
| 1047 |
| 1048 if not metric_values: |
| 1049 metric_values = _TryParseHistogramValuesFromOutput(metric, text) |
| 1050 |
| 1051 return metric_values |
| 1052 |
| 1053 |
| 1054 def _GenerateProfileIfNecessary(command_args): |
| 1055 """Checks the command line of the performance test for dependencies on |
| 1056 profile generation, and runs tools/perf/generate_profile as necessary. |
| 1057 |
| 1058 Args: |
| 1059 command_args: Command line being passed to performance test, as a list. |
| 1060 |
| 1061 Returns: |
| 1062 False if profile generation was necessary and failed, otherwise True. |
| 1063 """ |
| 1064 if '--profile-dir' in ' '.join(command_args): |
| 1065 # If we were using python 2.7+, we could just use the argparse |
| 1066 # module's parse_known_args to grab --profile-dir. Since some of the |
| 1067 # bots still run 2.6, we have to grab the arguments manually. |
| 1068 arg_dict = {} |
| 1069 args_to_parse = ['--profile-dir', '--browser'] |
| 1070 |
| 1071 for arg_to_parse in args_to_parse: |
| 1072 for i, current_arg in enumerate(command_args): |
| 1073 if arg_to_parse in current_arg: |
| 1074 current_arg_split = current_arg.split('=') |
| 1075 |
| 1076 # Check 2 cases, --arg=<val> and --arg <val> |
| 1077 if len(current_arg_split) == 2: |
| 1078 arg_dict[arg_to_parse] = current_arg_split[1] |
| 1079 elif i + 1 < len(command_args): |
| 1080 arg_dict[arg_to_parse] = command_args[i+1] |
| 1081 |
| 1082 path_to_generate = os.path.join('tools', 'perf', 'generate_profile') |
| 1083 |
| 1084 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): |
| 1085 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) |
| 1086 return not bisect_utils.RunProcess(['python', path_to_generate, |
| 1087 '--profile-type-to-generate', profile_type, |
| 1088 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) |
| 1089 return False |
| 1090 return True |
| 1091 |
| 1092 |
| 1093 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data): |
| 1094 """Adds new revisions to the revision_data dict and initializes them. |
| 1095 |
| 1096 Args: |
| 1097 revisions: List of revisions to add. |
| 1098 depot: Depot that's currently in use (src, webkit, etc...) |
| 1099 sort: Sorting key for displaying revisions. |
| 1100 revision_data: A dict to add the new revisions into. Existing revisions |
| 1101 will have their sort keys offset. |
| 1102 """ |
| 1103 num_depot_revisions = len(revisions) |
| 1104 |
| 1105 for _, v in revision_data.iteritems(): |
| 1106 if v['sort'] > sort: |
| 1107 v['sort'] += num_depot_revisions |
| 1108 |
| 1109 for i in xrange(num_depot_revisions): |
| 1110 r = revisions[i] |
| 1111 revision_data[r] = { |
| 1112 'revision' : r, |
| 1113 'depot' : depot, |
| 1114 'value' : None, |
| 1115 'perf_time' : 0, |
| 1116 'build_time' : 0, |
| 1117 'passed' : '?', |
| 1118 'sort' : i + sort + 1, |
| 1119 } |
| 1120 |
| 1121 |
| 1122 def _PrintThankYou(): |
| 1123 print RESULTS_THANKYOU |
| 1124 |
| 1125 |
| 1126 def _PrintTableRow(column_widths, row_data): |
| 1127 """Prints out a row in a formatted table that has columns aligned. |
| 1128 |
| 1129 Args: |
| 1130 column_widths: A list of column width numbers. |
| 1131 row_data: A list of items for each column in this row. |
| 1132 """ |
| 1133 assert len(column_widths) == len(row_data) |
| 1134 text = '' |
| 1135 for i in xrange(len(column_widths)): |
| 1136 current_row_data = row_data[i].center(column_widths[i], ' ') |
| 1137 text += ('%%%ds' % column_widths[i]) % current_row_data |
| 1138 print text |
| 1139 |
| 1140 |
| 1141 def _PrintStepTime(revision_data_sorted): |
| 1142 """Prints information about how long various steps took. |
| 1143 |
| 1144 Args: |
| 1145 revision_data_sorted: The sorted list of revision data dictionaries.""" |
| 1146 step_perf_time_avg = 0.0 |
| 1147 step_build_time_avg = 0.0 |
| 1148 step_count = 0.0 |
| 1149 for _, current_data in revision_data_sorted: |
| 1150 if current_data['value']: |
| 1151 step_perf_time_avg += current_data['perf_time'] |
| 1152 step_build_time_avg += current_data['build_time'] |
| 1153 step_count += 1 |
| 1154 if step_count: |
| 1155 step_perf_time_avg = step_perf_time_avg / step_count |
| 1156 step_build_time_avg = step_build_time_avg / step_count |
| 1157 print |
| 1158 print 'Average build time : %s' % datetime.timedelta( |
| 1159 seconds=int(step_build_time_avg)) |
| 1160 print 'Average test time : %s' % datetime.timedelta( |
| 1161 seconds=int(step_perf_time_avg)) |
| 1162 |
| 1163 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good): |
| 1164 """Compiles a list of other possible regressions from the revision data. |
| 1165 |
| 1166 Args: |
| 1167 revision_data_sorted: Sorted list of (revision, revision data dict) pairs. |
| 1168 bad_greater_than_good: Whether the result value at the "bad" revision is |
| 1169 numerically greater than the result value at the "good" revision. |
| 1170 |
| 1171 Returns: |
| 1172 A list of [current_rev, previous_rev, confidence] for other places where |
| 1173 there may have been a regression. |
| 1174 """ |
| 1175 other_regressions = [] |
| 1176 previous_values = [] |
| 1177 previous_id = None |
| 1178 for current_id, current_data in revision_data_sorted: |
| 1179 current_values = current_data['value'] |
| 1180 if current_values: |
| 1181 current_values = current_values['values'] |
| 1182 if previous_values: |
| 1183 confidence = ConfidenceScore(previous_values, [current_values]) |
| 1184 mean_of_prev_runs = math_utils.Mean(sum(previous_values, [])) |
| 1185 mean_of_current_runs = math_utils.Mean(current_values) |
| 1186 |
| 1187 # Check that the potential regression is in the same direction as |
| 1188 # the overall regression. If the mean of the previous runs < the |
| 1189 # mean of the current runs, this local regression is in the same |
| 1190 # direction. |
| 1191 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs |
| 1192 is_same_direction = (prev_less_than_current if |
| 1193 bad_greater_than_good else not prev_less_than_current) |
| 1194 |
| 1195 # Only report potential regressions with high confidence. |
| 1196 if is_same_direction and confidence > 50: |
| 1197 other_regressions.append([current_id, previous_id, confidence]) |
| 1198 previous_values.append(current_values) |
| 1199 previous_id = current_id |
| 1200 return other_regressions |
774 | 1201 |
775 class BisectPerformanceMetrics(object): | 1202 class BisectPerformanceMetrics(object): |
776 """This class contains functionality to perform a bisection of a range of | 1203 """This class contains functionality to perform a bisection of a range of |
777 revisions to narrow down where performance regressions may have occurred. | 1204 revisions to narrow down where performance regressions may have occurred. |
778 | 1205 |
779 The main entry-point is the Run method. | 1206 The main entry-point is the Run method. |
780 """ | 1207 """ |
781 | 1208 |
782 def __init__(self, source_control, opts): | 1209 def __init__(self, source_control, opts): |
783 super(BisectPerformanceMetrics, self).__init__() | 1210 super(BisectPerformanceMetrics, self).__init__() |
(...skipping 39 matching lines...) |
823 cwd = os.getcwd() | 1250 cwd = os.getcwd() |
824 self.ChangeToDepotWorkingDirectory('cros') | 1251 self.ChangeToDepotWorkingDirectory('cros') |
825 | 1252 |
826 # Print the commit timestamps for every commit in the revision time | 1253 # Print the commit timestamps for every commit in the revision time |
827 # range. We'll sort them and bisect by that. There is a remote chance that | 1254 # range. We'll sort them and bisect by that. There is a remote chance that |
828 # 2 (or more) commits will share the exact same timestamp, but it's | 1255 # 2 (or more) commits will share the exact same timestamp, but it's |
829 # probably safe to ignore that case. | 1256 # probably safe to ignore that case. |
830 cmd = ['repo', 'forall', '-c', | 1257 cmd = ['repo', 'forall', '-c', |
831 'git log --format=%%ct --before=%d --after=%d' % ( | 1258 'git log --format=%%ct --before=%d --after=%d' % ( |
832 revision_range_end, revision_range_start)] | 1259 revision_range_end, revision_range_start)] |
833 (output, return_code) = bisect_utils.RunProcessAndRetrieveOutput(cmd) | 1260 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd) |
834 | 1261 |
835 assert not return_code, 'An error occurred while running'\ | 1262 assert not return_code, ('An error occurred while running ' |
836 ' "%s"' % ' '.join(cmd) | 1263 '"%s"' % ' '.join(cmd)) |
837 | 1264 |
838 os.chdir(cwd) | 1265 os.chdir(cwd) |
839 | 1266 |
840 revision_work_list = list(set( | 1267 revision_work_list = list(set( |
841 [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)])) | 1268 [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)])) |
842 revision_work_list = sorted(revision_work_list, reverse=True) | 1269 revision_work_list = sorted(revision_work_list, reverse=True) |
843 else: | 1270 else: |
844 cwd = self._GetDepotDirectory(depot) | 1271 cwd = self._GetDepotDirectory(depot) |
845 revision_work_list = self.source_control.GetRevisionList(bad_revision, | 1272 revision_work_list = self.source_control.GetRevisionList(bad_revision, |
846 good_revision, cwd=cwd) | 1273 good_revision, cwd=cwd) |
(...skipping 77 matching lines...) |
924 | 1351 |
925 bleeding_edge_revision = None | 1352 bleeding_edge_revision = None |
926 | 1353 |
927 for c in commits: | 1354 for c in commits: |
928 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c) | 1355 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c) |
929 if bleeding_edge_revision: | 1356 if bleeding_edge_revision: |
930 break | 1357 break |
931 | 1358 |
932 return bleeding_edge_revision | 1359 return bleeding_edge_revision |
933 | 1360 |
934 def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents): | |
935 """Manually parses the vars section of the DEPS file to determine | |
936 chromium/blink/etc... revisions. | |
937 | |
938 Returns: | |
939 A dict in the format {depot:revision} if successful, otherwise None. | |
940 """ | |
941 # We'll parse the "vars" section of the DEPS file. | |
942 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE) | |
943 re_results = rxp.search(deps_file_contents) | |
944 locals = {} | |
945 | |
946 if not re_results: | |
947 return None | |
948 | |
949 # We should be left with a series of entries in the vars component of | |
950 # the DEPS file with the following format: | |
951 # 'depot_name': 'revision', | |
952 vars_body = re_results.group('vars_body') | |
953 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'", | |
954 re.MULTILINE) | |
955 re_results = rxp.findall(vars_body) | |
956 | |
957 return dict(re_results) | |
958 | |
959 def _ParseRevisionsFromDEPSFile(self, depot): | 1361 def _ParseRevisionsFromDEPSFile(self, depot): |
960 """Parses the local DEPS file to determine blink/skia/v8 revisions which may | 1362 """Parses the local DEPS file to determine blink/skia/v8 revisions which may |
961 be needed if the bisect recurses into those depots later. | 1363 be needed if the bisect recurses into those depots later. |
962 | 1364 |
963 Args: | 1365 Args: |
964 depot: Depot being bisected. | 1366 depot: Name of depot being bisected. |
965 | 1367 |
966 Returns: | 1368 Returns: |
967 A dict in the format {depot:revision} if successful, otherwise None. | 1369 A dict in the format {depot:revision} if successful, otherwise None. |
968 """ | 1370 """ |
969 try: | 1371 try: |
970 deps_data = {'Var': lambda _: deps_data["vars"][_], | 1372 deps_data = { |
971 'From': lambda *args: None | 1373 'Var': lambda _: deps_data["vars"][_], |
972 } | 1374 'From': lambda *args: None, |
| 1375 } |
973 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data) | 1376 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data) |
974 deps_data = deps_data['deps'] | 1377 deps_data = deps_data['deps'] |
975 | 1378 |
976 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)") | 1379 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)") |
977 results = {} | 1380 results = {} |
978 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems(): | 1381 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems(): |
979 if (depot_data.get('platform') and | 1382 if (depot_data.get('platform') and |
980 depot_data.get('platform') != os.name): | 1383 depot_data.get('platform') != os.name): |
981 continue | 1384 continue |
982 | 1385 |
983 if (depot_data.get('recurse') and depot in depot_data.get('from')): | 1386 if (depot_data.get('recurse') and depot in depot_data.get('from')): |
984 depot_data_src = depot_data.get('src') or depot_data.get('src_old') | 1387 depot_data_src = depot_data.get('src') or depot_data.get('src_old') |
985 src_dir = deps_data.get(depot_data_src) | 1388 src_dir = deps_data.get(depot_data_src) |
986 if src_dir: | 1389 if src_dir: |
987 self.depot_cwd[depot_name] = os.path.join(self.src_cwd, | 1390 self.depot_cwd[depot_name] = os.path.join(self.src_cwd, |
988 depot_data_src[4:]) | 1391 depot_data_src[4:]) |
989 re_results = rxp.search(src_dir) | 1392 re_results = rxp.search(src_dir) |
990 if re_results: | 1393 if re_results: |
991 results[depot_name] = re_results.group('revision') | 1394 results[depot_name] = re_results.group('revision') |
992 else: | 1395 else: |
993 warning_text = ('Couldn\'t parse revision for %s while bisecting ' | 1396 warning_text = ('Couldn\'t parse revision for %s while bisecting ' |
994 '%s' % (depot_name, depot)) | 1397 '%s' % (depot_name, depot)) |
995 if not warning_text in self.warnings: | 1398 if not warning_text in self.warnings: |
996 self.warnings.append(warning_text) | 1399 self.warnings.append(warning_text) |
997 else: | 1400 else: |
998 results[depot_name] = None | 1401 results[depot_name] = None |
999 return results | 1402 return results |
1000 except ImportError: | 1403 except ImportError: |
1001 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT) | 1404 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT) |
1002 parse_results = self._ParseRevisionsFromDEPSFileManually( | 1405 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents) |
1003 deps_file_contents) | |
1004 results = {} | 1406 results = {} |
1005 for depot_name, depot_revision in parse_results.iteritems(): | 1407 for depot_name, depot_revision in parse_results.iteritems(): |
1006 depot_revision = depot_revision.strip('@') | 1408 depot_revision = depot_revision.strip('@') |
1007 print depot_name, depot_revision | 1409 print depot_name, depot_revision |
1008 for current_name, current_data in DEPOT_DEPS_NAME.iteritems(): | 1410 for current_name, current_data in DEPOT_DEPS_NAME.iteritems(): |
1009 if (current_data.has_key('deps_var') and | 1411 if (current_data.has_key('deps_var') and |
1010 current_data['deps_var'] == depot_name): | 1412 current_data['deps_var'] == depot_name): |
1011 src_name = current_name | 1413 src_name = current_name |
1012 results[src_name] = depot_revision | 1414 results[src_name] = depot_revision |
1013 break | 1415 break |
1014 return results | 1416 return results |
1015 | 1417 |
1016 def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision): | 1418 def _Get3rdPartyRevisions(self, depot): |
1017 """Parses the DEPS file to determine WebKit/v8/etc... versions. | 1419 """Parses the DEPS file to determine WebKit/v8/etc... versions. |
1018 | 1420 |
1019 Returns: | 1421 Returns: |
1020 A dict in the format {depot:revision} if successful, otherwise None. | 1422 A dict in the format {depot:revision} if successful, otherwise None. |
1021 """ | 1423 """ |
1022 cwd = os.getcwd() | 1424 cwd = os.getcwd() |
1023 self.ChangeToDepotWorkingDirectory(depot) | 1425 self.ChangeToDepotWorkingDirectory(depot) |
1024 | 1426 |
1025 results = {} | 1427 results = {} |
1026 | 1428 |
1027 if depot == 'chromium' or depot == 'android-chrome': | 1429 if depot == 'chromium' or depot == 'android-chrome': |
1028 results = self._ParseRevisionsFromDEPSFile(depot) | 1430 results = self._ParseRevisionsFromDEPSFile(depot) |
1029 os.chdir(cwd) | 1431 os.chdir(cwd) |
1030 elif depot == 'cros': | 1432 elif depot == 'cros': |
1031 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board, | 1433 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board, |
1032 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild', | 1434 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild', |
1033 CROS_CHROMEOS_PATTERN] | 1435 CROS_CHROMEOS_PATTERN] |
1034 (output, return_code) = bisect_utils.RunProcessAndRetrieveOutput(cmd) | 1436 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd) |
1035 | 1437 |
1036 assert not return_code, 'An error occurred while running' \ | 1438 assert not return_code, ('An error occurred while running ' |
1037 ' "%s"' % ' '.join(cmd) | 1439 '"%s"' % ' '.join(cmd)) |
1038 | 1440 |
1039 if len(output) > CROS_CHROMEOS_PATTERN: | 1441 if len(output) > CROS_CHROMEOS_PATTERN: |
1040 output = output[len(CROS_CHROMEOS_PATTERN):] | 1442 output = output[len(CROS_CHROMEOS_PATTERN):] |
1041 | 1443 |
1042 if len(output) > 1: | 1444 if len(output) > 1: |
1043 output = output.split('_')[0] | 1445 output = output.split('_')[0] |
1044 | 1446 |
1045 if len(output) > 3: | 1447 if len(output) > 3: |
1046 contents = output.split('.') | 1448 contents = output.split('.') |
1047 | 1449 |
1048 version = contents[2] | 1450 version = contents[2] |
1049 | 1451 |
1050 if contents[3] != '0': | 1452 if contents[3] != '0': |
1051 warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \ | 1453 warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' % |
1052 (version, contents[3], version) | 1454 (version, contents[3], version)) |
1053 if not warningText in self.warnings: | 1455 if not warningText in self.warnings: |
1054 self.warnings.append(warningText) | 1456 self.warnings.append(warningText) |
1055 | 1457 |
1056 cwd = os.getcwd() | 1458 cwd = os.getcwd() |
1057 self.ChangeToDepotWorkingDirectory('chromium') | 1459 self.ChangeToDepotWorkingDirectory('chromium') |
1058 cmd = ['log', '-1', '--format=%H', | 1460 cmd = ['log', '-1', '--format=%H', |
1059 '--author=chrome-release@google.com', | 1461 '--author=chrome-release@google.com', |
1060 '--grep=to %s' % version, 'origin/master'] | 1462 '--grep=to %s' % version, 'origin/master'] |
1061 return_code = bisect_utils.CheckRunGit(cmd) | 1463 return_code = bisect_utils.CheckRunGit(cmd) |
1062 os.chdir(cwd) | 1464 os.chdir(cwd) |
(...skipping 115 matching lines...)
1178 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)): | 1580 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)): |
1179 output_dir = os.path.join(abs_build_dir, 'out', build_type) | 1581 output_dir = os.path.join(abs_build_dir, 'out', build_type) |
1180 else: | 1582 else: |
1181 raise IOError('Missing extracted folder %s ' % output_dir) | 1583 raise IOError('Missing extracted folder %s ' % output_dir) |
1182 | 1584 |
1183 print 'Moving build from %s to %s' % ( | 1585 print 'Moving build from %s to %s' % ( |
1184 output_dir, target_build_output_dir) | 1586 output_dir, target_build_output_dir) |
1185 shutil.move(output_dir, target_build_output_dir) | 1587 shutil.move(output_dir, target_build_output_dir) |
1186 return True | 1588 return True |
1187 except Exception as e: | 1589 except Exception as e: |
1188 print 'Somewthing went wrong while extracting archive file: %s' % e | 1590 print 'Something went wrong while extracting archive file: %s' % e |
1189 self.BackupOrRestoreOutputdirectory(restore=True) | 1591 self.BackupOrRestoreOutputdirectory(restore=True) |
1190 # Clean up any leftovers from unzipping. | 1592 # Clean up any leftovers from unzipping.
1191 if os.path.exists(output_dir): | 1593 if os.path.exists(output_dir): |
1192 RmTreeAndMkDir(output_dir, skip_makedir=True) | 1594 RmTreeAndMkDir(output_dir, skip_makedir=True) |
1193 finally: | 1595 finally: |
1194 # Delete downloaded archive | 1596 # Delete downloaded archive |
1195 if os.path.exists(downloaded_file): | 1597 if os.path.exists(downloaded_file): |
1196 os.remove(downloaded_file) | 1598 os.remove(downloaded_file) |
1197 return False | 1599 return False |
1198 | 1600 |
1199 def WaitUntilBuildIsReady(self, fetch_build, bot_name, builder_host, | |
1200 builder_port, build_request_id, max_timeout): | |
1201 """Waits until build is produced by bisect builder on tryserver. | |
1202 | |
1203 Args: | |
1204 fetch_build: Function to check and download build from cloud storage. | |
1205 bot_name: Builder bot name on tryserver. | |
1206 builder_host: Tryserver hostname. |
1207 builder_port: Tryserver port. | |
1208 build_request_id: A unique ID of the build request posted to tryserver. | |
1209 max_timeout: Maximum time to wait for the build. | |
1210 | |
1211 Returns: | |
1212 Downloaded archive file path if exists, otherwise None. | |
1213 """ | |
1214 # Build number on the tryserver. | |
1215 build_num = None | |
1216 # Interval to check build on cloud storage. | |
1217 poll_interval = 60 | |
1218 # Interval to check build status on tryserver. | |
1219 status_check_interval = 600 | |
1220 last_status_check = time.time() | |
1221 start_time = time.time() | |
1222 while True: | |
1223 # Check for a build on gs://chrome-perf and download it if it exists. |
1224 res = fetch_build() | |
1225 if res: | |
1226 return (res, 'Build successfully found') | |
1227 elapsed_status_check = time.time() - last_status_check | |
1228 # To avoid overloading the tryserver with status check requests, we check |
1229 # the build status only every 10 minutes. |
1230 if elapsed_status_check > status_check_interval: | |
1231 last_status_check = time.time() | |
1232 if not build_num: | |
1233 # Get the build number on tryserver for the current build. | |
1234 build_num = bisect_builder.GetBuildNumFromBuilder( | |
1235 build_request_id, bot_name, builder_host, builder_port) | |
1236 # Check the status of build using the build number. | |
1237 # Note: The build is treated as PENDING if the build number is not found |
1238 # on the tryserver. |
1239 build_status, status_link = bisect_builder.GetBuildStatus( | |
1240 build_num, bot_name, builder_host, builder_port) | |
1241 if build_status == bisect_builder.FAILED: | |
1242 return (None, 'Failed to produce build, log: %s' % status_link) | |
1243 elapsed_time = time.time() - start_time | |
1244 if elapsed_time > max_timeout: | |
1245 return (None, 'Timed out: %ss without build' % max_timeout) | |
1246 | |
1247 print 'Time elapsed: %ss without build.' % elapsed_time | |
1248 time.sleep(poll_interval) | |
1249 # For some reason, mac bisect bots were not flushing stdout periodically. | |
1250 # As a result, the buildbot command timed out. Flush stdout on all platforms |
1251 # while waiting for the build. |
1252 sys.stdout.flush() | |
1253 | |
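The WaitUntilBuildIsReady method above (moved to a module-level _WaitUntilBuildIsReady in the new revision) is essentially a two-rate polling loop: cloud storage is checked every 60 seconds, while tryserver status is queried only every 10 minutes to avoid overloading it, with an overall timeout. A rough sketch of that pattern with the tryserver plumbing abstracted into caller-supplied callables (fetch_build and build_has_failed are hypothetical stand-ins):

import time

def _PollForBuild(fetch_build, build_has_failed, max_timeout,
                  poll_interval=60, status_check_interval=600):
  """Two-rate polling: poll storage often, query the builder rarely."""
  start_time = last_status_check = time.time()
  while True:
    archive = fetch_build()          # Cheap check against cloud storage.
    if archive:
      return (archive, 'Build successfully found')
    now = time.time()
    if now - last_status_check > status_check_interval:
      last_status_check = now
      if build_has_failed():         # Expensive check against the tryserver.
        return (None, 'Failed to produce build')
    if now - start_time > max_timeout:
      return (None, 'Timed out: %ss without build' % max_timeout)
    time.sleep(poll_interval)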
1254 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None): | 1601 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None): |
1255 """POSTs the build request job to the tryserver instance. | 1602 """POSTs the build request job to the tryserver instance. |
1256 | 1603 |
1257 A try job build request is posted to the tryserver.chromium.perf master, | 1604 A try job build request is posted to the tryserver.chromium.perf master,
1258 and this method waits for the binaries to be produced and archived on | 1605 and this method waits for the binaries to be produced and archived on
1259 cloud storage. Once the build is ready and stored in the cloud, the build | 1606 cloud storage. Once the build is ready and stored in the cloud, the build
1260 archive is downloaded into the output folder. | 1607 archive is downloaded into the output folder.
1261 | 1608 |
1262 Args: | 1609 Args: |
1263 revision: A Git hash revision. | 1610 revision: A Git hash revision. |
1264 fetch_build: Function to check and download build from cloud storage. | 1611 fetch_build: Function to check and download build from cloud storage. |
1265 patch: A DEPS patch (used while bisecting 3rd party repositories). | 1612 patch: A DEPS patch (used while bisecting 3rd party repositories). |
1266 | 1613 |
1267 Returns: | 1614 Returns: |
1268 Downloaded archive file path when requested build exists and download is | 1615 Downloaded archive file path when requested build exists and download is |
1269 successful, otherwise None. | 1616 successful, otherwise None. |
1270 """ | 1617 """ |
1271 # Get SVN revision for the given SHA. | 1618 # Get SVN revision for the given SHA. |
1272 svn_revision = self.source_control.SVNFindRev(revision) | 1619 svn_revision = self.source_control.SVNFindRev(revision) |
1273 if not svn_revision: | 1620 if not svn_revision: |
1274 raise RuntimeError( | 1621 raise RuntimeError( |
1275 'Failed to determine SVN revision for %s' % revision) | 1622 'Failed to determine SVN revision for %s' % revision) |
1276 | 1623 |
1277 def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'): | 1624 def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'): |
1278 """Gets builder bot name and buildtime in seconds based on platform.""" | 1625 """Gets builder bot name and build time in seconds based on platform.""" |
1279 # Bot names should match the one listed in tryserver.chromium's | 1626 # Bot names should match the one listed in tryserver.chromium's |
1280 # master.cfg which produces builds for bisect. | 1627 # master.cfg which produces builds for bisect. |
1281 if bisect_utils.IsWindowsHost(): | 1628 if bisect_utils.IsWindowsHost(): |
1282 if bisect_utils.Is64BitWindows() and target_arch == 'x64': | 1629 if bisect_utils.Is64BitWindows() and target_arch == 'x64': |
1283 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME) | 1630 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME) |
1284 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME) | 1631 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME) |
1285 if bisect_utils.IsLinuxHost(): | 1632 if bisect_utils.IsLinuxHost(): |
1286 if target_platform == 'android': | 1633 if target_platform == 'android': |
1287 return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME) | 1634 return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME) |
1288 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME) | 1635 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME) |
1289 if bisect_utils.IsMacHost(): | 1636 if bisect_utils.IsMacHost(): |
1290 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME) | 1637 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME) |
1291 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform) | 1638 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform) |
1292 if not fetch_build: | 1639 if not fetch_build: |
1293 return False | 1640 return False |
1294 | 1641 |
1295 bot_name, build_timeout = GetBuilderNameAndBuildTime( | 1642 bot_name, build_timeout = GetBuilderNameAndBuildTime( |
1296 self.opts.target_platform, self.opts.target_arch) | 1643 self.opts.target_platform, self.opts.target_arch) |
1297 builder_host = self.opts.builder_host | 1644 builder_host = self.opts.builder_host |
1298 builder_port = self.opts.builder_port | 1645 builder_port = self.opts.builder_port |
1299 # Create a unique ID for each build request posted to tryserver builders. | 1646 # Create a unique ID for each build request posted to tryserver builders. |
1300 # This ID is added to "Reason" property in build's json. | 1647 # This ID is added to "Reason" property in build's json. |
1301 build_request_id = GetSHA1HexDigest( | 1648 build_request_id = GetSHA1HexDigest( |
1302 '%s-%s-%s' % (svn_revision, patch, time.time())) | 1649 '%s-%s-%s' % (svn_revision, patch, time.time())) |
1303 | 1650 |
1304 # Creates a try job description. | 1651 # Creates a try job description. |
1305 job_args = {'host': builder_host, | 1652 job_args = { |
1306 'port': builder_port, | 1653 'host': builder_host, |
1307 'revision': 'src@%s' % svn_revision, | 1654 'port': builder_port, |
1308 'bot': bot_name, | 1655 'revision': 'src@%s' % svn_revision, |
1309 'name': build_request_id | 1656 'bot': bot_name, |
1310 } | 1657 'name': build_request_id, |
| 1658 } |
1311 # Update patch information if supplied. | 1659 # Update patch information if supplied. |
1312 if patch: | 1660 if patch: |
1313 job_args['patch'] = patch | 1661 job_args['patch'] = patch |
1314 # Posts job to build the revision on the server. | 1662 # Posts job to build the revision on the server. |
1315 if bisect_builder.PostTryJob(job_args): | 1663 if bisect_builder.PostTryJob(job_args): |
1316 target_file, error_msg = self.WaitUntilBuildIsReady(fetch_build, | 1664 target_file, error_msg = _WaitUntilBuildIsReady( |
1317 bot_name, | 1665 fetch_build, bot_name, builder_host, builder_port, build_request_id, |
1318 builder_host, | 1666 build_timeout) |
1319 builder_port, | |
1320 build_request_id, | |
1321 build_timeout) | |
1322 if not target_file: | 1667 if not target_file: |
1323 print '%s [revision: %s]' % (error_msg, svn_revision) | 1668 print '%s [revision: %s]' % (error_msg, svn_revision) |
1324 return None | 1669 return None |
1325 return target_file | 1670 return target_file |
1326 print 'Failed to post build request for revision: [%s]' % svn_revision | 1671 print 'Failed to post build request for revision: [%s]' % svn_revision |
1327 return None | 1672 return None |
1328 | 1673 |
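For context, build_request_id above is just a SHA1 digest over the SVN revision, the optional DEPS patch, and the current time, which is what makes each posted try job uniquely identifiable in its "Reason" property. A sketch under the assumption that GetSHA1HexDigest() is a thin wrapper around hashlib (the revision number below is made up):

import hashlib
import time

def _MakeBuildRequestId(svn_revision, patch):
  # Sketch only: assumes GetSHA1HexDigest() simply hex-digests its input.
  # The time.time() component keeps repeated requests for the same revision
  # (and same DEPS patch) distinguishable on the builder.
  payload = '%s-%s-%s' % (svn_revision, patch, time.time())
  return hashlib.sha1(payload.encode('utf-8')).hexdigest()

print(_MakeBuildRequestId(268123, None))  # 40 hex characters, e.g. 'e8c2a1...'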
1329 def IsDownloadable(self, depot): | 1674 def IsDownloadable(self, depot): |
1330 """Checks if build is downloadable based on target platform and depot.""" | 1675 """Checks if build is downloadable based on target platform and depot.""" |
1331 if (self.opts.target_platform in ['chromium', 'android'] and | 1676 if (self.opts.target_platform in ['chromium', 'android'] and |
(...skipping 20 matching lines...)
1352 """ | 1697 """ |
1353 if not os.path.exists(deps_file): | 1698 if not os.path.exists(deps_file): |
1354 return False | 1699 return False |
1355 | 1700 |
1356 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] | 1701 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] |
1357 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME. | 1702 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME. |
1358 if not deps_var: | 1703 if not deps_var: |
1359 print 'DEPS update not supported for depot: %s' % depot | 1704 print 'DEPS update not supported for depot: %s' % depot
1360 return False | 1705 return False |
1361 | 1706 |
1362 # Hack to Angle repository because, in DEPS file "vars" dictionary variable | 1707 # Hack for Angle repository. In the DEPS file, "vars" dictionary variable |
1363 # contains "angle_revision" key that holds git hash instead of SVN revision. | 1708 # contains "angle_revision" key that holds git hash instead of SVN revision. |
1364 # And sometime "angle_revision" key is not specified in "vars" variable, | 1709 # Sometimes the "angle_revision" key is not specified in "vars" at all.
1365 # in such cases check "deps" dictionary variable that matches | 1710 # In such cases, check the "deps" dictionary entry that matches
1366 # angle.git@[a-fA-F0-9]{40}$ and replace git hash. | 1711 # angle.git@[a-fA-F0-9]{40}$ and replace git hash. |
1367 if depot == 'angle': | 1712 if depot == 'angle': |
1368 return self.UpdateDEPSForAngle(revision, depot, deps_file) | 1713 return _UpdateDEPSForAngle(revision, depot, deps_file) |
1369 | 1714 |
1370 try: | 1715 try: |
1371 deps_contents = ReadStringFromFile(deps_file) | 1716 deps_contents = ReadStringFromFile(deps_file) |
1372 # Check for the depot revision pattern in the DEPS file "vars" entry, | 1717 # Check for the depot revision pattern in the DEPS file "vars" entry,
1373 # e.g. for webkit the format is "webkit_revision": "12345". | 1718 # e.g. for webkit the format is "webkit_revision": "12345".
1374 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var, | 1719 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var, |
1375 re.MULTILINE) | 1720 re.MULTILINE) |
1376 match = re.search(deps_revision, deps_contents) | 1721 match = re.search(deps_revision, deps_contents) |
1377 if match: | 1722 if match: |
1378 svn_revision = self.source_control.SVNFindRev( | 1723 svn_revision = self.source_control.SVNFindRev( |
1379 revision, self._GetDepotDirectory(depot)) | 1724 revision, self._GetDepotDirectory(depot)) |
1380 if not svn_revision: | 1725 if not svn_revision: |
1381 print 'Could not determine SVN revision for %s' % revision | 1726 print 'Could not determine SVN revision for %s' % revision |
1382 return False | 1727 return False |
1383 # Update the revision information for the given depot | 1728 # Update the revision information for the given depot |
1384 new_data = re.sub(deps_revision, str(svn_revision), deps_contents) | 1729 new_data = re.sub(deps_revision, str(svn_revision), deps_contents) |
1385 | 1730 |
1386 # For v8_bleeding_edge revisions change V8 branch in order | 1731 # For v8_bleeding_edge revisions change V8 branch in order |
1387 # to fetch bleeding edge revision. | 1732 # to fetch bleeding edge revision. |
1388 if depot == 'v8_bleeding_edge': | 1733 if depot == 'v8_bleeding_edge': |
1389 new_data = self.UpdateV8Branch(new_data) | 1734 new_data = _UpdateV8Branch(new_data) |
1390 if not new_data: | 1735 if not new_data: |
1391 return False | 1736 return False |
1392 # Write changes to DEPS file | 1737 # Write changes to DEPS file |
1393 WriteStringToFile(new_data, deps_file) | 1738 WriteStringToFile(new_data, deps_file) |
1394 return True | 1739 return True |
1395 except IOError, e: | 1740 except IOError, e: |
1396 print 'Something went wrong while updating DEPS file. [%s]' % e | 1741 print 'Something went wrong while updating DEPS file. [%s]' % e |
1397 return False | 1742 return False |
1398 | 1743 |
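The substitution in UpdateDeps relies on a fixed-width lookbehind/lookahead pair so that only the quoted revision number is rewritten while the key and quotes stay intact. A small self-contained illustration against a hypothetical DEPS fragment:

import re

_DEPS_SNIPPET = '''
vars = {
  "webkit_revision": "171777",
  "v8_revision": "23456",
}
'''  # Hypothetical DEPS fragment with made-up revision numbers.

deps_var = 'webkit_revision'
pattern = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var, re.MULTILINE)
updated = re.sub(pattern, '171800', _DEPS_SNIPPET)
assert '"webkit_revision": "171800"' in updated
assert '"v8_revision": "23456"' in updated  # Other entries stay untouched.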
1399 def UpdateV8Branch(self, deps_content): | |
1400 """Updates V8 branch in DEPS file to process v8_bleeding_edge. | |
1401 | |
1402 Checks for "v8_branch" in the DEPS file and, if it exists, updates its |
1403 value to the v8_bleeding_edge branch. Note: "v8_branch" was only added to |
1404 DEPS at revision 254916, so for earlier DEPS revisions the "src/v8": |
1405 <v8 source path> entry is checked and updated instead. |
1407 | |
1408 Args: | |
1409 deps_content: DEPS file contents to be modified. | |
1410 | |
1411 Returns: | |
1412 Modified DEPS file contents as a string. | |
1413 """ | |
1414 new_branch = r'branches/bleeding_edge' | |
1415 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")') | |
1416 if re.search(v8_branch_pattern, deps_content): | |
1417 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content) | |
1418 else: | |
1419 # Replaces the branch assigned to "src/v8" key in DEPS file. | |
1420 # Format of "src/v8" in DEPS: | |
1421 # "src/v8": | |
1422 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"), | |
1423 # So, "/trunk@" is replaced with "/branches/bleeding_edge@". |
1424 v8_src_pattern = re.compile( | |
1425 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE) | |
1426 if re.search(v8_src_pattern, deps_content): | |
1427 deps_content = re.sub(v8_src_pattern, new_branch, deps_content) | |
1428 return deps_content | |
1429 | |
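Worked examples of the two substitutions UpdateV8Branch performs, applied to hypothetical one-line DEPS fragments (newer DEPS files expose "v8_branch" directly; older ones only name the branch inside the "src/v8" entry):

import re

new_branch = r'branches/bleeding_edge'

# Newer DEPS files carry an explicit "v8_branch" variable:
v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
assert re.sub(v8_branch_pattern, new_branch, '"v8_branch": "trunk",') == (
    '"v8_branch": "branches/bleeding_edge",')

# Older DEPS files only name the branch inside the "src/v8" entry:
v8_src_pattern = re.compile(
    r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
old_style = '(Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),'
assert '/branches/bleeding_edge@' in re.sub(v8_src_pattern, new_branch, old_style)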
1430 def UpdateDEPSForAngle(self, revision, depot, deps_file): | |
1431 """Updates DEPS file with new revision for Angle repository. | |
1432 | |
1433 This is a hack for the Angle depot because, in the DEPS file, the "vars" |
1434 dictionary contains an "angle_revision" key that holds a git hash instead |
1435 of an SVN revision. |
1436 | |
1437 And sometimes "angle_revision" key is not specified in "vars" variable, | |
1438 in such cases check "deps" dictionary variable that matches | |
1439 angle.git@[a-fA-F0-9]{40}$ and replace git hash. | |
1440 """ | |
1441 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] | |
1442 try: | |
1443 deps_contents = ReadStringFromFile(deps_file) | |
1444 # Check for the depot revision pattern in the DEPS file "vars" entry, |
1445 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa". |
1446 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' % | |
1447 deps_var, re.MULTILINE) | |
1448 match = re.search(angle_rev_pattern, deps_contents) |
1449 if match: | |
1450 # Update the revision information for the given depot | |
1451 new_data = re.sub(angle_rev_pattern, revision, deps_contents) | |
1452 else: | |
1453 # Check whether the depot and revision pattern in DEPS file deps | |
1454 # variable. e.g., | |
1455 # "src/third_party/angle": Var("chromium_git") + | |
1456 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",. | |
1457 angle_rev_pattern = re.compile( | |
1458 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE) | |
1459 match = re.search(angle_rev_pattern, deps_contents) | |
1460 if not match: | |
1461 print 'Could not find angle revision information in DEPS file.' | |
1462 return False | |
1463 new_data = re.sub(angle_rev_pattern, revision, deps_contents) | |
1464 # Write changes to DEPS file | |
1465 WriteStringToFile(new_data, deps_file) | |
1466 return True | |
1467 except IOError, e: | |
1468 print 'Something went wrong while updating DEPS file, %s' % e | |
1469 return False | |
1470 | |
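Similarly, the Angle hack looks for the git hash in two possible places. A quick illustration with made-up hashes:

import re

fake_hash = 'fa63e947cb3eccf463648d21a05d5002c9b8adfa'
new_hash = 'a' * 40  # Hypothetical replacement revision.

# Case 1: the hash lives in the "vars" dictionary under "angle_revision".
vars_pattern = re.compile(
    r'(?<="angle_revision": ")([a-fA-F0-9]{40})(?=")', re.MULTILINE)
assert new_hash in re.sub(
    vars_pattern, new_hash, '"angle_revision": "%s",' % fake_hash)

# Case 2: the hash is embedded directly in the "deps" entry for angle.git.
deps_pattern = re.compile(r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
assert new_hash in re.sub(
    deps_pattern, new_hash, '"/angle/angle.git@%s",' % fake_hash)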
1471 def CreateDEPSPatch(self, depot, revision): | 1744 def CreateDEPSPatch(self, depot, revision): |
1472 """Modifies DEPS and returns diff as text. | 1745 """Modifies DEPS and returns diff as text. |
1473 | 1746 |
1474 Args: | 1747 Args: |
1475 depot: Current depot being bisected. | 1748 depot: Current depot being bisected. |
1476 revision: A git hash revision of the dependency repository. | 1749 revision: A git hash revision of the dependency repository. |
1477 | 1750 |
1478 Returns: | 1751 Returns: |
1479 A tuple with git hash of chromium revision and DEPS patch text. | 1752 A tuple with git hash of chromium revision and DEPS patch text. |
1480 """ | 1753 """ |
1481 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS) | 1754 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS) |
1482 if not os.path.exists(deps_file_path): | 1755 if not os.path.exists(deps_file_path): |
1483 raise RuntimeError('DEPS file does not exist: [%s]' % deps_file_path) | 1756 raise RuntimeError('DEPS file does not exist: [%s]' % deps_file_path)
1484 # Get current chromium revision (git hash). | 1757 # Get current chromium revision (git hash). |
1485 cmd = ['rev-parse', 'HEAD'] | 1758 cmd = ['rev-parse', 'HEAD'] |
1486 chromium_sha = bisect_utils.CheckRunGit(cmd).strip() | 1759 chromium_sha = bisect_utils.CheckRunGit(cmd).strip() |
1487 if not chromium_sha: | 1760 if not chromium_sha: |
1488 raise RuntimeError('Failed to determine Chromium revision for %s' % | 1761 raise RuntimeError('Failed to determine Chromium revision for %s' % |
1489 revision) | 1762 revision) |
1490 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or | 1763 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or |
1491 'v8' in DEPOT_DEPS_NAME[depot]['from']): | 1764 'v8' in DEPOT_DEPS_NAME[depot]['from']): |
1492 # Checkout DEPS file for the current chromium revision. | 1765 # Checkout DEPS file for the current chromium revision. |
1493 if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS, | 1766 if self.source_control.CheckoutFileAtRevision( |
1494 chromium_sha, | 1767 bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd): |
1495 cwd=self.src_cwd): | |
1496 if self.UpdateDeps(revision, depot, deps_file_path): | 1768 if self.UpdateDeps(revision, depot, deps_file_path): |
1497 diff_command = ['diff', | 1769 diff_command = [ |
1498 '--src-prefix=src/', | 1770 'diff', |
1499 '--dst-prefix=src/', | 1771 '--src-prefix=src/', |
1500 '--no-ext-diff', | 1772 '--dst-prefix=src/', |
1501 bisect_utils.FILE_DEPS] | 1773 '--no-ext-diff', |
1502 diff_text = bisect_utils.CheckRunGit( | 1774 bisect_utils.FILE_DEPS, |
1503 diff_command, cwd=self.src_cwd) | 1775 ] |
| 1776 diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd) |
1504 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text)) | 1777 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text)) |
1505 else: | 1778 else: |
1506 raise RuntimeError('Failed to update DEPS file for chromium: [%s]' % | 1779 raise RuntimeError( |
1507 chromium_sha) | 1780 'Failed to update DEPS file for chromium: [%s]' % chromium_sha) |
1508 else: | 1781 else: |
1509 raise RuntimeError('DEPS checkout failed for chromium revision: [%s]' % | 1782 raise RuntimeError(
1510 chromium_sha) | 1783 'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
1511 return (None, None) | 1784 return (None, None) |
1512 | 1785 |
1513 def BuildCurrentRevision(self, depot, revision=None): | 1786 def BuildCurrentRevision(self, depot, revision=None): |
1514 """Builds chrome and performance_ui_tests on the current revision. | 1787 """Builds chrome and performance_ui_tests on the current revision. |
1515 | 1788 |
1516 Returns: | 1789 Returns: |
1517 True if the build was successful. | 1790 True if the build was successful. |
1518 """ | 1791 """ |
1519 if self.opts.debug_ignore_build: | 1792 if self.opts.debug_ignore_build: |
1520 return True | 1793 return True |
1521 cwd = os.getcwd() | 1794 cwd = os.getcwd() |
1522 os.chdir(self.src_cwd) | 1795 os.chdir(self.src_cwd) |
1523 # Fetch build archive for the given revision from the cloud storage when | 1796 # Fetch build archive for the given revision from the cloud storage when |
1524 # the storage bucket is passed. | 1797 # the storage bucket is passed. |
1525 if self.IsDownloadable(depot) and revision: | 1798 if self.IsDownloadable(depot) and revision: |
1526 deps_patch = None | 1799 deps_patch = None |
1527 if depot != 'chromium': | 1800 if depot != 'chromium': |
1528 # Create a DEPS patch with new revision for dependency repository. | 1801 # Create a DEPS patch with new revision for dependency repository. |
1529 (revision, deps_patch) = self.CreateDEPSPatch(depot, revision) | 1802 revision, deps_patch = self.CreateDEPSPatch(depot, revision) |
1530 if self.DownloadCurrentBuild(revision, patch=deps_patch): | 1803 if self.DownloadCurrentBuild(revision, patch=deps_patch): |
1531 os.chdir(cwd) | 1804 os.chdir(cwd) |
1532 if deps_patch: | 1805 if deps_patch: |
1533 # Reverts the changes to DEPS file. | 1806 # Reverts the changes to DEPS file. |
1534 self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS, | 1807 self.source_control.CheckoutFileAtRevision( |
1535 revision, | 1808 bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd) |
1536 cwd=self.src_cwd) | |
1537 return True | 1809 return True |
1538 return False | 1810 return False |
1539 | 1811 |
1540 # This code is executed when bisect bots build binaries locally. | 1812 # This code is executed when bisect bots build binaries locally.
1541 build_success = self.builder.Build(depot, self.opts) | 1813 build_success = self.builder.Build(depot, self.opts) |
1542 os.chdir(cwd) | 1814 os.chdir(cwd) |
1543 return build_success | 1815 return build_success |
1544 | 1816 |
1545 def RunGClientHooks(self): | 1817 def RunGClientHooks(self): |
1546 """Runs gclient with runhooks command. | 1818 """Runs gclient with runhooks command. |
1547 | 1819 |
1548 Returns: | 1820 Returns: |
1549 True if gclient reports no errors. | 1821 True if gclient reports no errors. |
1550 """ | 1822 """ |
1551 | |
1552 if self.opts.debug_ignore_build: | 1823 if self.opts.debug_ignore_build: |
1553 return True | 1824 return True |
1554 | |
1555 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd) | 1825 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd) |
1556 | 1826 |
1557 def TryParseHistogramValuesFromOutput(self, metric, text): | |
1558 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>. | |
1559 | |
1560 Args: | |
1561 metric: The metric as a list of [<trace>, <value>] strings. | |
1562 text: The text to parse the metric values from. | |
1563 | |
1564 Returns: | |
1565 A list of floating point numbers found. | |
1566 """ | |
1567 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1]) | |
1568 | |
1569 text_lines = text.split('\n') | |
1570 values_list = [] | |
1571 | |
1572 for current_line in text_lines: | |
1573 if metric_formatted in current_line: | |
1574 current_line = current_line[len(metric_formatted):] | |
1575 | |
1576 try: | |
1577 histogram_values = eval(current_line) | |
1578 | |
1579 for b in histogram_values['buckets']: | |
1580 average_for_bucket = float(b['high'] + b['low']) * 0.5 | |
1581 # Extends the list with N-elements with the average for that bucket. | |
1582 values_list.extend([average_for_bucket] * b['count']) | |
1583 except: | |
1584 pass | |
1585 | |
1586 return values_list | |
1587 | |
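To make the bucket averaging concrete, here is what the parse does to a hypothetical HISTOGRAM line (the method above eval()s the payload; json.loads is enough for this well-formed sketch, and the graph/trace names are invented):

import json

line = ('HISTOGRAM blob_request: blob_request= '
        '{"buckets": [{"low": 10, "high": 20, "count": 2},'
        ' {"low": 20, "high": 40, "count": 1}]}')
metric_formatted = 'HISTOGRAM %s: %s= ' % ('blob_request', 'blob_request')
payload = json.loads(line[len(metric_formatted):])

values = []
for b in payload['buckets']:
  # Each bucket contributes its midpoint, once per sample it contains.
  values.extend([(b['high'] + b['low']) * 0.5] * b['count'])
assert values == [15.0, 15.0, 30.0]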
1588 def TryParseResultValuesFromOutput(self, metric, text): | |
1589 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ... | |
1590 | |
1591 Args: | |
1592 metric: The metric as a list of [<trace>, <value>] strings. | |
1593 text: The text to parse the metric values from. | |
1594 | |
1595 Returns: | |
1596 A list of floating point numbers found. | |
1597 """ | |
1598 # Format is: RESULT <graph>: <trace>= <value> <units> | |
1599 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1])) | |
1600 | |
1601 # The log will be parsed looking for format: | |
1602 # <*>RESULT <graph_name>: <trace_name>= <value> | |
1603 single_result_re = re.compile( | |
1604 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)') | |
1605 | |
1606 # The log will be parsed looking for format: | |
1607 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] | |
1608 multi_results_re = re.compile( | |
1609 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]') | |
1610 | |
1611 # The log will be parsed looking for format: | |
1612 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} | |
1613 mean_stddev_re = re.compile( | |
1614 metric_re + | |
1615 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}') | |
1616 | |
1617 text_lines = text.split('\n') | |
1618 values_list = [] | |
1619 for current_line in text_lines: | |
1620 # Parse the output from the performance test for the metric we're | |
1621 # interested in. | |
1622 single_result_match = single_result_re.search(current_line) | |
1623 multi_results_match = multi_results_re.search(current_line) | |
1624 mean_stddev_match = mean_stddev_re.search(current_line) | |
1625 if (not single_result_match is None and | |
1626 single_result_match.group('VALUE')): | |
1627 values_list += [single_result_match.group('VALUE')] | |
1628 elif (not multi_results_match is None and | |
1629 multi_results_match.group('VALUES')): | |
1630 metric_values = multi_results_match.group('VALUES') | |
1631 values_list += metric_values.split(',') | |
1632 elif (not mean_stddev_match is None and | |
1633 mean_stddev_match.group('MEAN')): | |
1634 values_list += [mean_stddev_match.group('MEAN')] | |
1635 | |
1636 values_list = [float(v) for v in values_list | |
1637 if bisect_utils.IsStringFloat(v)] | |
1638 | |
1639 # If the metric is times/t, we need to sum the timings in order to get | |
1640 # similar regression results as the try-bots. | |
1641 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'], | |
1642 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']] | |
1643 | |
1644 if metric in metrics_to_sum: | |
1645 if values_list: | |
1646 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)] | |
1647 | |
1648 return values_list | |
1649 | |
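The three RESULT line shapes recognized above, exercised on hypothetical output lines with made-up numbers:

import re

metric_re = re.escape('RESULT %s: %s=' % ('times', 't'))
single_result_re = re.compile(metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
multi_results_re = re.compile(
    metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
mean_stddev_re = re.compile(
    metric_re +
    r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

assert single_result_re.search('RESULT times: t= 11.5 ms').group('VALUE') == '11.5'
assert multi_results_re.search(
    'RESULT times: t= [11.5,12.0,12.5] ms').group('VALUES') == '11.5,12.0,12.5'
assert mean_stddev_re.search(
    'RESULT times: t= {11.5, 0.5} ms').group('MEAN') == '11.5'
# For ['times', 't'] (and the page_load_time variants) the parser then sums
# all parsed values into a single number, to match the try-bot behavior.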
1650 def ParseMetricValuesFromOutput(self, metric, text): | |
1651 """Parses output from performance_ui_tests and retrieves the results for | |
1652 a given metric. | |
1653 | |
1654 Args: | |
1655 metric: The metric as a list of [<trace>, <value>] strings. | |
1656 text: The text to parse the metric values from. | |
1657 | |
1658 Returns: | |
1659 A list of floating point numbers found. | |
1660 """ | |
1661 metric_values = self.TryParseResultValuesFromOutput(metric, text) | |
1662 | |
1663 if not metric_values: | |
1664 metric_values = self.TryParseHistogramValuesFromOutput(metric, text) | |
1665 | |
1666 return metric_values | |
1667 | |
1668 def _GenerateProfileIfNecessary(self, command_args): | |
1669 """Checks the command line of the performance test for dependencies on | |
1670 profile generation, and runs tools/perf/generate_profile as necessary. | |
1671 | |
1672 Args: | |
1673 command_args: Command line being passed to performance test, as a list. | |
1674 | |
1675 Returns: | |
1676 False if profile generation was necessary and failed, otherwise True. | |
1677 """ | |
1678 | |
1679 if '--profile-dir' in ' '.join(command_args): | |
1680 # If we were using python 2.7+, we could just use the argparse | |
1681 # module's parse_known_args to grab --profile-dir. Since some of the | |
1682 # bots still run 2.6, we have to grab the arguments manually. |
1683 arg_dict = {} | |
1684 args_to_parse = ['--profile-dir', '--browser'] | |
1685 | |
1686 for arg_to_parse in args_to_parse: | |
1687 for i, current_arg in enumerate(command_args): | |
1688 if arg_to_parse in current_arg: | |
1689 current_arg_split = current_arg.split('=') | |
1690 | |
1691 # Check 2 cases, --arg=<val> and --arg <val> | |
1692 if len(current_arg_split) == 2: | |
1693 arg_dict[arg_to_parse] = current_arg_split[1] | |
1694 elif i + 1 < len(command_args): | |
1695 arg_dict[arg_to_parse] = command_args[i+1] | |
1696 | |
1697 path_to_generate = os.path.join('tools', 'perf', 'generate_profile') | |
1698 | |
1699 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): | |
1700 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) | |
1701 return not bisect_utils.RunProcess(['python', path_to_generate, | |
1702 '--profile-type-to-generate', profile_type, | |
1703 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) | |
1704 return False | |
1705 return True | |
1706 | |
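Because some bots still ran Python 2.6 (no argparse.parse_known_args), the method above hand-parses the two flags it needs, accepting both --flag=value and --flag value forms. A standalone sketch of that extraction with hypothetical command-line values:

def _ExtractArgs(command_args, args_to_parse=('--profile-dir', '--browser')):
  """Pulls selected flags out of an argv-style list (both --a=b and --a b)."""
  arg_dict = {}
  for arg_to_parse in args_to_parse:
    for i, current_arg in enumerate(command_args):
      if arg_to_parse in current_arg:
        current_arg_split = current_arg.split('=')
        if len(current_arg_split) == 2:
          arg_dict[arg_to_parse] = current_arg_split[1]
        elif i + 1 < len(command_args):
          arg_dict[arg_to_parse] = command_args[i + 1]
  return arg_dict

assert _ExtractArgs(
    ['run_benchmark', '--browser=release', '--profile-dir', 'out/profiles/small']
) == {'--browser': 'release', '--profile-dir': 'out/profiles/small'}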
1707 def _IsBisectModeUsingMetric(self): | 1827 def _IsBisectModeUsingMetric(self): |
1708 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV] | 1828 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV] |
1709 | 1829 |
1710 def _IsBisectModeReturnCode(self): | 1830 def _IsBisectModeReturnCode(self): |
1711 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE] | 1831 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE] |
1712 | 1832 |
1713 def _IsBisectModeStandardDeviation(self): | 1833 def _IsBisectModeStandardDeviation(self): |
1714 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] | 1834 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] |
1715 | 1835 |
1716 def GetCompatibleCommand(self, command_to_run, revision, depot): | 1836 def GetCompatibleCommand(self, command_to_run, revision, depot): |
(...skipping 53 matching lines...)
1770 'std_dev': 0.0, | 1890 'std_dev': 0.0, |
1771 'values': [0.0] | 1891 'values': [0.0] |
1772 } | 1892 } |
1773 return (fake_results, success_code) | 1893 return (fake_results, success_code) |
1774 | 1894 |
1775 # For Windows platform set posix=False, to parse windows paths correctly. | 1895 # For Windows platform set posix=False, to parse windows paths correctly. |
1776 # On Windows, path separators '\' or '\\' are replace by '' when posix=True, | 1896 # On Windows, path separators '\' or '\\' are replace by '' when posix=True, |
1777 # refer to http://bugs.python.org/issue1724822. By default posix=True. | 1897 # refer to http://bugs.python.org/issue1724822. By default posix=True. |
1778 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost()) | 1898 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost()) |
1779 | 1899 |
1780 if not self._GenerateProfileIfNecessary(args): | 1900 if not _GenerateProfileIfNecessary(args): |
1781 err_text = 'Failed to generate profile for performance test.' | 1901 err_text = 'Failed to generate profile for performance test.' |
1782 return (err_text, failure_code) | 1902 return (err_text, failure_code) |
1783 | 1903 |
1784 # If running a Telemetry test for Chrome OS, insert the remote IP and | 1904 # If running a Telemetry test for Chrome OS, insert the remote IP and |
1785 # identity parameters. | 1905 # identity parameters. |
1786 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run) | 1906 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run) |
1787 if self.opts.target_platform == 'cros' and is_telemetry: | 1907 if self.opts.target_platform == 'cros' and is_telemetry: |
1788 args.append('--remote=%s' % self.opts.cros_remote_ip) | 1908 args.append('--remote=%s' % self.opts.cros_remote_ip) |
1789 args.append('--identity=%s' % CROS_TEST_KEY_PATH) | 1909 args.append('--identity=%s' % CROS_TEST_KEY_PATH) |
1790 | 1910 |
1791 start_time = time.time() | 1911 start_time = time.time() |
1792 | 1912 |
1793 metric_values = [] | 1913 metric_values = [] |
1794 output_of_all_runs = '' | 1914 output_of_all_runs = '' |
1795 for i in xrange(self.opts.repeat_test_count): | 1915 for i in xrange(self.opts.repeat_test_count): |
1796 # Can ignore the return code since if the tests fail, it won't return 0. | 1916 # Can ignore the return code since if the tests fail, it won't return 0. |
1797 current_args = copy.copy(args) | 1917 current_args = copy.copy(args) |
1798 if is_telemetry: | 1918 if is_telemetry: |
1799 if i == 0 and reset_on_first_run: | 1919 if i == 0 and reset_on_first_run: |
1800 current_args.append('--reset-results') | 1920 current_args.append('--reset-results') |
1801 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run: | 1921 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run: |
1802 current_args.append('--upload-results') | 1922 current_args.append('--upload-results') |
1803 if results_label: | 1923 if results_label: |
1804 current_args.append('--results-label=%s' % results_label) | 1924 current_args.append('--results-label=%s' % results_label) |
1805 try: | 1925 try: |
1806 (output, return_code) = bisect_utils.RunProcessAndRetrieveOutput( | 1926 output, return_code = bisect_utils.RunProcessAndRetrieveOutput( |
1807 current_args, cwd=self.src_cwd) | 1927 current_args, cwd=self.src_cwd) |
1808 except OSError, e: | 1928 except OSError, e: |
1809 if e.errno == errno.ENOENT: | 1929 if e.errno == errno.ENOENT: |
1810 err_text = ('Something went wrong running the performance test. ' | 1930 err_text = ('Something went wrong running the performance test. ' |
1811 'Please review the command line:\n\n') | 1931 'Please review the command line:\n\n') |
1812 if 'src/' in ' '.join(args): | 1932 if 'src/' in ' '.join(args): |
1813 err_text += ('Check that you haven\'t accidentally specified a ' | 1933 err_text += ('Check that you haven\'t accidentally specified a ' |
1814 'path with src/ in the command.\n\n') | 1934 'path with src/ in the command.\n\n') |
1815 err_text += ' '.join(args) | 1935 err_text += ' '.join(args) |
1816 err_text += '\n' | 1936 err_text += '\n' |
1817 | 1937 |
1818 return (err_text, failure_code) | 1938 return (err_text, failure_code) |
1819 raise | 1939 raise |
1820 | 1940 |
1821 output_of_all_runs += output | 1941 output_of_all_runs += output |
1822 if self.opts.output_buildbot_annotations: | 1942 if self.opts.output_buildbot_annotations: |
1823 print output | 1943 print output |
1824 | 1944 |
1825 if self._IsBisectModeUsingMetric(): | 1945 if self._IsBisectModeUsingMetric(): |
1826 metric_values += self.ParseMetricValuesFromOutput(metric, output) | 1946 metric_values += _ParseMetricValuesFromOutput(metric, output) |
1827 # If we're bisecting on a metric (ie, changes in the mean or | 1947 # If we're bisecting on a metric (ie, changes in the mean or |
1828 # standard deviation) and no metric values are produced, bail out. | 1948 # standard deviation) and no metric values are produced, bail out. |
1829 if not metric_values: | 1949 if not metric_values: |
1830 break | 1950 break |
1831 elif self._IsBisectModeReturnCode(): | 1951 elif self._IsBisectModeReturnCode(): |
1832 metric_values.append(return_code) | 1952 metric_values.append(return_code) |
1833 | 1953 |
1834 elapsed_minutes = (time.time() - start_time) / 60.0 | 1954 elapsed_minutes = (time.time() - start_time) / 60.0 |
1835 if elapsed_minutes >= self.opts.max_time_minutes: | 1955 if elapsed_minutes >= self.opts.max_time_minutes: |
1836 break | 1956 break |
1837 | 1957 |
1838 if len(metric_values) == 0: | 1958 if len(metric_values) == 0: |
1839 err_text = 'Metric %s was not found in the test output.' % metric | 1959 err_text = 'Metric %s was not found in the test output.' % metric |
1840 # TODO(qyearsley): Consider also getting and displaying a list of metrics | 1960 # TODO(qyearsley): Consider also getting and displaying a list of metrics |
1841 # that were found in the output here. | 1961 # that were found in the output here. |
1842 return (err_text, failure_code, output_of_all_runs) | 1962 return (err_text, failure_code, output_of_all_runs) |
1843 | 1963 |
1844 # If we're bisecting on return codes, we're really just looking for zero vs | 1964 # If we're bisecting on return codes, we're really just looking for zero vs |
1845 # non-zero. | 1965 # non-zero. |
1846 if self._IsBisectModeReturnCode(): | 1966 if self._IsBisectModeReturnCode(): |
1847 # If any of the return codes is non-zero, output 1. | 1967 # If any of the return codes is non-zero, output 1. |
1848 overall_return_code = 0 if ( | 1968 overall_return_code = 0 if ( |
1849 all(current_value == 0 for current_value in metric_values)) else 1 | 1969 all(current_value == 0 for current_value in metric_values)) else 1 |
1850 | 1970 |
1851 values = { | 1971 values = { |
1852 'mean': overall_return_code, | 1972 'mean': overall_return_code, |
1853 'std_err': 0.0, | 1973 'std_err': 0.0, |
1854 'std_dev': 0.0, | 1974 'std_dev': 0.0, |
1855 'values': metric_values, | 1975 'values': metric_values, |
1856 } | 1976 } |
1857 | 1977 |
1858 print 'Results of performance test: Command returned with %d' % ( | 1978 print 'Results of performance test: Command returned with %d' % ( |
1859 overall_return_code) | 1979 overall_return_code) |
1860 print | 1980 print |
1861 else: | 1981 else: |
1862 # Need to get the average value if there were multiple values. | 1982 # Need to get the average value if there were multiple values. |
1863 truncated_mean = math_utils.TruncatedMean( | 1983 truncated_mean = math_utils.TruncatedMean( |
1864 metric_values, self.opts.truncate_percent) | 1984 metric_values, self.opts.truncate_percent) |
1865 standard_err = math_utils.StandardError(metric_values) | 1985 standard_err = math_utils.StandardError(metric_values) |
1866 standard_dev = math_utils.StandardDeviation(metric_values) | 1986 standard_dev = math_utils.StandardDeviation(metric_values) |
1867 | 1987 |
1868 if self._IsBisectModeStandardDeviation(): | 1988 if self._IsBisectModeStandardDeviation(): |
1869 metric_values = [standard_dev] | 1989 metric_values = [standard_dev] |
1870 | 1990 |
1871 values = { | 1991 values = { |
1872 'mean': truncated_mean, | 1992 'mean': truncated_mean, |
1873 'std_err': standard_err, | 1993 'std_err': standard_err, |
1874 'std_dev': standard_dev, | 1994 'std_dev': standard_dev, |
1875 'values': metric_values, | 1995 'values': metric_values, |
1876 } | 1996 } |
1877 | 1997 |
1878 print 'Results of performance test: %12f %12f' % ( | 1998 print 'Results of performance test: %12f %12f' % ( |
1879 truncated_mean, standard_err) | 1999 truncated_mean, standard_err) |
1880 print | 2000 print |
1881 return (values, success_code, output_of_all_runs) | 2001 return (values, success_code, output_of_all_runs) |
1882 | 2002 |
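For the metric-based modes, each run's samples are reduced to a truncated mean plus error statistics before being compared against the known-good and known-bad reference values. A rough sketch of that reduction, assuming math_utils.TruncatedMean trims a fraction of samples from each end before averaging (the exact trimming rule lives in math_utils and may differ):

import math

def _TruncatedMean(values, truncate_fraction):
  # Sketch: drop a fraction of the samples from each end, then average.
  values = sorted(values)
  k = int(len(values) * truncate_fraction)
  trimmed = values[k:len(values) - k] if k else values
  return sum(trimmed) / float(len(trimmed))

def _StandardError(values):
  mean = sum(values) / float(len(values))
  variance = sum((v - mean) ** 2 for v in values) / (len(values) - 1)
  return math.sqrt(variance) / math.sqrt(len(values))

samples = [10.0, 10.5, 11.0, 11.5, 42.0]     # One outlier run (made-up data).
assert _TruncatedMean(samples, 0.2) == 11.0  # 10.5, 11.0, 11.5 survive.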
1883 def FindAllRevisionsToSync(self, revision, depot): | 2003 def FindAllRevisionsToSync(self, revision, depot): |
1884 """Finds all dependant revisions and depots that need to be synced for a | 2004 """Finds all dependant revisions and depots that need to be synced for a |
1885 given revision. This is only useful in the git workflow, as an svn depot | 2005 given revision. This is only useful in the git workflow, as an svn depot |
(...skipping 12 matching lines...)
1898 """ | 2018 """ |
1899 revisions_to_sync = [[depot, revision]] | 2019 revisions_to_sync = [[depot, revision]] |
1900 | 2020 |
1901 is_base = ((depot == 'chromium') or (depot == 'cros') or | 2021 is_base = ((depot == 'chromium') or (depot == 'cros') or |
1902 (depot == 'android-chrome')) | 2022 (depot == 'android-chrome')) |
1903 | 2023 |
1904 # Some SVN depots were split into multiple git depots, so we need to | 2024 # Some SVN depots were split into multiple git depots, so we need to |
1905 # figure out for each mirror which git revision to grab. There's no | 2025 # figure out for each mirror which git revision to grab. There's no |
1907 # guarantee that the SVN revision will exist for each of the dependent | 2027 # guarantee that the SVN revision will exist for each of the dependent
1907 # depots, so we have to grep the git logs and grab the next earlier one. | 2027 # depots, so we have to grep the git logs and grab the next earlier one. |
1908 if not is_base and\ | 2028 if (not is_base |
1909 DEPOT_DEPS_NAME[depot]['depends'] and\ | 2029 and DEPOT_DEPS_NAME[depot]['depends'] |
1910 self.source_control.IsGit(): | 2030 and self.source_control.IsGit()): |
1911 svn_rev = self.source_control.SVNFindRev(revision) | 2031 svn_rev = self.source_control.SVNFindRev(revision) |
1912 | 2032 |
1913 for d in DEPOT_DEPS_NAME[depot]['depends']: | 2033 for d in DEPOT_DEPS_NAME[depot]['depends']: |
1914 self.ChangeToDepotWorkingDirectory(d) | 2034 self.ChangeToDepotWorkingDirectory(d) |
1915 | 2035 |
1916 dependant_rev = self.source_control.ResolveToRevision( | 2036 dependant_rev = self.source_control.ResolveToRevision( |
1917 svn_rev, d, DEPOT_DEPS_NAME, -1000) | 2037 svn_rev, d, DEPOT_DEPS_NAME, -1000) |
1918 | 2038 |
1919 if dependant_rev: | 2039 if dependant_rev: |
1920 revisions_to_sync.append([d, dependant_rev]) | 2040 revisions_to_sync.append([d, dependant_rev]) |
(...skipping 135 matching lines...)
2056 output = bisect_utils.CheckRunGit(cmd) | 2176 output = bisect_utils.CheckRunGit(cmd) |
2057 | 2177 |
2058 files = output.splitlines() | 2178 files = output.splitlines() |
2059 | 2179 |
2060 if len(files) == 1 and files[0] == 'DEPS': | 2180 if len(files) == 1 and files[0] == 'DEPS': |
2061 return True | 2181 return True |
2062 | 2182 |
2063 return False | 2183 return False |
2064 | 2184 |
2065 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric, | 2185 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric, |
2066 skippable=False): | 2186 skippable=False): |
2067 """Performs a full sync/build/run of the specified revision. | 2187 """Performs a full sync/build/run of the specified revision. |
2068 | 2188 |
2069 Args: | 2189 Args: |
2070 revision: The revision to sync to. | 2190 revision: The revision to sync to. |
2071 depot: The depot that's being used at the moment (src, webkit, etc.) | 2191 depot: The depot that's being used at the moment (src, webkit, etc.) |
2072 command_to_run: The command to execute the performance test. | 2192 command_to_run: The command to execute the performance test. |
2073 metric: The performance metric being tested. | 2193 metric: The performance metric being tested. |
2074 | 2194 |
2075 Returns: | 2195 Returns: |
2076 On success, a tuple containing the results of the performance test. | 2196 On success, a tuple containing the results of the performance test. |
(...skipping 33 matching lines...)
2110 sync_client): | 2230 sync_client): |
2111 success = False | 2231 success = False |
2112 | 2232 |
2113 break | 2233 break |
2114 | 2234 |
2115 if success: | 2235 if success: |
2116 success = self.RunPostSync(depot) | 2236 success = self.RunPostSync(depot) |
2117 if success: | 2237 if success: |
2118 if skippable and self.ShouldSkipRevision(depot, revision): | 2238 if skippable and self.ShouldSkipRevision(depot, revision): |
2119 return ('Skipped revision: [%s]' % str(revision), | 2239 return ('Skipped revision: [%s]' % str(revision), |
2120 BUILD_RESULT_SKIPPED) | 2240 BUILD_RESULT_SKIPPED) |
2121 | 2241 |
2122 start_build_time = time.time() | 2242 start_build_time = time.time() |
2123 if self.BuildCurrentRevision(depot, revision): | 2243 if self.BuildCurrentRevision(depot, revision): |
2124 after_build_time = time.time() | 2244 after_build_time = time.time() |
2125 # Hack to support things that got changed. | 2245 # Hack to support things that got changed. |
2126 command_to_run = self.GetCompatibleCommand( | 2246 command_to_run = self.GetCompatibleCommand( |
2127 command_to_run, revision, depot) | 2247 command_to_run, revision, depot) |
2128 results = self.RunPerformanceTestAndParseResults(command_to_run, | 2248 results = self.RunPerformanceTestAndParseResults(command_to_run, |
2129 metric) | 2249 metric) |
2130 # Restore build output directory once the tests are done, to avoid | 2250 # Restore build output directory once the tests are done, to avoid |
2131 # any discrepancy. | 2251 # any discrepancy.
2132 if self.IsDownloadable(depot) and revision: | 2252 if self.IsDownloadable(depot) and revision: |
2133 self.BackupOrRestoreOutputdirectory(restore=True) | 2253 self.BackupOrRestoreOutputdirectory(restore=True) |
2134 | 2254 |
2135 if results[1] == 0: | 2255 if results[1] == 0: |
2136 external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision( | 2256 external_revisions = self._Get3rdPartyRevisions(depot) |
2137 depot, revision) | |
2138 | 2257 |
2139 if not external_revisions is None: | 2258 if not external_revisions is None: |
2140 return (results[0], results[1], external_revisions, | 2259 return (results[0], results[1], external_revisions, |
2141 time.time() - after_build_time, after_build_time - | 2260 time.time() - after_build_time, after_build_time - |
2142 start_build_time) | 2261 start_build_time) |
2143 else: | 2262 else: |
2144 return ('Failed to parse DEPS file for external revisions.', | 2263 return ('Failed to parse DEPS file for external revisions.', |
2145 BUILD_RESULT_FAIL) | 2264 BUILD_RESULT_FAIL) |
2146 else: | 2265 else: |
2147 return results | 2266 return results |
2148 else: | 2267 else: |
2149 return ('Failed to build revision: [%s]' % (str(revision, )), | 2268 return ('Failed to build revision: [%s]' % str(revision), |
2150 BUILD_RESULT_FAIL) | 2269 BUILD_RESULT_FAIL) |
2151 else: | 2270 else: |
2152 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL) | 2271 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL) |
2153 else: | 2272 else: |
2154 return ('Failed to sync revision: [%s]' % (str(revision, )), | 2273 return ('Failed to sync revision: [%s]' % str(revision), |
2155 BUILD_RESULT_FAIL) | 2274 BUILD_RESULT_FAIL) |
2156 | 2275 |
2157 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value): | 2276 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value): |
2158 """Given known good and bad values, decide if the current_value passed | 2277 """Given known good and bad values, decide if the current_value passed |
2159 or failed. | 2278 or failed. |
2160 | 2279 |
2161 Args: | 2280 Args: |
2162 current_value: The value of the metric being checked. | 2281 current_value: The value of the metric being checked. |
2163 known_bad_value: The reference value for a "failed" run. | 2282 known_bad_value: The reference value for a "failed" run. |
2164 known_good_value: The reference value for a "passed" run. | 2283 known_good_value: The reference value for a "passed" run. |
2165 | 2284 |
(...skipping 13 matching lines...)
2179 return dist_to_good_value < dist_to_bad_value | 2298 return dist_to_good_value < dist_to_bad_value |
2180 | 2299 |
2181 def _GetDepotDirectory(self, depot_name): | 2300 def _GetDepotDirectory(self, depot_name): |
2182 if depot_name == 'chromium': | 2301 if depot_name == 'chromium': |
2183 return self.src_cwd | 2302 return self.src_cwd |
2184 elif depot_name == 'cros': | 2303 elif depot_name == 'cros': |
2185 return self.cros_cwd | 2304 return self.cros_cwd |
2186 elif depot_name in DEPOT_NAMES: | 2305 elif depot_name in DEPOT_NAMES: |
2187 return self.depot_cwd[depot_name] | 2306 return self.depot_cwd[depot_name] |
2188 else: | 2307 else: |
2189 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\ | 2308 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one ' |
2190 ' was added without proper support?' % depot_name | 2309 'was added without proper support?' % depot_name) |
2191 | 2310 |
2192 def ChangeToDepotWorkingDirectory(self, depot_name): | 2311 def ChangeToDepotWorkingDirectory(self, depot_name): |
2193 """Given a depot, changes to the appropriate working directory. | 2312 """Given a depot, changes to the appropriate working directory. |
2194 | 2313 |
2195 Args: | 2314 Args: |
2196 depot_name: The name of the depot (see DEPOT_NAMES). | 2315 depot_name: The name of the depot (see DEPOT_NAMES). |
2197 """ | 2316 """ |
2198 os.chdir(self._GetDepotDirectory(depot_name)) | 2317 os.chdir(self._GetDepotDirectory(depot_name)) |
2199 | 2318 |
2200 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data): | 2319 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data): |
2201 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'], | 2320 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'], |
2202 search_forward=True) | 2321 search_forward=True) |
2203 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'], | 2322 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'], |
2204 search_forward=False) | 2323 search_forward=False) |
2205 min_revision_data['external']['v8_bleeding_edge'] = r1 | 2324 min_revision_data['external']['v8_bleeding_edge'] = r1 |
2206 max_revision_data['external']['v8_bleeding_edge'] = r2 | 2325 max_revision_data['external']['v8_bleeding_edge'] = r2 |
2207 | 2326 |
2208 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable( | 2327 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable( |
2209 min_revision_data['revision']) or | 2328 min_revision_data['revision']) |
2210 not self._GetV8BleedingEdgeFromV8TrunkIfMappable( | 2329 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable( |
2211 max_revision_data['revision'])): | 2330 max_revision_data['revision'])): |
2212 self.warnings.append('Trunk revisions in V8 did not map directly to ' | 2331 self.warnings.append( |
2213 'bleeding_edge. Attempted to expand the range to find V8 rolls which ' | 2332 'Trunk revisions in V8 did not map directly to bleeding_edge. ' |
2214 'did map directly to bleeding_edge revisions, but results might not ' | 2333 'Attempted to expand the range to find V8 rolls which did map ' |
2215 'be valid.') | 2334 'directly to bleeding_edge revisions, but results might not be ' |
| 2335 'valid.') |
2216 | 2336 |
2217 def _FindNextDepotToBisect(self, current_depot, current_revision, | 2337 def _FindNextDepotToBisect( |
2218 min_revision_data, max_revision_data): | 2338 self, current_depot, min_revision_data, max_revision_data): |
2219 """Given the state of the bisect, decides which depot the script should | 2339 """Decides which depot the script should dive into next (if any). |
2220 dive into next (if any). | |
2221 | 2340 |
2222 Args: | 2341 Args: |
2223 current_depot: Current depot being bisected. | 2342 current_depot: Current depot being bisected. |
2224 current_revision: Current revision synced to. | |
2225 min_revision_data: Data about the earliest revision in the bisect range. | 2343 min_revision_data: Data about the earliest revision in the bisect range. |
2226 max_revision_data: Data about the latest revision in the bisect range. | 2344 max_revision_data: Data about the latest revision in the bisect range. |
2227 | 2345 |
2228 Returns: | 2346 Returns: |
2229 The depot to bisect next, or None. | 2347 Name of the depot to bisect next, or None. |
2230 """ | 2348 """ |
2231 external_depot = None | 2349 external_depot = None |
2232 for next_depot in DEPOT_NAMES: | 2350 for next_depot in DEPOT_NAMES: |
2233 if DEPOT_DEPS_NAME[next_depot].has_key('platform'): | 2351 if DEPOT_DEPS_NAME[next_depot].has_key('platform'): |
2234 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name: | 2352 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name: |
2235 continue | 2353 continue |
2236 | 2354 |
2237 if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and | 2355 if not (DEPOT_DEPS_NAME[next_depot]['recurse'] |
2238 min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']): | 2356 and min_revision_data['depot'] |
| 2357 in DEPOT_DEPS_NAME[next_depot]['from']): |
2239 continue | 2358 continue |
2240 | 2359 |
2241 if current_depot == 'v8': | 2360 if current_depot == 'v8': |
2242 # We grab the bleeding_edge info here rather than earlier because we | 2361 # We grab the bleeding_edge info here rather than earlier because we |
2243 # finally have the revision range. From that we can search forwards and | 2362 # finally have the revision range. From that we can search forwards and |
2244 # backwards to try to match trunk revisions to bleeding_edge. | 2363 # backwards to try to match trunk revisions to bleeding_edge. |
2245 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data) | 2364 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data) |
2246 | 2365 |
2247 if (min_revision_data['external'].get(next_depot) == | 2366 if (min_revision_data['external'].get(next_depot) == |
2248 max_revision_data['external'].get(next_depot)): | 2367 max_revision_data['external'].get(next_depot)): |
2249 continue | 2368 continue |
2250 | 2369 |
2251 if (min_revision_data['external'].get(next_depot) and | 2370 if (min_revision_data['external'].get(next_depot) and |
2252 max_revision_data['external'].get(next_depot)): | 2371 max_revision_data['external'].get(next_depot)): |
2253 external_depot = next_depot | 2372 external_depot = next_depot |
2254 break | 2373 break |
2255 | 2374 |
2256 return external_depot | 2375 return external_depot |
2257 | 2376 |
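At its core, the selection above asks which dependency's DEPS-recorded revision actually changed between the two ends of the narrowed range. A simplified stand-in that ignores the platform/recurse filtering and the V8 bleeding_edge special case, with hypothetical revision data:

def _PickExternalDepot(min_data, max_data, candidate_depots):
  """Returns the first depot whose external revision differs across the range."""
  for depot in candidate_depots:
    lo = min_data['external'].get(depot)
    hi = max_data['external'].get(depot)
    if lo and hi and lo != hi:
      return depot
  return None

min_data = {'external': {'webkit': '1111', 'v8': '9999'}}
max_data = {'external': {'webkit': '2222', 'v8': '9999'}}
# WebKit's revision changed across the range while V8's did not, so WebKit
# is the depot to dive into next.
assert _PickExternalDepot(min_data, max_data, ['v8', 'webkit']) == 'webkit'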
2258 def PrepareToBisectOnDepot(self, | 2377 def PrepareToBisectOnDepot( |
2259 current_depot, | 2378 self, current_depot, end_revision, start_revision, previous_revision): |
2260 end_revision, | |
2261 start_revision, | |
2262 previous_depot, | |
2263 previous_revision): | |
2264 """Changes to the appropriate directory and gathers a list of revisions | 2379 """Changes to the appropriate directory and gathers a list of revisions |
2265 to bisect between |start_revision| and |end_revision|. | 2380 to bisect between |start_revision| and |end_revision|. |
2266 | 2381 |
2267 Args: | 2382 Args: |
2268 current_depot: The depot we want to bisect. | 2383 current_depot: The depot we want to bisect. |
2269 end_revision: End of the revision range. | 2384 end_revision: End of the revision range. |
2270 start_revision: Start of the revision range. | 2385 start_revision: Start of the revision range. |
2271 previous_depot: The depot we were previously bisecting. | |
2272 previous_revision: The last revision we synced to on |previous_depot|. | 2386 previous_revision: The last revision we synced to on the previous depot. |
2273 | 2387 |
2274 Returns: | 2388 Returns: |
2275 A list containing the revisions between |start_revision| and | 2389 A list containing the revisions between |start_revision| and |
2276 |end_revision| inclusive. | 2390 |end_revision| inclusive. |
2277 """ | 2391 """ |
2278 # Change into working directory of external library to run | 2392 # Change into working directory of external library to run |
2279 # subsequent commands. | 2393 # subsequent commands. |
2280 self.ChangeToDepotWorkingDirectory(current_depot) | 2394 self.ChangeToDepotWorkingDirectory(current_depot) |
2281 | 2395 |
(...skipping 37 matching lines...)
2319 Args: | 2433 Args: |
2320 good_rev: The last known good revision where the performance regression | 2434 good_rev: The last known good revision where the performance regression |
2321 has not occurred yet. | 2435 has not occurred yet. |
2322 bad_rev: A revision where the performance regression has already occurred. | 2436 bad_rev: A revision where the performance regression has already occurred. |
2323 cmd: The command to execute the performance test. | 2437 cmd: The command to execute the performance test. |
2324 metric: The metric being tested for regression. | 2438 metric: The metric being tested for regression. |
2325 | 2439 |
2326 Returns: | 2440 Returns: |
2327 A tuple with the results of building and running each revision. | 2441 A tuple with the results of building and running each revision. |
2328 """ | 2442 """ |
2329 bad_run_results = self.SyncBuildAndRunRevision(bad_rev, | 2443 bad_run_results = self.SyncBuildAndRunRevision( |
2330 target_depot, | 2444 bad_rev, target_depot, cmd, metric) |
2331 cmd, | |
2332 metric) | |
2333 | 2445 |
2334 good_run_results = None | 2446 good_run_results = None |
2335 | 2447 |
2336 if not bad_run_results[1]: | 2448 if not bad_run_results[1]: |
2337 good_run_results = self.SyncBuildAndRunRevision(good_rev, | 2449 good_run_results = self.SyncBuildAndRunRevision( |
2338 target_depot, | 2450 good_rev, target_depot, cmd, metric) |
2339 cmd, | |
2340 metric) | |
2341 | 2451 |
2342 return (bad_run_results, good_run_results) | 2452 return (bad_run_results, good_run_results) |
2343 | 2453 |
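The ordering above is deliberate: the known-bad revision is built and measured first, and the known-good revision is only attempted when that run reports no error, so a broken setup fails fast before the second expensive build. A schematic of that control flow (run_revision is a hypothetical stand-in for SyncBuildAndRunRevision and, like it, returns a pair whose second element is the error code):

    def gather_reference_values(run_revision, good_rev, bad_rev):
      # Build and measure the known-bad revision first.
      bad_results = run_revision(bad_rev)
      good_results = None
      if not bad_results[1]:
        # Only spend time on the good revision once the bad one has succeeded.
        good_results = run_revision(good_rev)
      return bad_results, good_results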
2344 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data): | |
2345 """Adds new revisions to the revision_data dict and initializes them. | |
2346 | |
2347 Args: | |
2348 revisions: List of revisions to add. | |
2349 depot: Depot that's currently in use (src, webkit, etc...) | |
2350 sort: Sorting key for displaying revisions. | |
2351 revision_data: A dict to add the new revisions into. Existing revisions | |
2352 will have their sort keys offset. | |
2353 """ | |
2354 | |
2355 num_depot_revisions = len(revisions) | |
2356 | |
2357 for _, v in revision_data.iteritems(): | |
2358 if v['sort'] > sort: | |
2359 v['sort'] += num_depot_revisions | |
2360 | |
2361 for i in xrange(num_depot_revisions): | |
2362 r = revisions[i] | |
2363 | |
2364 revision_data[r] = {'revision' : r, | |
2365 'depot' : depot, | |
2366 'value' : None, | |
2367 'perf_time' : 0, | |
2368 'build_time' : 0, | |
2369 'passed' : '?', | |
2370 'sort' : i + sort + 1} | |
2371 | |
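The method above (dropped here in favour of the module-level _AddRevisionsIntoRevisionData) does two things: it shifts the sort key of every existing entry that sorts after the insertion point by the number of new revisions, then slots the new revisions in directly after that point. A small self-contained illustration, with entries trimmed down to just the fields that matter:

    def add_revisions(revisions, depot, sort, revision_data):
      n = len(revisions)
      # Existing entries sorting after the insertion point make room first.
      for entry in revision_data.values():
        if entry['sort'] > sort:
          entry['sort'] += n
      # The new revisions then occupy sort keys sort+1 .. sort+n.
      for i, rev in enumerate(revisions):
        revision_data[rev] = {'depot': depot, 'sort': sort + i + 1}

    data = {'r1': {'depot': 'chromium', 'sort': 1},
            'r2': {'depot': 'chromium', 'sort': 2}}
    add_revisions(['w1', 'w2'], 'webkit', 1, data)
    # r2 is pushed from sort 2 to 4; w1 and w2 take sorts 2 and 3 in between.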
2372 def PrintRevisionsToBisectMessage(self, revision_list, depot): | 2454 def PrintRevisionsToBisectMessage(self, revision_list, depot): |
2373 if self.opts.output_buildbot_annotations: | 2455 if self.opts.output_buildbot_annotations: |
2374 step_name = 'Bisection Range: [%s - %s]' % ( | 2456 step_name = 'Bisection Range: [%s - %s]' % ( |
2375 revision_list[len(revision_list)-1], revision_list[0]) | 2457 revision_list[len(revision_list)-1], revision_list[0]) |
2376 bisect_utils.OutputAnnotationStepStart(step_name) | 2458 bisect_utils.OutputAnnotationStepStart(step_name) |
2377 | 2459 |
2378 print | 2460 print |
2379 print 'Revisions to bisect on [%s]:' % depot | 2461 print 'Revisions to bisect on [%s]:' % depot |
2380 for revision_id in revision_list: | 2462 for revision_id in revision_list: |
2381 print ' -> %s' % (revision_id, ) | 2463 print ' -> %s' % (revision_id, ) |
2382 print | 2464 print |
2383 | 2465 |
2384 if self.opts.output_buildbot_annotations: | 2466 if self.opts.output_buildbot_annotations: |
2385 bisect_utils.OutputAnnotationStepClosed() | 2467 bisect_utils.OutputAnnotationStepClosed() |
2386 | 2468 |
2387 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision): | 2469 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision): |
2388 """Checks to see if changes to DEPS file occurred, and that the revision | 2470 """Checks to see if changes to DEPS file occurred, and that the revision |
2389 range also includes the change to .DEPS.git. If it doesn't, attempts to | 2471 range also includes the change to .DEPS.git. If it doesn't, attempts to |
2390 expand the revision range to include it. | 2472 expand the revision range to include it. |
2391 | 2473 |
2392 Args: | 2474 Args: |
2393 bad_rev: First known bad revision. | 2475 bad_rev: First known bad revision. |
2394 good_revision: Last known good revision. | 2476 good_revision: Last known good revision. |
2395 | 2477 |
2396 Returns: | 2478 Returns: |
2397 A tuple with the new bad and good revisions. | 2479 A tuple with the new bad and good revisions. |
2398 """ | 2480 """ |
2399 if self.source_control.IsGit() and self.opts.target_platform == 'chromium': | 2481 if self.source_control.IsGit() and self.opts.target_platform == 'chromium': |
2400 changes_to_deps = self.source_control.QueryFileRevisionHistory( | 2482 changes_to_deps = self.source_control.QueryFileRevisionHistory( |
2401 'DEPS', good_revision, bad_revision) | 2483 'DEPS', good_revision, bad_revision) |
2402 | 2484 |
2403 if changes_to_deps: | 2485 if changes_to_deps: |
2404 # DEPS file was changed, search from the oldest change to DEPS file to | 2486 # DEPS file was changed, search from the oldest change to DEPS file to |
2405 # bad_revision to see if there are matching .DEPS.git changes. | 2487 # bad_revision to see if there are matching .DEPS.git changes. |
2406 oldest_deps_change = changes_to_deps[-1] | 2488 oldest_deps_change = changes_to_deps[-1] |
2407 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory( | 2489 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory( |
(...skipping 14 matching lines...)
2422 output = output.strip() | 2504 output = output.strip() |
2423 if output: | 2505 if output: |
2424 self.warnings.append('Detected change to DEPS and modified ' | 2506 self.warnings.append('Detected change to DEPS and modified ' |
2425 'revision range to include change to .DEPS.git') | 2507 'revision range to include change to .DEPS.git') |
2426 return (output, good_revision) | 2508 return (output, good_revision) |
2427 else: | 2509 else: |
2428 self.warnings.append('Detected change to DEPS but couldn\'t find ' | 2510 self.warnings.append('Detected change to DEPS but couldn\'t find ' |
2429 'matching change to .DEPS.git') | 2511 'matching change to .DEPS.git') |
2430 return (bad_revision, good_revision) | 2512 return (bad_revision, good_revision) |
2431 | 2513 |
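The branch structure of NudgeRevisionsIfDEPSChange is easier to see without the git plumbing: if DEPS changed inside the range, look for the auto-committed .DEPS.git counterpart and, if one is found, move bad_revision forward to include it; otherwise keep the range and record a warning. This is a rough schematic only; query_file_history and pick_matching_gitdeps are hypothetical stand-ins for QueryFileRevisionHistory and the matching logic in the lines elided above:

    def nudge_for_deps_change(query_file_history, pick_matching_gitdeps,
                              bad_rev, good_rev, warnings):
      deps_changes = query_file_history('DEPS', good_rev, bad_rev)
      if not deps_changes:
        return bad_rev, good_rev
      # Search from the oldest DEPS change towards bad_rev for .DEPS.git commits.
      gitdeps_changes = query_file_history('.DEPS.git', deps_changes[-1], bad_rev)
      new_bad = pick_matching_gitdeps(deps_changes, gitdeps_changes)
      if new_bad:
        warnings.append('Detected change to DEPS and modified revision range '
                        'to include change to .DEPS.git')
        return new_bad, good_rev
      warnings.append("Detected change to DEPS but couldn't find matching "
                      'change to .DEPS.git')
      return bad_rev, good_rev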
2432 def CheckIfRevisionsInProperOrder(self, | 2514 def CheckIfRevisionsInProperOrder( |
2433 target_depot, | 2515 self, target_depot, good_revision, bad_revision): |
2434 good_revision, | |
2435 bad_revision): | |
2436 """Checks that |good_revision| is an earlier revision than |bad_revision|. | 2516 """Checks that |good_revision| is an earlier revision than |bad_revision|. |
2437 | 2517 |
2438 Args: | 2518 Args: |
2439 good_revision: Number/tag of the known good revision. | 2519 good_revision: Number/tag of the known good revision. |
2440 bad_revision: Number/tag of the known bad revision. | 2520 bad_revision: Number/tag of the known bad revision. |
2441 | 2521 |
2442 Returns: | 2522 Returns: |
2443 True if the revisions are in the proper order (good earlier than bad). | 2523 True if the revisions are in the proper order (good earlier than bad). |
2444 """ | 2524 """ |
2445 if self.source_control.IsGit() and target_depot != 'cros': | 2525 if self.source_control.IsGit() and target_depot != 'cros': |
2446 cmd = ['log', '--format=%ct', '-1', good_revision] | 2526 cmd = ['log', '--format=%ct', '-1', good_revision] |
2447 cwd = self._GetDepotDirectory(target_depot) | 2527 cwd = self._GetDepotDirectory(target_depot) |
2448 | 2528 |
2449 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | 2529 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) |
2450 good_commit_time = int(output) | 2530 good_commit_time = int(output) |
2451 | 2531 |
2452 cmd = ['log', '--format=%ct', '-1', bad_revision] | 2532 cmd = ['log', '--format=%ct', '-1', bad_revision] |
2453 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | 2533 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) |
(...skipping 29 matching lines...)
2483 'built local ChromeShell (refer to crbug.com/385324).\n' | 2563 'built local ChromeShell (refer to crbug.com/385324).\n' |
2484 'Please try bisecting revisions greater than or equal to r265549.')} | 2564 'Please try bisecting revisions greater than or equal to r265549.')} |
2485 return None | 2565 return None |
2486 | 2566 |
2487 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): | 2567 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): |
2488 """Given known good and bad revisions, run a binary search on all | 2568 """Given known good and bad revisions, run a binary search on all |
2489 intermediate revisions to determine the CL where the performance regression | 2569 intermediate revisions to determine the CL where the performance regression |
2490 occurred. | 2570 occurred. |
2491 | 2571 |
2492 Args: | 2572 Args: |
2493 command_to_run: Specify the command to execute the performance test. | 2573 command_to_run: Specify the command to execute the performance test. |
2494 good_revision: Number/tag of the known good revision. | 2574 good_revision: Number/tag of the known good revision. |
2495 bad_revision: Number/tag of the known bad revision. | 2575 bad_revision: Number/tag of the known bad revision. |
2496 metric: The performance metric to monitor. | 2576 metric: The performance metric to monitor. |
2497 | 2577 |
2498 Returns: | 2578 Returns: |
2499 A dict with 2 members, 'revision_data' and 'error'. On success, | 2579 A dict with 2 members, 'revision_data' and 'error'. On success, |
2500 'revision_data' will contain a dict mapping revision ids to | 2580 'revision_data' will contain a dict mapping revision ids to |
2501 data about that revision. Each piece of revision data consists of a | 2581 data about that revision. Each piece of revision data consists of a |
2502 dict with the following keys: | 2582 dict with the following keys: |
2503 | 2583 |
2504 'passed': Represents whether the performance test was successful at | 2584 'passed': Represents whether the performance test was successful at |
2505 that revision. Possible values include: 1 (passed), 0 (failed), | 2585 that revision. Possible values include: 1 (passed), 0 (failed), |
2506 '?' (skipped), 'F' (build failed). | 2586 '?' (skipped), 'F' (build failed). |
2507 'depot': The depot that this revision is from (ie. WebKit) | 2587 'depot': The depot that this revision is from (ie. WebKit) |
2508 'external': If the revision is a 'src' revision, 'external' contains | 2588 'external': If the revision is a 'src' revision, 'external' contains |
2509 the revisions of each of the external libraries. | 2589 the revisions of each of the external libraries. |
2510 'sort': A sort value for sorting the dict in order of commits. | 2590 'sort': A sort value for sorting the dict in order of commits. |
2511 | 2591 |
2512 For example: | 2592 For example: |
| 2593 { |
| 2594 'error':None, |
| 2595 'revision_data': |
2513 { | 2596 { |
2514 'error':None, | 2597 'CL #1': |
2515 'revision_data': | |
2516 { | 2598 { |
2517 'CL #1': | 2599 'passed':False, |
2518 { | 2600 'depot':'chromium', |
2519 'passed':False, | 2601 'external':None, |
2520 'depot':'chromium', | 2602 'sort':0 |
2521 'external':None, | |
2522 'sort':0 | |
2523 } | |
2524 } | 2603 } |
2525 } | 2604 } |
| 2605 } |
2526 | 2606 |
2527 If an error occurred, the 'error' field will contain the message and | 2607 If an error occurred, the 'error' field will contain the message and |
2528 'revision_data' will be empty. | 2608 'revision_data' will be empty. |
2529 """ | 2609 """ |
2530 results = {'revision_data' : {}, | 2610 results = { |
2531 'error' : None} | 2611 'revision_data' : {}, |
| 2612 'error' : None, |
| 2613 } |
2532 | 2614 |
2533 # Choose depot to bisect first | 2615 # Choose depot to bisect first |
2534 target_depot = 'chromium' | 2616 target_depot = 'chromium' |
2535 if self.opts.target_platform == 'cros': | 2617 if self.opts.target_platform == 'cros': |
2536 target_depot = 'cros' | 2618 target_depot = 'cros' |
2537 elif self.opts.target_platform == 'android-chrome': | 2619 elif self.opts.target_platform == 'android-chrome': |
2538 target_depot = 'android-chrome' | 2620 target_depot = 'android-chrome' |
2539 | 2621 |
2540 cwd = os.getcwd() | 2622 cwd = os.getcwd() |
2541 self.ChangeToDepotWorkingDirectory(target_depot) | 2623 self.ChangeToDepotWorkingDirectory(target_depot) |
2542 | 2624 |
2543 # If they passed SVN CL's, etc... we can try to match them to git SHA1's. | 2625 # If they passed SVN CL's, etc... we can try to match them to git SHA1's. |
2544 bad_revision = self.source_control.ResolveToRevision( | 2626 bad_revision = self.source_control.ResolveToRevision( |
2545 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100) | 2627 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100) |
2546 good_revision = self.source_control.ResolveToRevision( | 2628 good_revision = self.source_control.ResolveToRevision( |
2547 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100) | 2629 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100) |
2548 | 2630 |
2549 os.chdir(cwd) | 2631 os.chdir(cwd) |
2550 | 2632 |
2551 | |
2552 if bad_revision is None: | 2633 if bad_revision is None: |
2553 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,) | 2634 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,) |
2554 return results | 2635 return results |
2555 | 2636 |
2556 if good_revision is None: | 2637 if good_revision is None: |
2557 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,) | 2638 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,) |
2558 return results | 2639 return results |
2559 | 2640 |
2560 # Check that they didn't accidentally swap good and bad revisions. | 2641 # Check that they didn't accidentally swap good and bad revisions. |
2561 if not self.CheckIfRevisionsInProperOrder( | 2642 if not self.CheckIfRevisionsInProperOrder( |
2562 target_depot, good_revision, bad_revision): | 2643 target_depot, good_revision, bad_revision): |
2563 results['error'] = 'bad_revision < good_revision, did you swap these '\ | 2644 results['error'] = ('bad_revision < good_revision, did you swap these ' |
2564 'by mistake?' | 2645 'by mistake?') |
2565 return results | 2646 return results |
2566 | 2647 |
2567 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange( | 2648 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange( |
2568 bad_revision, good_revision) | 2649 bad_revision, good_revision) |
2569 | 2650 |
2570 if self.opts.output_buildbot_annotations: | 2651 if self.opts.output_buildbot_annotations: |
2571 bisect_utils.OutputAnnotationStepStart('Gathering Revisions') | 2652 bisect_utils.OutputAnnotationStepStart('Gathering Revisions') |
2572 | 2653 |
2573 cannot_bisect = self.CanPerformBisect(good_revision) | 2654 cannot_bisect = self.CanPerformBisect(good_revision) |
2574 if cannot_bisect: | 2655 if cannot_bisect: |
2575 results['error'] = cannot_bisect.get('error') | 2656 results['error'] = cannot_bisect.get('error') |
2576 return results | 2657 return results |
2577 | 2658 |
2578 print 'Gathering revision range for bisection.' | 2659 print 'Gathering revision range for bisection.' |
2579 # Retrieve a list of revisions to do bisection on. | 2660 # Retrieve a list of revisions to do bisection on. |
2580 src_revision_list = self.GetRevisionList(target_depot, | 2661 src_revision_list = self.GetRevisionList( |
2581 bad_revision, | 2662 target_depot, bad_revision, good_revision) |
2582 good_revision) | |
2583 | 2663 |
2584 if self.opts.output_buildbot_annotations: | 2664 if self.opts.output_buildbot_annotations: |
2585 bisect_utils.OutputAnnotationStepClosed() | 2665 bisect_utils.OutputAnnotationStepClosed() |
2586 | 2666 |
2587 if src_revision_list: | 2667 if src_revision_list: |
2588 # revision_data will store information about a revision such as the | 2668 # revision_data will store information about a revision such as the |
2589 # depot it came from, the webkit/V8 revision at that time, | 2669 # depot it came from, the webkit/V8 revision at that time, |
2590 # performance timing, build state, etc... | 2670 # performance timing, build state, etc... |
2591 revision_data = results['revision_data'] | 2671 revision_data = results['revision_data'] |
2592 | 2672 |
2593 # revision_list is the list we're binary searching through at the moment. | 2673 # revision_list is the list we're binary searching through at the moment. |
2594 revision_list = [] | 2674 revision_list = [] |
2595 | 2675 |
2596 sort_key_ids = 0 | 2676 sort_key_ids = 0 |
2597 | 2677 |
2598 for current_revision_id in src_revision_list: | 2678 for current_revision_id in src_revision_list: |
2599 sort_key_ids += 1 | 2679 sort_key_ids += 1 |
2600 | 2680 |
2601 revision_data[current_revision_id] = {'value' : None, | 2681 revision_data[current_revision_id] = { |
2602 'passed' : '?', | 2682 'value' : None, |
2603 'depot' : target_depot, | 2683 'passed' : '?', |
2604 'external' : None, | 2684 'depot' : target_depot, |
2605 'perf_time' : 0, | 2685 'external' : None, |
2606 'build_time' : 0, | 2686 'perf_time' : 0, |
2607 'sort' : sort_key_ids} | 2687 'build_time' : 0, |
| 2688 'sort' : sort_key_ids, |
| 2689 } |
2608 revision_list.append(current_revision_id) | 2690 revision_list.append(current_revision_id) |
2609 | 2691 |
2610 min_revision = 0 | 2692 min_revision = 0 |
2611 max_revision = len(revision_list) - 1 | 2693 max_revision = len(revision_list) - 1 |
2612 | 2694 |
2613 self.PrintRevisionsToBisectMessage(revision_list, target_depot) | 2695 self.PrintRevisionsToBisectMessage(revision_list, target_depot) |
2614 | 2696 |
2615 if self.opts.output_buildbot_annotations: | 2697 if self.opts.output_buildbot_annotations: |
2616 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values') | 2698 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values') |
2617 | 2699 |
2618 print 'Gathering reference values for bisection.' | 2700 print 'Gathering reference values for bisection.' |
2619 | 2701 |
2620 # Perform the performance tests on the good and bad revisions, to get | 2702 # Perform the performance tests on the good and bad revisions, to get |
2621 # reference values. | 2703 # reference values. |
2622 (bad_results, good_results) = self.GatherReferenceValues(good_revision, | 2704 bad_results, good_results = self.GatherReferenceValues(good_revision, |
2623 bad_revision, | 2705 bad_revision, |
2624 command_to_run, | 2706 command_to_run, |
2625 metric, | 2707 metric, |
2626 target_depot) | 2708 target_depot) |
2627 | 2709 |
2628 if self.opts.output_buildbot_annotations: | 2710 if self.opts.output_buildbot_annotations: |
2629 bisect_utils.OutputAnnotationStepClosed() | 2711 bisect_utils.OutputAnnotationStepClosed() |
2630 | 2712 |
2631 if bad_results[1]: | 2713 if bad_results[1]: |
2632 results['error'] = ('An error occurred while building and running ' | 2714 results['error'] = ('An error occurred while building and running ' |
(...skipping 43 matching lines...)
2676 if max_revision - min_revision <= 1: | 2758 if max_revision - min_revision <= 1: |
2677 current_depot = min_revision_data['depot'] | 2759 current_depot = min_revision_data['depot'] |
2678 if min_revision_data['passed'] == '?': | 2760 if min_revision_data['passed'] == '?': |
2679 next_revision_index = min_revision | 2761 next_revision_index = min_revision |
2680 elif max_revision_data['passed'] == '?': | 2762 elif max_revision_data['passed'] == '?': |
2681 next_revision_index = max_revision | 2763 next_revision_index = max_revision |
2682 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']: | 2764 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']: |
2683 previous_revision = revision_list[min_revision] | 2765 previous_revision = revision_list[min_revision] |
2684 # If there were changes to any of the external libraries we track, | 2766 # If there were changes to any of the external libraries we track, |
2685 # should bisect the changes there as well. | 2767 # should bisect the changes there as well. |
2686 external_depot = self._FindNextDepotToBisect(current_depot, | 2768 external_depot = self._FindNextDepotToBisect( |
2687 previous_revision, min_revision_data, max_revision_data) | 2769 current_depot, min_revision_data, max_revision_data) |
2688 | 2770 |
2689 # If there was no change in any of the external depots, the search | 2771 # If there was no change in any of the external depots, the search |
2690 # is over. | 2772 # is over. |
2691 if not external_depot: | 2773 if not external_depot: |
2692 if current_depot == 'v8': | 2774 if current_depot == 'v8': |
2693 self.warnings.append('Unfortunately, V8 bisection couldn\'t ' | 2775 self.warnings.append('Unfortunately, V8 bisection couldn\'t ' |
2694 'continue any further. The script can only bisect into ' | 2776 'continue any further. The script can only bisect into ' |
2695 'V8\'s bleeding_edge repository if both the current and ' | 2777 'V8\'s bleeding_edge repository if both the current and ' |
2696 'previous revisions in trunk map directly to revisions in ' | 2778 'previous revisions in trunk map directly to revisions in ' |
2697 'bleeding_edge.') | 2779 'bleeding_edge.') |
2698 break | 2780 break |
2699 | 2781 |
2700 earliest_revision = max_revision_data['external'][external_depot] | 2782 earliest_revision = max_revision_data['external'][external_depot] |
2701 latest_revision = min_revision_data['external'][external_depot] | 2783 latest_revision = min_revision_data['external'][external_depot] |
2702 | 2784 |
2703 new_revision_list = self.PrepareToBisectOnDepot(external_depot, | 2785 new_revision_list = self.PrepareToBisectOnDepot( |
2704 latest_revision, | 2786 external_depot, latest_revision, earliest_revision, |
2705 earliest_revision, | 2787 previous_revision) |
2706 next_revision_depot, | |
2707 previous_revision) | |
2708 | 2788 |
2709 if not new_revision_list: | 2789 if not new_revision_list: |
2710 results['error'] = 'An error occurred attempting to retrieve'\ | 2790 results['error'] = ('An error occurred attempting to retrieve ' |
2711 ' revision range: [%s..%s]' % \ | 2791 'revision range: [%s..%s]' % |
2712 (earliest_revision, latest_revision) | 2792 (earliest_revision, latest_revision)) |
2713 return results | 2793 return results |
2714 | 2794 |
2715 self.AddRevisionsIntoRevisionData(new_revision_list, | 2795 _AddRevisionsIntoRevisionData( |
2716 external_depot, | 2796 new_revision_list, external_depot, min_revision_data['sort'], |
2717 min_revision_data['sort'], | 2797 revision_data) |
2718 revision_data) | |
2719 | 2798 |
2720 # Reset the bisection and perform it on the newly inserted | 2799 # Reset the bisection and perform it on the newly inserted |
2721 # changelists. | 2800 # changelists. |
2722 revision_list = new_revision_list | 2801 revision_list = new_revision_list |
2723 min_revision = 0 | 2802 min_revision = 0 |
2724 max_revision = len(revision_list) - 1 | 2803 max_revision = len(revision_list) - 1 |
2725 sort_key_ids += len(revision_list) | 2804 sort_key_ids += len(revision_list) |
2726 | 2805 |
2727 print 'Regression in metric:%s appears to be the result of changes'\ | 2806 print ('Regression in metric %s appears to be the result of ' |
2728 ' in [%s].' % (metric, external_depot) | 2807 'changes in [%s].' % (metric, external_depot)) |
2729 | 2808 |
2730 self.PrintRevisionsToBisectMessage(revision_list, external_depot) | 2809 self.PrintRevisionsToBisectMessage(revision_list, external_depot) |
2731 | 2810 |
2732 continue | 2811 continue |
2733 else: | 2812 else: |
2734 break | 2813 break |
2735 else: | 2814 else: |
2736 next_revision_index = int((max_revision - min_revision) / 2) +\ | 2815 next_revision_index = (int((max_revision - min_revision) / 2) + |
2737 min_revision | 2816 min_revision) |
2738 | 2817 |
2739 next_revision_id = revision_list[next_revision_index] | 2818 next_revision_id = revision_list[next_revision_index] |
2740 next_revision_data = revision_data[next_revision_id] | 2819 next_revision_data = revision_data[next_revision_id] |
2741 next_revision_depot = next_revision_data['depot'] | 2820 next_revision_depot = next_revision_data['depot'] |
2742 | 2821 |
2743 self.ChangeToDepotWorkingDirectory(next_revision_depot) | 2822 self.ChangeToDepotWorkingDirectory(next_revision_depot) |
2744 | 2823 |
2745 if self.opts.output_buildbot_annotations: | 2824 if self.opts.output_buildbot_annotations: |
2746 step_name = 'Working on [%s]' % next_revision_id | 2825 step_name = 'Working on [%s]' % next_revision_id |
2747 bisect_utils.OutputAnnotationStepStart(step_name) | 2826 bisect_utils.OutputAnnotationStepStart(step_name) |
(...skipping 35 matching lines...)
2783 # If the build is broken, remove it and redo search. | 2862 # If the build is broken, remove it and redo search. |
2784 revision_list.pop(next_revision_index) | 2863 revision_list.pop(next_revision_index) |
2785 | 2864 |
2786 max_revision -= 1 | 2865 max_revision -= 1 |
2787 | 2866 |
2788 if self.opts.output_buildbot_annotations: | 2867 if self.opts.output_buildbot_annotations: |
2789 self._PrintPartialResults(results) | 2868 self._PrintPartialResults(results) |
2790 bisect_utils.OutputAnnotationStepClosed() | 2869 bisect_utils.OutputAnnotationStepClosed() |
2791 else: | 2870 else: |
2792 # Weren't able to sync and retrieve the revision range. | 2871 # Weren't able to sync and retrieve the revision range. |
2793 results['error'] = 'An error occurred attempting to retrieve revision '\ | 2872 results['error'] = ('An error occurred attempting to retrieve revision ' |
2794 'range: [%s..%s]' % (good_revision, bad_revision) | 2873 'range: [%s..%s]' % (good_revision, bad_revision)) |
2795 | 2874 |
2796 return results | 2875 return results |
2797 | 2876 |
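Stripped of the depot switching, reference runs and build handling, the loop above is a plain binary search over a list whose entries start out untested ('?') and get marked good or bad as they are built and measured. A compact sketch of just that skeleton (test_revision is a hypothetical callable returning True when a revision behaves like the known-good endpoint; as in the script, index 0 is the bad end of the list):

    def bisect_indices(revision_list, test_revision):
      """Returns (last_bad_index, first_good_index) once they become adjacent."""
      min_index, max_index = 0, len(revision_list) - 1
      while max_index - min_index > 1:
        mid = min_index + (max_index - min_index) // 2
        if test_revision(revision_list[mid]):
          max_index = mid   # mid looks good, so the culprit is a newer revision.
        else:
          min_index = mid   # mid still regresses; the culprit is mid or older.
      return min_index, max_index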
2798 def _PrintPartialResults(self, results_dict): | 2877 def _PrintPartialResults(self, results_dict): |
2799 revision_data = results_dict['revision_data'] | 2878 revision_data = results_dict['revision_data'] |
2800 revision_data_sorted = sorted(revision_data.iteritems(), | 2879 revision_data_sorted = sorted(revision_data.iteritems(), |
2801 key = lambda x: x[1]['sort']) | 2880 key = lambda x: x[1]['sort']) |
2802 results_dict = self._GetResultsDict(revision_data, revision_data_sorted) | 2881 results_dict = self._GetResultsDict(revision_data, revision_data_sorted) |
2803 | 2882 |
2804 self._PrintTestedCommitsTable(revision_data_sorted, | 2883 self._PrintTestedCommitsTable(revision_data_sorted, |
2805 results_dict['first_working_revision'], | 2884 results_dict['first_working_revision'], |
2806 results_dict['last_broken_revision'], | 2885 results_dict['last_broken_revision'], |
2807 100, final_step=False) | 2886 100, final_step=False) |
2808 | 2887 |
2809 def _PrintConfidence(self, results_dict): | |
2810 # The perf dashboard specifically looks for the string | |
2811 # "Confidence in Bisection Results: 100%" to decide whether or not | |
2812 # to cc the author(s). If you change this, please update the perf | |
2813 # dashboard as well. | |
2814 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence'] | |
2815 | |
2816 def _ConfidenceLevelStatus(self, results_dict): | 2888 def _ConfidenceLevelStatus(self, results_dict): |
2817 if not results_dict['confidence']: | 2889 if not results_dict['confidence']: |
2818 return None | 2890 return None |
2819 confidence_status = 'Successful with %(level)s confidence%(warning)s.' | 2891 confidence_status = 'Successful with %(level)s confidence%(warning)s.' |
2820 if results_dict['confidence'] >= 95: | 2892 if results_dict['confidence'] >= 95: |
2821 level = 'high' | 2893 level = 'high' |
2822 else: | 2894 else: |
2823 level = 'low' | 2895 level = 'low' |
2824 warning = ' and warnings' | 2896 warning = ' and warnings' |
2825 if not self.warnings: | 2897 if not self.warnings: |
2826 warning = '' | 2898 warning = '' |
2827 return confidence_status % {'level': level, 'warning': warning} | 2899 return confidence_status % {'level': level, 'warning': warning} |
2828 | 2900 |
2829 def _PrintThankYou(self): | |
2830 print RESULTS_THANKYOU | |
2831 | |
2832 def _PrintBanner(self, results_dict): | |
2833 if self._IsBisectModeReturnCode(): | |
2834 metrics = 'N/A' | |
2835 change = 'Yes' | |
2836 else: | |
2837 metrics = '/'.join(self.opts.metric) | |
2838 change = '%.02f%% (+/-%.02f%%)' % ( | |
2839 results_dict['regression_size'], results_dict['regression_std_err']) | |
2840 | |
2841 if results_dict['culprit_revisions'] and results_dict['confidence']: | |
2842 status = self._ConfidenceLevelStatus(results_dict) | |
2843 else: | |
2844 status = 'Failure, could not reproduce.' | |
2845 change = 'Bisect could not reproduce a change.' | |
2846 | |
2847 print RESULTS_BANNER % { | |
2848 'status': status, | |
2849 'command': self.opts.command, | |
2850 'metrics': metrics, | |
2851 'change': change, | |
2852 'confidence': results_dict['confidence'], | |
2853 } | |
2854 | |
2855 | |
2856 def _PrintFailedBanner(self, results_dict): | |
2857 print | |
2858 if self._IsBisectModeReturnCode(): | |
2859 print 'Bisect could not reproduce a change in the return code.' | |
2860 else: | |
2861 print ('Bisect could not reproduce a change in the ' | |
2862 '%s metric.' % '/'.join(self.opts.metric)) | |
2863 print | |
2864 | |
2865 def _GetViewVCLinkFromDepotAndHash(self, cl, depot): | 2901 def _GetViewVCLinkFromDepotAndHash(self, cl, depot): |
2866 info = self.source_control.QueryRevisionInfo(cl, | 2902 info = self.source_control.QueryRevisionInfo(cl, |
2867 self._GetDepotDirectory(depot)) | 2903 self._GetDepotDirectory(depot)) |
2868 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'): | 2904 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'): |
2869 try: | 2905 try: |
2870 # Format is "git-svn-id: svn://....@123456 <other data>" | 2906 # Format is "git-svn-id: svn://....@123456 <other data>" |
2871 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i] | 2907 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i] |
2872 svn_revision = svn_line[0].split('@') | 2908 svn_revision = svn_line[0].split('@') |
2873 svn_revision = svn_revision[1].split(' ')[0] | 2909 svn_revision = svn_revision[1].split(' ')[0] |
2874 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision | 2910 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision |
(...skipping 11 matching lines...)
2886 else: | 2922 else: |
2887 commit_info = ('\nFailed to parse svn revision from body:\n%s' % | 2923 commit_info = ('\nFailed to parse svn revision from body:\n%s' % |
2888 info['body']) | 2924 info['body']) |
2889 print RESULTS_REVISION_INFO % { | 2925 print RESULTS_REVISION_INFO % { |
2890 'subject': info['subject'], | 2926 'subject': info['subject'], |
2891 'author': info['author'], | 2927 'author': info['author'], |
2892 'email_info': email_info, | 2928 'email_info': email_info, |
2893 'commit_info': commit_info, | 2929 'commit_info': commit_info, |
2894 'cl': cl, | 2930 'cl': cl, |
2895 'cl_date': info['date'] | 2931 'cl_date': info['date'] |
2896 } | 2932 } |
2897 | |
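The viewvc link above hinges on pulling the SVN revision out of a git-svn commit message; the parsing is plain string surgery on the git-svn-id: trailer. A tiny standalone version (the commit body is a made-up example):

    def svn_revision_from_body(body):
      """Extracts '123456' from a 'git-svn-id: svn://...@123456 ...' trailer."""
      svn_lines = [l for l in body.splitlines() if 'git-svn-id:' in l]
      if not svn_lines:
        return None
      return svn_lines[0].split('@')[1].split(' ')[0]

    body = ('Fix a thing.\n\n'
            'git-svn-id: svn://svn.chromium.org/chrome/trunk/src@123456 0039d316')
    print(svn_revision_from_body(body))  # -> 123456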
2898 def _PrintTableRow(self, column_widths, row_data): | |
2899 assert len(column_widths) == len(row_data) | |
2900 | |
2901 text = '' | |
2902 for i in xrange(len(column_widths)): | |
2903 current_row_data = row_data[i].center(column_widths[i], ' ') | |
2904 text += ('%%%ds' % column_widths[i]) % current_row_data | |
2905 print text | |
2906 | 2933 |
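_PrintTableRow (removed here in favour of a module-level helper) builds its format string dynamically: '%%%ds' % 14 yields '%14s', which is then applied to a cell that has already been centred to the same width. An equivalent one-row helper:

    def print_table_row(column_widths, row_data):
      assert len(column_widths) == len(row_data)
      cells = [('%%%ds' % width) % cell.center(width, ' ')
               for width, cell in zip(column_widths, row_data)]
      print(''.join(cells))

    print_table_row([20, 70, 14, 13], ['Depot', 'Commit SHA', 'Return Code', 'State'])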
2907 def _PrintTestedCommitsHeader(self): | 2934 def _PrintTestedCommitsHeader(self): |
2908 if self.opts.bisect_mode == BISECT_MODE_MEAN: | 2935 if self.opts.bisect_mode == BISECT_MODE_MEAN: |
2909 self._PrintTableRow( | 2936 _PrintTableRow( |
2910 [20, 70, 14, 12, 13], | 2937 [20, 70, 14, 12, 13], |
2911 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State']) | 2938 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State']) |
2912 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: | 2939 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: |
2913 self._PrintTableRow( | 2940 _PrintTableRow( |
2914 [20, 70, 14, 12, 13], | 2941 [20, 70, 14, 12, 13], |
2915 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State']) | 2942 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State']) |
2916 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: | 2943 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: |
2917 self._PrintTableRow( | 2944 _PrintTableRow( |
2918 [20, 70, 14, 13], | 2945 [20, 70, 14, 13], |
2919 ['Depot', 'Commit SHA', 'Return Code', 'State']) | 2946 ['Depot', 'Commit SHA', 'Return Code', 'State']) |
2920 else: | 2947 else: |
2921 assert False, "Invalid bisect_mode specified." | 2948 assert False, 'Invalid bisect_mode specified.' |
2922 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '), | |
2923 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '), | |
2924 'State'.center(13, ' ')) | |
2925 | 2949 |
2926 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str): | 2950 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str): |
2927 if self.opts.bisect_mode == BISECT_MODE_MEAN: | 2951 if self.opts.bisect_mode == BISECT_MODE_MEAN: |
2928 std_error = '+-%.02f' % current_data['value']['std_err'] | 2952 std_error = '+-%.02f' % current_data['value']['std_err'] |
2929 mean = '%.02f' % current_data['value']['mean'] | 2953 mean = '%.02f' % current_data['value']['mean'] |
2930 self._PrintTableRow( | 2954 _PrintTableRow( |
2931 [20, 70, 12, 14, 13], | 2955 [20, 70, 12, 14, 13], |
2932 [current_data['depot'], cl_link, mean, std_error, state_str]) | 2956 [current_data['depot'], cl_link, mean, std_error, state_str]) |
2933 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: | 2957 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: |
2934 std_error = '+-%.02f' % current_data['value']['std_err'] | 2958 std_error = '+-%.02f' % current_data['value']['std_err'] |
2935 mean = '%.02f' % current_data['value']['mean'] | 2959 mean = '%.02f' % current_data['value']['mean'] |
2936 self._PrintTableRow( | 2960 _PrintTableRow( |
2937 [20, 70, 12, 14, 13], | 2961 [20, 70, 12, 14, 13], |
2938 [current_data['depot'], cl_link, std_error, mean, state_str]) | 2962 [current_data['depot'], cl_link, std_error, mean, state_str]) |
2939 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: | 2963 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: |
2940 mean = '%d' % current_data['value']['mean'] | 2964 mean = '%d' % current_data['value']['mean'] |
2941 self._PrintTableRow( | 2965 _PrintTableRow( |
2942 [20, 70, 14, 13], | 2966 [20, 70, 14, 13], |
2943 [current_data['depot'], cl_link, mean, state_str]) | 2967 [current_data['depot'], cl_link, mean, state_str]) |
2944 | 2968 |
2945 def _PrintTestedCommitsTable(self, revision_data_sorted, | 2969 def _PrintTestedCommitsTable( |
2946 first_working_revision, last_broken_revision, confidence, | 2970 self, revision_data_sorted, first_working_revision, last_broken_revision, |
2947 final_step=True): | 2971 confidence, final_step=True): |
2948 print | 2972 print |
2949 if final_step: | 2973 if final_step: |
2950 print '===== TESTED COMMITS =====' | 2974 print '===== TESTED COMMITS =====' |
2951 else: | 2975 else: |
2952 print '===== PARTIAL RESULTS =====' | 2976 print '===== PARTIAL RESULTS =====' |
2953 self._PrintTestedCommitsHeader() | 2977 self._PrintTestedCommitsHeader() |
2954 state = 0 | 2978 state = 0 |
2955 for current_id, current_data in revision_data_sorted: | 2979 for current_id, current_data in revision_data_sorted: |
2956 if current_data['value']: | 2980 if current_data['value']: |
2957 if (current_id == last_broken_revision or | 2981 if (current_id == last_broken_revision or |
(...skipping 18 matching lines...)
2976 state_str = '' | 3000 state_str = '' |
2977 state_str = state_str.center(13, ' ') | 3001 state_str = state_str.center(13, ' ') |
2978 | 3002 |
2979 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id, | 3003 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id, |
2980 current_data['depot']) | 3004 current_data['depot']) |
2981 if not cl_link: | 3005 if not cl_link: |
2982 cl_link = current_id | 3006 cl_link = current_id |
2983 self._PrintTestedCommitsEntry(current_data, cl_link, state_str) | 3007 self._PrintTestedCommitsEntry(current_data, cl_link, state_str) |
2984 | 3008 |
2985 def _PrintReproSteps(self): | 3009 def _PrintReproSteps(self): |
| 3010 """Prints out a section of the results explaining how to run the test. |
| 3011 |
| 3012 This message includes the command used to run the test. |
| 3013 """ |
2986 command = '$ ' + self.opts.command | 3014 command = '$ ' + self.opts.command |
2987 if bisect_utils.IsTelemetryCommand(self.opts.command): | 3015 if bisect_utils.IsTelemetryCommand(self.opts.command): |
2988 command += ('\nAlso consider passing --profiler=list to see available ' | 3016 command += ('\nAlso consider passing --profiler=list to see available ' |
2989 'profilers.') | 3017 'profilers.') |
2990 print REPRO_STEPS_LOCAL % {'command': command} | 3018 print REPRO_STEPS_LOCAL % {'command': command} |
2991 print REPRO_STEPS_TRYJOB % {'command': command} | 3019 print REPRO_STEPS_TRYJOB % {'command': command} |
2992 | 3020 |
2993 def _PrintOtherRegressions(self, other_regressions, revision_data): | 3021 def _PrintOtherRegressions(self, other_regressions, revision_data): |
| 3022 """Prints a section of the results about other potential regressions.""" |
2994 print | 3023 print |
2995 print 'Other regressions may have occurred:' | 3024 print 'Other regressions may have occurred:' |
2996 print ' %8s %70s %10s' % ('Depot'.center(8, ' '), | 3025 print ' %8s %70s %10s' % ('Depot'.center(8, ' '), |
2997 'Range'.center(70, ' '), 'Confidence'.center(10, ' ')) | 3026 'Range'.center(70, ' '), 'Confidence'.center(10, ' ')) |
2998 for regression in other_regressions: | 3027 for regression in other_regressions: |
2999 current_id, previous_id, confidence = regression | 3028 current_id, previous_id, confidence = regression |
3000 current_data = revision_data[current_id] | 3029 current_data = revision_data[current_id] |
3001 previous_data = revision_data[previous_id] | 3030 previous_data = revision_data[previous_id] |
3002 | 3031 |
3003 current_link = self._GetViewVCLinkFromDepotAndHash(current_id, | 3032 current_link = self._GetViewVCLinkFromDepotAndHash(current_id, |
3004 current_data['depot']) | 3033 current_data['depot']) |
3005 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id, | 3034 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id, |
3006 previous_data['depot']) | 3035 previous_data['depot']) |
3007 | 3036 |
3008 # If we can't map it to a viewable URL, at least show the original hash. | 3037 # If we can't map it to a viewable URL, at least show the original hash. |
3009 if not current_link: | 3038 if not current_link: |
3010 current_link = current_id | 3039 current_link = current_id |
3011 if not previous_link: | 3040 if not previous_link: |
3012 previous_link = previous_id | 3041 previous_link = previous_id |
3013 | 3042 |
3014 print ' %8s %70s %s' % ( | 3043 print ' %8s %70s %s' % ( |
3015 current_data['depot'], current_link, | 3044 current_data['depot'], current_link, |
3016 ('%d%%' % confidence).center(10, ' ')) | 3045 ('%d%%' % confidence).center(10, ' ')) |
3017 print ' %8s %70s' % ( | 3046 print ' %8s %70s' % ( |
3018 previous_data['depot'], previous_link) | 3047 previous_data['depot'], previous_link) |
3019 print | 3048 print |
3020 | 3049 |
3021 def _PrintStepTime(self, revision_data_sorted): | |
3022 step_perf_time_avg = 0.0 | |
3023 step_build_time_avg = 0.0 | |
3024 step_count = 0.0 | |
3025 for _, current_data in revision_data_sorted: | |
3026 if current_data['value']: | |
3027 step_perf_time_avg += current_data['perf_time'] | |
3028 step_build_time_avg += current_data['build_time'] | |
3029 step_count += 1 | |
3030 if step_count: | |
3031 step_perf_time_avg = step_perf_time_avg / step_count | |
3032 step_build_time_avg = step_build_time_avg / step_count | |
3033 print | |
3034 print 'Average build time : %s' % datetime.timedelta( | |
3035 seconds=int(step_build_time_avg)) | |
3036 print 'Average test time : %s' % datetime.timedelta( | |
3037 seconds=int(step_perf_time_avg)) | |
3038 | |
3039 def _PrintWarnings(self): | |
3040 if not self.warnings: | |
3041 return | |
3042 print | |
3043 print 'WARNINGS:' | |
3044 for w in set(self.warnings): | |
3045 print ' ! %s' % w | |
3046 | |
3047 def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good): | |
3048 other_regressions = [] | |
3049 previous_values = [] | |
3050 previous_id = None | |
3051 for current_id, current_data in revision_data_sorted: | |
3052 current_values = current_data['value'] | |
3053 if current_values: | |
3054 current_values = current_values['values'] | |
3055 if previous_values: | |
3056 confidence = ConfidenceScore(previous_values, [current_values]) | |
3057 mean_of_prev_runs = math_utils.Mean(sum(previous_values, [])) | |
3058 mean_of_current_runs = math_utils.Mean(current_values) | |
3059 | |
3060 # Check that the potential regression is in the same direction as | |
3061 # the overall regression. If the mean of the previous runs < the | |
3062 # mean of the current runs, this local regression is in same | |
3063 # direction. | |
3064 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs | |
3065 is_same_direction = (prev_less_than_current if | |
3066 bad_greater_than_good else not prev_less_than_current) | |
3067 | |
3068 # Only report potential regressions with high confidence. | |
3069 if is_same_direction and confidence > 50: | |
3070 other_regressions.append([current_id, previous_id, confidence]) | |
3071 previous_values.append(current_values) | |
3072 previous_id = current_id | |
3073 return other_regressions | |
3074 | |
3075 | |
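_FindOtherRegressions (a module-level helper in the new layout) flags any adjacent pair of tested revisions whose values shift in the same direction as the overall regression with more than 50% confidence. A reduced sketch of that filter, where confidence_score stands in for ConfidenceScore and returns a 0-100 value:

    def find_other_regressions(sorted_samples, bad_greater_than_good,
                               confidence_score):
      """sorted_samples: [(revision_id, [sample, ...]), ...] in bisect sort order."""
      def mean(values):
        return float(sum(values)) / len(values)
      suspects = []
      previous_runs = []   # every sample list seen so far
      previous_id = None
      for rev_id, samples in sorted_samples:
        if previous_runs:
          confidence = confidence_score(previous_runs, [samples])
          prev_mean = mean([x for run in previous_runs for x in run])
          increased = prev_mean < mean(samples)
          # Keep only shifts pointing the same way as the overall regression.
          same_direction = increased if bad_greater_than_good else not increased
          if same_direction and confidence > 50:
            suspects.append((rev_id, previous_id, confidence))
        previous_runs.append(samples)
        previous_id = rev_id
      return suspects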
3076 def _GetResultsDict(self, revision_data, revision_data_sorted): | 3050 def _GetResultsDict(self, revision_data, revision_data_sorted): |
3077 # Find range where it possibly broke. | 3051 # Find range where it possibly broke. |
3078 first_working_revision = None | 3052 first_working_revision = None |
3079 first_working_revision_index = -1 | 3053 first_working_revision_index = -1 |
3080 last_broken_revision = None | 3054 last_broken_revision = None |
3081 last_broken_revision_index = -1 | 3055 last_broken_revision_index = -1 |
3082 | 3056 |
3083 for i in xrange(len(revision_data_sorted)): | 3057 for i in xrange(len(revision_data_sorted)): |
3084 k, v = revision_data_sorted[i] | 3058 k, v = revision_data_sorted[i] |
3085 if v['passed'] == 1: | 3059 if v['passed'] == 1: |
(...skipping 43 matching lines...)
3129 cwd = os.getcwd() | 3103 cwd = os.getcwd() |
3130 self.ChangeToDepotWorkingDirectory( | 3104 self.ChangeToDepotWorkingDirectory( |
3131 revision_data[last_broken_revision]['depot']) | 3105 revision_data[last_broken_revision]['depot']) |
3132 | 3106 |
3133 if revision_data[last_broken_revision]['depot'] == 'cros': | 3107 if revision_data[last_broken_revision]['depot'] == 'cros': |
3134 # Want to get a list of all the commits and what depots they belong | 3108 # Want to get a list of all the commits and what depots they belong |
3135 # to so that we can grab info about each. | 3109 # to so that we can grab info about each. |
3136 cmd = ['repo', 'forall', '-c', | 3110 cmd = ['repo', 'forall', '-c', |
3137 'pwd ; git log --pretty=oneline --before=%d --after=%d' % ( | 3111 'pwd ; git log --pretty=oneline --before=%d --after=%d' % ( |
3138 last_broken_revision, first_working_revision + 1)] | 3112 last_broken_revision, first_working_revision + 1)] |
3139 (output, return_code) = bisect_utils.RunProcessAndRetrieveOutput(cmd) | 3113 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd) |
3140 | 3114 |
3141 changes = [] | 3115 changes = [] |
3142 assert not return_code, 'An error occurred while running'\ | 3116 assert not return_code, ('An error occurred while running ' |
3143 ' "%s"' % ' '.join(cmd) | 3117 '"%s"' % ' '.join(cmd)) |
3144 last_depot = None | 3118 last_depot = None |
3145 cwd = os.getcwd() | 3119 cwd = os.getcwd() |
3146 for l in output.split('\n'): | 3120 for l in output.split('\n'): |
3147 if l: | 3121 if l: |
3148 # Output will be in form: | 3122 # Output will be in form: |
3149 # /path_to_depot | 3123 # /path_to_depot |
3150 # /path_to_other_depot | 3124 # /path_to_other_depot |
3151 # <SHA1> | 3125 # <SHA1> |
3152 # /path_again | 3126 # /path_again |
3153 # <SHA1> | 3127 # <SHA1> |
(...skipping 11 matching lines...)
3165 else: | 3139 else: |
3166 for i in xrange(last_broken_revision_index, len(revision_data_sorted)): | 3140 for i in xrange(last_broken_revision_index, len(revision_data_sorted)): |
3167 k, v = revision_data_sorted[i] | 3141 k, v = revision_data_sorted[i] |
3168 if k == first_working_revision: | 3142 if k == first_working_revision: |
3169 break | 3143 break |
3170 self.ChangeToDepotWorkingDirectory(v['depot']) | 3144 self.ChangeToDepotWorkingDirectory(v['depot']) |
3171 info = self.source_control.QueryRevisionInfo(k) | 3145 info = self.source_control.QueryRevisionInfo(k) |
3172 culprit_revisions.append((k, info, v['depot'])) | 3146 culprit_revisions.append((k, info, v['depot'])) |
3173 os.chdir(cwd) | 3147 os.chdir(cwd) |
3174 | 3148 |
3175 # Check for any other possible regression ranges | 3149 # Check for any other possible regression ranges. |
3176 other_regressions = self._FindOtherRegressions(revision_data_sorted, | 3150 other_regressions = _FindOtherRegressions( |
3177 mean_of_bad_runs > mean_of_good_runs) | 3151 revision_data_sorted, mean_of_bad_runs > mean_of_good_runs) |
3178 | 3152 |
3179 return { | 3153 return { |
3180 'first_working_revision': first_working_revision, | 3154 'first_working_revision': first_working_revision, |
3181 'last_broken_revision': last_broken_revision, | 3155 'last_broken_revision': last_broken_revision, |
3182 'culprit_revisions': culprit_revisions, | 3156 'culprit_revisions': culprit_revisions, |
3183 'other_regressions': other_regressions, | 3157 'other_regressions': other_regressions, |
3184 'regression_size': regression_size, | 3158 'regression_size': regression_size, |
3185 'regression_std_err': regression_std_err, | 3159 'regression_std_err': regression_std_err, |
3186 'confidence': confidence, | 3160 'confidence': confidence, |
3187 } | 3161 } |
3188 | 3162 |
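The first pass of _GetResultsDict (partly elided above) scans the sort-ordered revision data for the first entry that passed and the last entry that failed; everything between them is the suspect range that the culprit and confidence calculations work from. A condensed version of that scan, assuming untested entries keep their '?' marker:

    def find_break_range(revision_data_sorted):
      """revision_data_sorted runs from the bad end of the range to the good end."""
      first_working = None
      last_broken = None
      for rev_id, data in revision_data_sorted:
        if data['passed'] == 1 and first_working is None:
          first_working = rev_id
        if not data['passed']:        # 0 means the test failed at this revision
          last_broken = rev_id
      return last_broken, first_working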
3189 def _CheckForWarnings(self, results_dict): | 3163 def _CheckForWarnings(self, results_dict): |
3190 if len(results_dict['culprit_revisions']) > 1: | 3164 if len(results_dict['culprit_revisions']) > 1: |
3191 self.warnings.append('Due to build errors, regression range could ' | 3165 self.warnings.append('Due to build errors, regression range could ' |
3192 'not be narrowed down to a single commit.') | 3166 'not be narrowed down to a single commit.') |
3193 if self.opts.repeat_test_count == 1: | 3167 if self.opts.repeat_test_count == 1: |
3194 self.warnings.append('Tests were only set to run once. This may ' | 3168 self.warnings.append('Tests were only set to run once. This may ' |
3195 'be insufficient to get meaningful results.') | 3169 'be insufficient to get meaningful results.') |
3196 if results_dict['confidence'] < 100: | 3170 if results_dict['confidence'] < 100: |
3197 if results_dict['confidence']: | 3171 if results_dict['confidence']: |
3198 self.warnings.append( | 3172 self.warnings.append( |
3199 'Confidence is less than 100%. There could be other candidates ' | 3173 'Confidence is less than 100%. There could be other candidates ' |
3200 'for this regression. Try bisecting again with increased ' | 3174 'for this regression. Try bisecting again with increased ' |
3201 'repeat_count or on a sub-metric that shows the regression more ' | 3175 'repeat_count or on a sub-metric that shows the regression more ' |
3202 'clearly.') | 3176 'clearly.') |
3203 else: | 3177 else: |
3204 self.warnings.append( | 3178 self.warnings.append( |
3205 'Confidence is 0%. Try bisecting again on another platform, with ' | 3179 'Confidence is 0%. Try bisecting again on another platform, with ' |
3206 'increased repeat_count or on a sub-metric that shows the ' | 3180 'increased repeat_count or on a sub-metric that shows the ' |
3207 'regression more clearly.') | 3181 'regression more clearly.') |
3208 | 3182 |
3209 def FormatAndPrintResults(self, bisect_results): | 3183 def FormatAndPrintResults(self, bisect_results): |
3210 """Prints the results from a bisection run in a readable format. | 3184 """Prints the results from a bisection run in a readable format. |
3211 | 3185 |
3212 Args | 3186 Args: |
3213 bisect_results: The results from a bisection test run. | 3187 bisect_results: The results from a bisection test run. |
3214 """ | 3188 """ |
3215 revision_data = bisect_results['revision_data'] | 3189 revision_data = bisect_results['revision_data'] |
3216 revision_data_sorted = sorted(revision_data.iteritems(), | 3190 revision_data_sorted = sorted(revision_data.iteritems(), |
3217 key = lambda x: x[1]['sort']) | 3191 key = lambda x: x[1]['sort']) |
3218 results_dict = self._GetResultsDict(revision_data, revision_data_sorted) | 3192 results_dict = self._GetResultsDict(revision_data, revision_data_sorted) |
3219 | 3193 |
3220 self._CheckForWarnings(results_dict) | 3194 self._CheckForWarnings(results_dict) |
3221 | 3195 |
3222 if self.opts.output_buildbot_annotations: | 3196 if self.opts.output_buildbot_annotations: |
(...skipping 27 matching lines...)
3250 for culprit in results_dict['culprit_revisions']: | 3224 for culprit in results_dict['culprit_revisions']: |
3251 cl, info, depot = culprit | 3225 cl, info, depot = culprit |
3252 self._PrintRevisionInfo(cl, info, depot) | 3226 self._PrintRevisionInfo(cl, info, depot) |
3253 if results_dict['other_regressions']: | 3227 if results_dict['other_regressions']: |
3254 self._PrintOtherRegressions(results_dict['other_regressions'], | 3228 self._PrintOtherRegressions(results_dict['other_regressions'], |
3255 revision_data) | 3229 revision_data) |
3256 self._PrintTestedCommitsTable(revision_data_sorted, | 3230 self._PrintTestedCommitsTable(revision_data_sorted, |
3257 results_dict['first_working_revision'], | 3231 results_dict['first_working_revision'], |
3258 results_dict['last_broken_revision'], | 3232 results_dict['last_broken_revision'], |
3259 results_dict['confidence']) | 3233 results_dict['confidence']) |
3260 self._PrintStepTime(revision_data_sorted) | 3234 _PrintStepTime(revision_data_sorted) |
3261 self._PrintReproSteps() | 3235 self._PrintReproSteps() |
3262 self._PrintThankYou() | 3236 self._PrintThankYou() |
3263 if self.opts.output_buildbot_annotations: | 3237 if self.opts.output_buildbot_annotations: |
3264 bisect_utils.OutputAnnotationStepClosed() | 3238 bisect_utils.OutputAnnotationStepClosed() |
3265 | 3239 |
| 3240 def _PrintBanner(self, results_dict): |
| 3241 if self._IsBisectModeReturnCode(): |
| 3242 metrics = 'N/A' |
| 3243 change = 'Yes' |
| 3244 else: |
| 3245 metrics = '/'.join(self.opts.metric) |
| 3246 change = '%.02f%% (+/-%.02f%%)' % ( |
| 3247 results_dict['regression_size'], results_dict['regression_std_err']) |
3266 | 3248 |
3267 def IsPlatformSupported(opts): | 3249 if results_dict['culprit_revisions'] and results_dict['confidence']: |
| 3250 status = self._ConfidenceLevelStatus(results_dict) |
| 3251 else: |
| 3252 status = 'Failure, could not reproduce.' |
| 3253 change = 'Bisect could not reproduce a change.' |
| 3254 |
| 3255 print RESULTS_BANNER % { |
| 3256 'status': status, |
| 3257 'command': self.opts.command, |
| 3258 'metrics': metrics, |
| 3259 'change': change, |
| 3260 'confidence': results_dict['confidence'], |
| 3261 } |
| 3262 |
| 3263 def _PrintWarnings(self): |
| 3264 """Prints a list of warning strings if there are any.""" |
| 3265 if not self.warnings: |
| 3266 return |
| 3267 print |
| 3268 print 'WARNINGS:' |
| 3269 for w in set(self.warnings): |
| 3270 print ' ! %s' % w |
| 3271 |
| 3272 |
| 3273 def _IsPlatformSupported(): |
3268 """Checks that this platform and build system are supported. | 3274 """Checks that this platform and build system are supported. |
3269 | 3275 |
3270 Args: | 3276 Args: |
3271 opts: The options parsed from the command line. | 3277 opts: The options parsed from the command line. |
3272 | 3278 |
3273 Returns: | 3279 Returns: |
3274 True if the platform and build system are supported. | 3280 True if the platform and build system are supported. |
3275 """ | 3281 """ |
3276 # Haven't tested the script out on any other platforms yet. | 3282 # Haven't tested the script out on any other platforms yet. |
3277 supported = ['posix', 'nt'] | 3283 supported = ['posix', 'nt'] |
(...skipping 57 matching lines...)
3335 self.debug_ignore_build = None | 3341 self.debug_ignore_build = None |
3336 self.debug_ignore_sync = None | 3342 self.debug_ignore_sync = None |
3337 self.debug_ignore_perf_test = None | 3343 self.debug_ignore_perf_test = None |
3338 self.gs_bucket = None | 3344 self.gs_bucket = None |
3339 self.target_arch = 'ia32' | 3345 self.target_arch = 'ia32' |
3340 self.target_build_type = 'Release' | 3346 self.target_build_type = 'Release' |
3341 self.builder_host = None | 3347 self.builder_host = None |
3342 self.builder_port = None | 3348 self.builder_port = None |
3343 self.bisect_mode = BISECT_MODE_MEAN | 3349 self.bisect_mode = BISECT_MODE_MEAN |
3344 | 3350 |
3345 def _CreateCommandLineParser(self): | 3351 @staticmethod |
| 3352 def _CreateCommandLineParser(): |
3346 """Creates a parser with bisect options. | 3353 """Creates a parser with bisect options. |
3347 | 3354 |
3348 Returns: | 3355 Returns: |
3349 An instance of optparse.OptionParser. | 3356 An instance of optparse.OptionParser. |
3350 """ | 3357 """ |
3351 usage = ('%prog [options] [-- chromium-options]\n' | 3358 usage = ('%prog [options] [-- chromium-options]\n' |
3352 'Perform binary search on revision history to find a minimal ' | 3359 'Perform binary search on revision history to find a minimal ' |
3353 'range of revisions where a performance metric regressed.\n') | 3360 'range of revisions where a performance metric regressed.\n') |
3354 | 3361 |
3355 parser = optparse.OptionParser(usage=usage) | 3362 parser = optparse.OptionParser(usage=usage) |
(...skipping 66 matching lines...)
3422 group.add_option('--target_platform', | 3429 group.add_option('--target_platform', |
3423 type='choice', | 3430 type='choice', |
3424 choices=['chromium', 'cros', 'android', 'android-chrome'], | 3431 choices=['chromium', 'cros', 'android', 'android-chrome'], |
3425 default='chromium', | 3432 default='chromium', |
3426 help='The target platform. Choices are "chromium" ' | 3433 help='The target platform. Choices are "chromium" ' |
3427 '(current platform), "cros", or "android". If you ' | 3434 '(current platform), "cros", or "android". If you ' |
3428 'specify something other than "chromium", you must be ' | 3435 'specify something other than "chromium", you must be ' |
3429 'properly set up to build that platform.') | 3436 'properly set up to build that platform.') |
3430 group.add_option('--no_custom_deps', | 3437 group.add_option('--no_custom_deps', |
3431 dest='no_custom_deps', | 3438 dest='no_custom_deps', |
3432 action="store_true", | 3439 action='store_true', |
3433 default=False, | 3440 default=False, |
3434 help='Run the script with custom_deps or not.') | 3441 help='Run the script with custom_deps or not.') |
3435 group.add_option('--extra_src', | 3442 group.add_option('--extra_src', |
3436 type='str', | 3443 type='str', |
3437 help='Path to a script which can be used to modify ' | 3444 help='Path to a script which can be used to modify ' |
3438 'the bisect script\'s behavior.') | 3445 'the bisect script\'s behavior.') |
3439 group.add_option('--cros_board', | 3446 group.add_option('--cros_board', |
3440 type='str', | 3447 type='str', |
3441 help='The cros board type to build.') | 3448 help='The cros board type to build.') |
3442 group.add_option('--cros_remote_ip', | 3449 group.add_option('--cros_remote_ip', |
3443 type='str', | 3450 type='str', |
3444 help='The remote machine to image to.') | 3451 help='The remote machine to image to.') |
3445 group.add_option('--use_goma', | 3452 group.add_option('--use_goma', |
3446 action="store_true", | 3453 action='store_true', |
3447 help='Add a bunch of extra threads for goma, and enable ' | 3454 help='Add a bunch of extra threads for goma, and enable ' |
3448 'goma') | 3455 'goma') |
3449 group.add_option('--goma_dir', | 3456 group.add_option('--goma_dir', |
3450 help='Path to goma tools (or system default if not ' | 3457 help='Path to goma tools (or system default if not ' |
3451 'specified).') | 3458 'specified).') |
3452 group.add_option('--output_buildbot_annotations', | 3459 group.add_option('--output_buildbot_annotations', |
3453 action="store_true", | 3460 action='store_true', |
3454 help='Add extra annotation output for buildbot.') | 3461 help='Add extra annotation output for buildbot.') |
3455 group.add_option('--gs_bucket', | 3462 group.add_option('--gs_bucket', |
3456 default='', | 3463 default='', |
3457 dest='gs_bucket', | 3464 dest='gs_bucket', |
3458 type='str', | 3465 type='str', |
3459 help=('Name of Google Storage bucket to upload or ' | 3466 help=('Name of Google Storage bucket to upload or ' |
3460 'download build. e.g., chrome-perf')) | 3467 'download build. e.g., chrome-perf')) |
3461 group.add_option('--target_arch', | 3468 group.add_option('--target_arch', |
3462 type='choice', | 3469 type='choice', |
3463 choices=['ia32', 'x64', 'arm'], | 3470 choices=['ia32', 'x64', 'arm'], |
(...skipping 14 matching lines...) |
3478 ' try job request.')) | 3485 ' try job request.')) |
3479 group.add_option('--builder_port', | 3486 group.add_option('--builder_port', |
3480 dest='builder_port', | 3487 dest='builder_port', |
3481 type='int', | 3488 type='int', |
3482 help=('HTTP port of the server to produce build by posting' | 3489 help=('HTTP port of the server to produce build by posting' |
3483 ' try job request.')) | 3490 ' try job request.')) |
3484 parser.add_option_group(group) | 3491 parser.add_option_group(group) |
3485 | 3492 |
3486 group = optparse.OptionGroup(parser, 'Debug options') | 3493 group = optparse.OptionGroup(parser, 'Debug options') |
3487 group.add_option('--debug_ignore_build', | 3494 group.add_option('--debug_ignore_build', |
3488 action="store_true", | 3495 action='store_true', |
3489 help='DEBUG: Don\'t perform builds.') | 3496 help='DEBUG: Don\'t perform builds.') |
3490 group.add_option('--debug_ignore_sync', | 3497 group.add_option('--debug_ignore_sync', |
3491 action="store_true", | 3498 action='store_true', |
3492 help='DEBUG: Don\'t perform syncs.') | 3499 help='DEBUG: Don\'t perform syncs.') |
3493 group.add_option('--debug_ignore_perf_test', | 3500 group.add_option('--debug_ignore_perf_test', |
3494 action="store_true", | 3501 action='store_true', |
3495 help='DEBUG: Don\'t perform performance tests.') | 3502 help='DEBUG: Don\'t perform performance tests.') |
3496 parser.add_option_group(group) | 3503 parser.add_option_group(group) |
3497 return parser | 3504 return parser |
3498 | 3505 |
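Making _CreateCommandLineParser a staticmethod lets the parser be built and exercised without constructing a BisectOptions instance, e.g. from a unit test. A hypothetical usage sketch (argument values are made up; the --metric flag name is assumed, the other flags match the required-parameter errors checked below):

  parser = BisectOptions._CreateCommandLineParser()
  opts, _ = parser.parse_args([
      '--command', 'out/Release/performance_ui_tests --gtest_filter=Foo.Bar',
      '--good_revision', '12345',
      '--bad_revision', '12350',
      '--metric', 'chart_name/trace_name'])
  print opts.metric  # still the raw string; ParseCommandLine splits it later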
3499 def ParseCommandLine(self): | 3506 def ParseCommandLine(self): |
3500 """Parses the command line for bisect options.""" | 3507 """Parses the command line for bisect options.""" |
3501 parser = self._CreateCommandLineParser() | 3508 parser = self._CreateCommandLineParser() |
3502 (opts, _) = parser.parse_args() | 3509 opts, _ = parser.parse_args() |
3503 | 3510 |
3504 try: | 3511 try: |
3505 if not opts.command: | 3512 if not opts.command: |
3506 raise RuntimeError('missing required parameter: --command') | 3513 raise RuntimeError('missing required parameter: --command') |
3507 | 3514 |
3508 if not opts.good_revision: | 3515 if not opts.good_revision: |
3509 raise RuntimeError('missing required parameter: --good_revision') | 3516 raise RuntimeError('missing required parameter: --good_revision') |
3510 | 3517 |
3511 if not opts.bad_revision: | 3518 if not opts.bad_revision: |
3512 raise RuntimeError('missing required parameter: --bad_revision') | 3519 raise RuntimeError('missing required parameter: --bad_revision') |
(...skipping 21 matching lines...) |
3534 | 3541 |
3535 if not opts.cros_remote_ip: | 3542 if not opts.cros_remote_ip: |
3536 raise RuntimeError('missing required parameter: --cros_remote_ip') | 3543 raise RuntimeError('missing required parameter: --cros_remote_ip') |
3537 | 3544 |
3538 if not opts.working_directory: | 3545 if not opts.working_directory: |
3539 raise RuntimeError('missing required parameter: --working_directory') | 3546 raise RuntimeError('missing required parameter: --working_directory') |
3540 | 3547 |
3541 metric_values = opts.metric.split('/') | 3548 metric_values = opts.metric.split('/') |
3542 if (len(metric_values) != 2 and | 3549 if (len(metric_values) != 2 and |
3543 opts.bisect_mode != BISECT_MODE_RETURN_CODE): | 3550 opts.bisect_mode != BISECT_MODE_RETURN_CODE): |
3544 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric) | 3551 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric) |
3545 | 3552 |
3546 opts.metric = metric_values | 3553 opts.metric = metric_values |
3547 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) | 3554 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) |
3548 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) | 3555 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) |
3549 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) | 3556 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) |
3550 opts.truncate_percent = opts.truncate_percent / 100.0 | 3557 opts.truncate_percent = opts.truncate_percent / 100.0 |
3551 | 3558 |
3552 for k, v in opts.__dict__.iteritems(): | 3559 for k, v in opts.__dict__.iteritems(): |
3553 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k | 3560 assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k |
3554 setattr(self, k, v) | 3561 setattr(self, k, v) |
3555 except RuntimeError, e: | 3562 except RuntimeError, e: |
3556 output_string = StringIO.StringIO() | 3563 output_string = StringIO.StringIO() |
3557 parser.print_help(file=output_string) | 3564 parser.print_help(file=output_string) |
3558 error_message = '%s\n\n%s' % (e.message, output_string.getvalue()) | 3565 error_message = '%s\n\n%s' % (e.message, output_string.getvalue()) |
3559 output_string.close() | 3566 output_string.close() |
3560 raise RuntimeError(error_message) | 3567 raise RuntimeError(error_message) |
3561 | 3568 |
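A worked illustration of the clamping and metric parsing applied above (values are hypothetical):

  # min(max(v, lo), hi) clamps v into the range [lo, hi]:
  repeat_test_count = min(max(200, 1), 100)       # 200 -> 100
  max_time_minutes = min(max(0, 1), 60)           #   0 -> 1
  truncate_percent = min(max(30, 0), 25) / 100.0  #  30 -> 0.25
  # A metric must be 'chart_name/trace_name' unless bisecting on return code:
  metric = 'chart_name/trace_name'.split('/')     # ['chart_name', 'trace_name']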
3562 @staticmethod | 3569 @staticmethod |
3563 def FromDict(values): | 3570 def FromDict(values): |
3564 """Creates an instance of BisectOptions with the values parsed from a | 3571 """Creates an instance of BisectOptions with the values parsed from a |
3565 .cfg file. | 3572 .cfg file. |
3566 | 3573 |
3567 Args: | 3574 Args: |
3568 values: a dict containing options to set. | 3575 values: a dict containing options to set. |
3569 | 3576 |
3570 Returns: | 3577 Returns: |
3571 An instance of BisectOptions. | 3578 An instance of BisectOptions. |
3572 """ | 3579 """ |
3573 opts = BisectOptions() | 3580 opts = BisectOptions() |
3574 for k, v in values.iteritems(): | 3581 for k, v in values.iteritems(): |
3575 assert hasattr(opts, k), 'Invalid %s attribute in '\ | 3582 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k |
3576 'BisectOptions.' % k | |
3577 setattr(opts, k, v) | 3583 setattr(opts, k, v) |
3578 | 3584 |
3579 metric_values = opts.metric.split('/') | 3585 metric_values = opts.metric.split('/') |
3580 if len(metric_values) != 2: | 3586 if len(metric_values) != 2: |
3581 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric) | 3587 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric) |
3582 | 3588 |
3583 opts.metric = metric_values | 3589 opts.metric = metric_values |
3584 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) | 3590 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) |
3585 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) | 3591 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) |
3586 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) | 3592 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) |
3587 opts.truncate_percent = opts.truncate_percent / 100.0 | 3593 opts.truncate_percent = opts.truncate_percent / 100.0 |
3588 | 3594 |
3589 return opts | 3595 return opts |
3590 | 3596 |
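A hypothetical example of building options from parsed .cfg values (keys and values below are made up; every key must already exist as a BisectOptions attribute, or the assert fires):

  config = {
      'command': 'out/Release/performance_ui_tests --gtest_filter=Foo.Bar',
      'good_revision': '12345',
      'bad_revision': '12350',
      'metric': 'chart_name/trace_name',
      'repeat_test_count': 20,
      'max_time_minutes': 20,
      'truncate_percent': 25,
  }
  opts = BisectOptions.FromDict(config)
  print opts.metric            # ['chart_name', 'trace_name']
  print opts.truncate_percent  # 0.25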
3591 | 3597 |
3592 def main(): | 3598 def main(): |
3593 | 3599 |
3594 try: | 3600 try: |
3595 opts = BisectOptions() | 3601 opts = BisectOptions() |
3596 opts.ParseCommandLine() | 3602 opts.ParseCommandLine() |
3597 | 3603 |
3598 if opts.extra_src: | 3604 if opts.extra_src: |
3599 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src) | 3605 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src) |
3600 if not extra_src: | 3606 if not extra_src: |
3601 raise RuntimeError("Invalid or missing --extra_src.") | 3607 raise RuntimeError('Invalid or missing --extra_src.') |
3602 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo()) | 3608 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo()) |
3603 | 3609 |
3604 if opts.working_directory: | 3610 if opts.working_directory: |
3605 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS | 3611 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS |
3606 if opts.no_custom_deps: | 3612 if opts.no_custom_deps: |
3607 custom_deps = None | 3613 custom_deps = None |
3608 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps) | 3614 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps) |
3609 | 3615 |
3610 os.chdir(os.path.join(os.getcwd(), 'src')) | 3616 os.chdir(os.path.join(os.getcwd(), 'src')) |
3611 | 3617 |
3612 if not RemoveBuildFiles(opts.target_build_type): | 3618 if not RemoveBuildFiles(opts.target_build_type): |
3613 raise RuntimeError('Something went wrong removing the build files.') | 3619 raise RuntimeError('Something went wrong removing the build files.') |
3614 | 3620 |
3615 if not IsPlatformSupported(opts): | 3621 if not _IsPlatformSupported(): |
3616 raise RuntimeError("Sorry, this platform isn't supported yet.") | 3622 raise RuntimeError('Sorry, this platform isn\'t supported yet.') |
3617 | 3623 |
3618 # Check what source control method is being used, and create a | 3624 # Check what source control method is being used, and create a |
3619 # SourceControl object if possible. | 3625 # SourceControl object if possible. |
3620 source_control = source_control_module.DetermineAndCreateSourceControl(opts) | 3626 source_control = source_control_module.DetermineAndCreateSourceControl(opts) |
3621 | 3627 |
3622 if not source_control: | 3628 if not source_control: |
3623 raise RuntimeError("Sorry, only the git workflow is supported at the " | 3629 raise RuntimeError( |
3624 "moment.") | 3630 'Sorry, only the git workflow is supported at the moment.') |
3625 | 3631 |
3626 # gclient sync seems to fail if you're not on the master branch. | 3632 # gclient sync seems to fail if you're not on the master branch. |
3627 if (not source_control.IsInProperBranch() and | 3633 if (not source_control.IsInProperBranch() and |
3628 not opts.debug_ignore_sync and | 3634 not opts.debug_ignore_sync and |
3629 not opts.working_directory): | 3635 not opts.working_directory): |
3630 raise RuntimeError("You must switch to master branch to run bisection.") | 3636 raise RuntimeError('You must switch to master branch to run bisection.') |
3631 bisect_test = BisectPerformanceMetrics(source_control, opts) | 3637 bisect_test = BisectPerformanceMetrics(source_control, opts) |
3632 try: | 3638 try: |
3633 bisect_results = bisect_test.Run(opts.command, | 3639 bisect_results = bisect_test.Run(opts.command, |
3634 opts.bad_revision, | 3640 opts.bad_revision, |
3635 opts.good_revision, | 3641 opts.good_revision, |
3636 opts.metric) | 3642 opts.metric) |
3637 if bisect_results['error']: | 3643 if bisect_results['error']: |
3638 raise RuntimeError(bisect_results['error']) | 3644 raise RuntimeError(bisect_results['error']) |
3639 bisect_test.FormatAndPrintResults(bisect_results) | 3645 bisect_test.FormatAndPrintResults(bisect_results) |
3640 return 0 | 3646 return 0 |
3641 finally: | 3647 finally: |
3642 bisect_test.PerformCleanup() | 3648 bisect_test.PerformCleanup() |
3643 except RuntimeError, e: | 3649 except RuntimeError, e: |
3644 if opts.output_buildbot_annotations: | 3650 if opts.output_buildbot_annotations: |
3645 # The perf dashboard scrapes the "results" step in order to comment on | 3651 # The perf dashboard scrapes the "results" step in order to comment on |
3646 # bugs. If you change this, please update the perf dashboard as well. | 3652 # bugs. If you change this, please update the perf dashboard as well. |
3647 bisect_utils.OutputAnnotationStepStart('Results') | 3653 bisect_utils.OutputAnnotationStepStart('Results') |
3648 print 'Error: %s' % e.message | 3654 print 'Error: %s' % e.message |
3649 if opts.output_buildbot_annotations: | 3655 if opts.output_buildbot_annotations: |
3650 bisect_utils.OutputAnnotationStepClosed() | 3656 bisect_utils.OutputAnnotationStepClosed() |
3651 return 1 | 3657 return 1 |
3652 | 3658 |
3653 | 3659 |
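For reference, the core flow that main() drives (after option parsing, depot setup, and platform checks) boils down to the following; this is a summary sketch, with config as in the FromDict example above, not a supported entry point:

  opts = BisectOptions.FromDict(config)
  source_control = source_control_module.DetermineAndCreateSourceControl(opts)
  bisect_test = BisectPerformanceMetrics(source_control, opts)
  try:
    results = bisect_test.Run(opts.command, opts.bad_revision,
                              opts.good_revision, opts.metric)
    if not results['error']:
      bisect_test.FormatAndPrintResults(results)
  finally:
    bisect_test.PerformCleanup()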
3654 if __name__ == '__main__': | 3660 if __name__ == '__main__': |
3655 sys.exit(main()) | 3661 sys.exit(main()) |