OLD | NEW |
| (Empty) |
1 #!/usr/bin/env python | |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 """Performance Test Bisect Tool | |
7 | |
8 This script bisects a series of changelists using binary search. It starts at | |
9 a bad revision where a performance metric has regressed, and asks for a last | |
10 known-good revision. It will then binary search across this revision range by | |
11 syncing, building, and running a performance test. If the change is | |
12 suspected to occur as a result of WebKit/V8 changes, the script will | |
13 further bisect changes to those depots and attempt to narrow down the revision | |
14 range. | |
15 | |
16 Example usage using SVN revisions: | |
17 | |
18 ./tools/bisect-perf-regression.py -c\ | |
19 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | |
20 -g 168222 -b 168232 -m shutdown/simple-user-quit | |
21 | |
22 Be aware that if you're using the git workflow and specify an SVN revision, | |
23 the script will attempt to find the git SHA1 where SVN changes up to that | |
24 revision were merged in. | |
25 | |
26 Example usage using git hashes: | |
27 | |
28 ./tools/bisect-perf-regression.py -c\ | |
29 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | |
30 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ | |
31 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ | |
32 -m shutdown/simple-user-quit | |
33 """ | |
34 | |
import ast
import copy
import datetime
import errno
import hashlib
import math
import optparse
import os
import re
import shlex
import shutil
import StringIO
import sys
import time
import zipfile
49 | |
50 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry')) | |
51 | |
52 from auto_bisect import bisect_utils | |
53 from auto_bisect import builder | |
54 from auto_bisect import math_utils | |
55 from auto_bisect import request_build | |
56 from auto_bisect import source_control as source_control_module | |
57 from auto_bisect import ttest | |
58 from telemetry.util import cloud_storage | |
59 | |
# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
#   src: Path to the working directory.
#   recurse: True if this repository will get bisected.
#   depends: A list of other repositories that are actually part of the same
#       repository in svn. If the repository has any dependent repositories
#       (e.g. skia/src needs skia/include and skia/gyp to be updated), then
#       they are specified here.
#   svn: URL of SVN repository. Needed for git workflow to resolve hashes to
#       SVN revisions.
#   from: Parent depot that must be bisected before this is bisected.
#   deps_var: Key name in vars variable in DEPS file that has revision
#       information.
DEPOT_DEPS_NAME = {
  'chromium': {
    'src': 'src',
    'recurse': True,
    'depends': None,
    'from': ['cros', 'android-chrome'],
    'viewvc':
        'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit': {
    'src': 'src/third_party/WebKit',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'viewvc':
        'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle': {
    'src': 'src/third_party/angle',
    'src_old': 'src/third_party/angle_dx11',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'platform': 'nt',
    'deps_var': 'angle_revision'
  },
  'v8': {
    'src': 'src/v8',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge': {
    'src': 'src/v8_bleeding_edge',
    'recurse': True,
    'depends': None,
    'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
    'from': ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src': {
    'src': 'src/third_party/skia/src',
    'recurse': True,
    'svn': 'http://skia.googlecode.com/svn/trunk/src',
    'depends': ['skia/include', 'skia/gyp'],
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include': {
    'src': 'src/third_party/skia/include',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/include',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp': {
    'src': 'src/third_party/skia/gyp',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  }
}
150 | |
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'

# Possible return values from BisectPerformanceMetrics.RunTest.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to the try
# server.
# TODO: Change these values based on the actual time taken by buildbots on
# the try server.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# The confidence percentage at which confidence can be considered "high".
HIGH_CONFIDENCE = 95

# Patch template to add a new file, DEPS.sha, under the src folder. This file
# contains the SHA1 value of the DEPS changes made while bisecting dependency
# repositories. This patch is sent along with the DEPS patch to the try
# server. When a build request is posted with a patch, bisect builders on the
# try server read the SHA value from this file once the build is produced and
# append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'

# The perf dashboard looks for a string like "Estimated Confidence: 95%"
# to decide whether or not to cc the author(s). If you change this, please
# update the perf dashboard as well.
RESULTS_BANNER = """
===== BISECT JOB RESULTS =====
Status: %(status)s

Test Command: %(command)s
Test Metric: %(metrics)s
Relative Change: %(change)s
Estimated Confidence: %(confidence).02f%%"""

# The perf dashboard specifically looks for the string
# "Author  : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author  : %(author)s%(email_info)s%(commit_info)s
Commit  : %(cl)s
Date    : %(cl_date)s"""

REPRO_STEPS_LOCAL = """
==== INSTRUCTIONS TO REPRODUCE ====
To run locally:
 - Use the test command given under 'BISECT JOB RESULTS' above.
 - Consider using a profiler. Pass --profiler=list to list available profilers.
"""

REPRO_STEPS_TRYJOB = """
To reproduce on a performance try bot:
 1. Edit run-perf-test.cfg
 2. Upload your patch with: $ git cl upload --bypass-hooks
 3. Send to the try server: $ git cl try -m tryserver.chromium.perf -b <bot>

Notes:
 a) Follow the in-file instructions in run-perf-test.cfg.
 b) run-perf-test.cfg is under tools/ or under third_party/WebKit/Tools.
 c) Do your edits preferably under a new git branch.
 d) --browser=release and --browser=android-chromium-testshell are supported
    depending on the platform (desktop|android).
 e) Strip any src/ directories from the head of relative path names.
 f) Make sure to use the appropriate bot on step 3.

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""

REPRO_STEPS_TRYJOB_TELEMETRY = """
To reproduce on a performance try bot:
%(command)s
(Where <bot-name> comes from tools/perf/run_benchmark --browser=list)

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots
"""

RESULTS_THANKYOU = """
===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
Visit http://www.chromium.org/developers/core-principles for Chrome's policy
on perf regressions.
Contact chrome-perf-dashboard-team with any questions or suggestions about
bisecting.
.                   .-----.
.                  .---.   \  \==)
.                  |PERF\  \   \\
.                  | ---------'-------'-----------.
.                  . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |_`-.
.                   \_____________.-------._______________)
.                              /  /
.                             /  /
.                            /  /==)
.                           ._____."""
262 | |
263 | |
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: Dict of depot-name -> depot-info dicts to merge into
        DEPOT_DEPS_NAME (entries with the same name override existing ones).
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Build a fresh merged dict rather than using the Python-2-only
  # dict(a.items() + b.items()) idiom; like the original, this rebinds the
  # global to a new dict instead of mutating the old one in place.
  merged = dict(DEPOT_DEPS_NAME)
  merged.update(depot_info)
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
270 | |
271 | |
def ConfidenceScore(good_results_lists, bad_results_lists):
  """Calculates a confidence score.

  This score is a percentage which represents our degree of confidence in the
  proposition that the good results and bad results are distinct groups, and
  their differences aren't due to chance alone.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # A single classified revision on either side (or none at all) isn't
  # enough evidence to make a decision.
  if len(good_results_lists) <= 1 or len(bad_results_lists) <= 1:
    return 0.0

  # Flatten each group of per-revision result lists into one sample.
  good_sample = [value for results in good_results_lists for value in results]
  bad_sample = [value for results in bad_results_lists for value in results]

  # Only empty lists in either group (unexpected, but possible) also means
  # zero confidence.
  if not good_sample or not bad_sample:
    return 0.0

  # The p-value is approximately the probability of obtaining the given set
  # of good and bad values just by chance.
  _, _, p_value = ttest.WelchsTTest(good_sample, bad_sample)
  return 100.0 * (1.0 - p_value)
306 | |
307 | |
def GetSHA1HexDigest(contents):
  """Returns the SHA1 hex digest of the given string."""
  digest = hashlib.sha1(contents)
  return digest.hexdigest()
311 | |
312 | |
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision.

  Args:
    build_revision: Revision string; when omitted, only the platform base
        name is returned.
    target_arch: Target architecture, e.g. 'ia32' or 'x64'.
    patch_sha: Optional SHA of an applied patch, appended to the name.

  Returns:
    Either the bare 'full-build-<platform>' base name, or
    '<base>_<revision>[_<sha>].zip'.
  """
  def PlatformName():
    """Returns the platform string used in archive paths."""
    if bisect_utils.IsWindowsHost():
      # A build archive for x64 is still stored with the "win32" suffix;
      # see chromium_utils.PlatformName().
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if bisect_utils.IsLinuxHost():
      # Android builds are also archived with the "full-build-linux" prefix.
      return 'linux'
    if bisect_utils.IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  suffix = build_revision
  if patch_sha:
    suffix = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, suffix)
336 | |
337 | |
def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Returns the URL to download the build from.

  Args:
    build_revision: Revision the build was made at.
    target_platform: Platform of the build, e.g. 'chromium' or 'android'.
    target_arch: Target architecture, e.g. 'ia32' or 'x64'.
    patch_sha: Optional SHA of an applied patch.

  Returns:
    '<builder folder>/<archive file name>' within the storage bucket.
  """
  def GetGSRootFolderName(target_platform):
    """Returns the Google Cloud Storage root folder name."""
    if bisect_utils.IsWindowsHost():
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if bisect_utils.IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if bisect_utils.IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  archive_name = GetZipFileName(build_revision, target_arch, patch_sha)
  root_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (root_folder, archive_name)
359 | |
360 | |
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    Downloaded file path if exists, otherwise None.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      # Fixed log message: previously printed "gs//" instead of "gs://".
      print('Fetching file from gs://%s/%s ...' % (bucket_name, source_path))
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return target_file
    else:
      print('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    # Best-effort download: log the failure and clean up any partial file.
    print('Something went wrong while fetching file from cloud: %s' % e)
    if os.path.exists(target_file):
      os.remove(target_file)
  return None
387 | |
388 | |
389 # This is copied from build/scripts/common/chromium_utils.py. | |
390 def MaybeMakeDirectory(*path): | |
391 """Creates an entire path, if it doesn't already exist.""" | |
392 file_path = os.path.join(*path) | |
393 try: | |
394 os.makedirs(file_path) | |
395 except OSError as e: | |
396 if e.errno != errno.EEXIST: | |
397 return False | |
398 return True | |
399 | |
400 | |
401 # This was copied from build/scripts/common/chromium_utils.py. | |
402 def ExtractZip(filename, output_dir, verbose=True): | |
403 """ Extract the zip archive in the output directory.""" | |
404 MaybeMakeDirectory(output_dir) | |
405 | |
406 # On Linux and Mac, we use the unzip command as it will | |
407 # handle links and file bits (executable), which is much | |
408 # easier then trying to do that with ZipInfo options. | |
409 # | |
410 # The Mac Version of unzip unfortunately does not support Zip64, whereas | |
411 # the python module does, so we have to fall back to the python zip module | |
412 # on Mac if the file size is greater than 4GB. | |
413 # | |
414 # On Windows, try to use 7z if it is installed, otherwise fall back to python | |
415 # zip module and pray we don't have files larger than 512MB to unzip. | |
416 unzip_cmd = None | |
417 if ((bisect_utils.IsMacHost() | |
418 and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024) | |
419 or bisect_utils.IsLinuxHost()): | |
420 unzip_cmd = ['unzip', '-o'] | |
421 elif (bisect_utils.IsWindowsHost() | |
422 and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')): | |
423 unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y'] | |
424 | |
425 if unzip_cmd: | |
426 # Make sure path is absolute before changing directories. | |
427 filepath = os.path.abspath(filename) | |
428 saved_dir = os.getcwd() | |
429 os.chdir(output_dir) | |
430 command = unzip_cmd + [filepath] | |
431 result = bisect_utils.RunProcess(command) | |
432 os.chdir(saved_dir) | |
433 if result: | |
434 raise IOError('unzip failed: %s => %s' % (str(command), result)) | |
435 else: | |
436 assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost() | |
437 zf = zipfile.ZipFile(filename) | |
438 for name in zf.namelist(): | |
439 if verbose: | |
440 print 'Extracting %s' % name | |
441 zf.extract(name, output_dir) | |
442 if bisect_utils.IsMacHost(): | |
443 # Restore permission bits. | |
444 os.chmod(os.path.join(output_dir, name), | |
445 zf.getinfo(name).external_attr >> 16L) | |
446 | |
447 | |
def WriteStringToFile(text, file_name):
  """Writes text to a file, raising a RuntimeError on failure.

  Args:
    text: Content to write; the file is opened in binary mode.
    file_name: Destination file path.

  Raises:
    RuntimeError: If the file could not be written.
  """
  try:
    output_file = open(file_name, 'wb')
    try:
      output_file.write(text)
    finally:
      output_file.close()
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
455 | |
456 | |
def ReadStringFromFile(file_name):
  """Reads a file and returns its contents, raising a RuntimeError on failure.

  (The original docstring said "Writes text to a file" — a copy-paste error
  from WriteStringToFile.)

  Args:
    file_name: Path of the file to read.

  Returns:
    The file contents as a string.

  Raises:
    RuntimeError: If the file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
464 | |
465 | |
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to Unix-style paths.

  Only the '--- ' and '+++ ' header lines are rewritten; other lines are
  left untouched.

  Args:
    diff_text: Patch text, or a falsy value.

  Returns:
    The rewritten patch text, or None if diff_text is empty.
  """
  if not diff_text:
    return None

  def _FixLine(line):
    if line.startswith(('--- ', '+++ ')):
      return line.replace('\\', '/')
    return line

  return '\n'.join(_FixLine(line) for line in diff_text.split('\n'))
476 | |
477 | |
478 def _ParseRevisionsFromDEPSFileManually(deps_file_contents): | |
479 """Parses the vars section of the DEPS file using regular expressions. | |
480 | |
481 Args: | |
482 deps_file_contents: The DEPS file contents as a string. | |
483 | |
484 Returns: | |
485 A dictionary in the format {depot: revision} if successful, otherwise None. | |
486 """ | |
487 # We'll parse the "vars" section of the DEPS file. | |
488 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE) | |
489 re_results = rxp.search(deps_file_contents) | |
490 | |
491 if not re_results: | |
492 return None | |
493 | |
494 # We should be left with a series of entries in the vars component of | |
495 # the DEPS file with the following format: | |
496 # 'depot_name': 'revision', | |
497 vars_body = re_results.group('vars_body') | |
498 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'", | |
499 re.MULTILINE) | |
500 re_results = rxp.findall(vars_body) | |
501 | |
502 return dict(re_results) | |
503 | |
504 | |
def _WaitUntilBuildIsReady(
    fetch_build, bot_name, builder_host, builder_port, build_request_id,
    max_timeout):
  """Waits until the bisect builder produces the build on the try server.

  Args:
    fetch_build: Function to check and download build from cloud storage.
    bot_name: Builder bot name on try server.
    builder_host: Try server host name.
    builder_port: Try server port.
    build_request_id: A unique ID of the build request posted to try server.
    max_timeout: Maximum time to wait for the build.

  Returns:
    Downloaded archive file path if exists, otherwise None.
  """
  build_num = None  # Build number on the try server; looked up lazily.
  poll_interval = 60  # Seconds between checks for the build in cloud storage.
  status_check_interval = 600  # Seconds between try-server status checks.
  last_status_check = time.time()
  start_time = time.time()
  while True:
    # Check for the build on gs://chrome-perf and download it if it exists.
    res = fetch_build()
    if res:
      return (res, 'Build successfully found')
    elapsed_status_check = time.time() - last_status_check
    # Only query the try server every ten minutes so we don't overload it
    # with status-check requests.
    if elapsed_status_check > status_check_interval:
      last_status_check = time.time()
      if not build_num:
        # Get the build number on try server for the current build.
        build_num = request_build.GetBuildNumFromBuilder(
            build_request_id, bot_name, builder_host, builder_port)
      # Check the status of the build using the build number.
      # Note: the build is treated as PENDING if the build number is not
      # found on the try server.
      build_status, status_link = request_build.GetBuildStatus(
          build_num, bot_name, builder_host, builder_port)
      if build_status == request_build.FAILED:
        return (None, 'Failed to produce build, log: %s' % status_link)
    elapsed_time = time.time() - start_time
    if elapsed_time > max_timeout:
      return (None, 'Timed out: %ss without build' % max_timeout)

    print('Time elapsed: %ss without build.' % elapsed_time)
    time.sleep(poll_interval)
    # Mac bisect bots were not flushing stdout periodically, which caused
    # the buildbot command to time out; flush explicitly on all platforms
    # while waiting for the build.
    sys.stdout.flush()
560 | |
561 | |
562 def _UpdateV8Branch(deps_content): | |
563 """Updates V8 branch in DEPS file to process v8_bleeding_edge. | |
564 | |
565 Check for "v8_branch" in DEPS file if exists update its value | |
566 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS | |
567 variable from DEPS revision 254916, therefore check for "src/v8": | |
568 <v8 source path> in DEPS in order to support prior DEPS revisions | |
569 and update it. | |
570 | |
571 Args: | |
572 deps_content: DEPS file contents to be modified. | |
573 | |
574 Returns: | |
575 Modified DEPS file contents as a string. | |
576 """ | |
577 new_branch = r'branches/bleeding_edge' | |
578 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")') | |
579 if re.search(v8_branch_pattern, deps_content): | |
580 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content) | |
581 else: | |
582 # Replaces the branch assigned to "src/v8" key in DEPS file. | |
583 # Format of "src/v8" in DEPS: | |
584 # "src/v8": | |
585 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"), | |
586 # So, "/trunk@" is replace with "/branches/bleeding_edge@" | |
587 v8_src_pattern = re.compile( | |
588 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE) | |
589 if re.search(v8_src_pattern, deps_content): | |
590 deps_content = re.sub(v8_src_pattern, new_branch, deps_content) | |
591 return deps_content | |
592 | |
593 | |
def _UpdateDEPSForAngle(revision, depot, deps_file):
  """Updates the DEPS file with a new revision for the Angle repository.

  This is a hack for the Angle depot case because, in the DEPS file, the
  "vars" dictionary variable contains an "angle_revision" key that holds a
  git hash instead of an SVN revision.

  Sometimes the "angle_revision" key is not specified in the "vars"
  variable; in such cases, check the "deps" dictionary variable for an entry
  matching angle.git@[a-fA-F0-9]{40}$ and replace the git hash there.

  Args:
    revision: Git hash to write into the DEPS file.
    depot: Depot name, used to look up the DEPS variable name.
    deps_file: Path of the DEPS file to modify.

  Returns:
    True if the DEPS file was updated successfully, otherwise False.
  """
  deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
  try:
    deps_contents = ReadStringFromFile(deps_file)
    # Look for the depot revision pattern in the DEPS "vars" variable,
    # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
    angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
                                   deps_var, re.MULTILINE)
    # Bug fix: the pattern is already formatted with deps_var above; the
    # original applied "%" to the compiled pattern object here, which raised
    # a TypeError before any search could happen.
    match = re.search(angle_rev_pattern, deps_contents)
    if match:
      # Update the revision information for the given depot.
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    else:
      # Look for the revision in the DEPS "deps" variable instead, e.g.
      #   "src/third_party/angle": Var("chromium_git") +
      #       "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
      angle_rev_pattern = re.compile(
          r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
      match = re.search(angle_rev_pattern, deps_contents)
      if not match:
        print('Could not find angle revision information in DEPS file.')
        return False
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    # Write changes to the DEPS file.
    WriteStringToFile(new_data, deps_file)
    return True
  except IOError as e:
    print('Something went wrong while updating DEPS file, %s' % e)
    return False
634 | |
635 | |
636 def _TryParseHistogramValuesFromOutput(metric, text): | |
637 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>. | |
638 | |
639 Args: | |
640 metric: The metric as a list of [<trace>, <value>] strings. | |
641 text: The text to parse the metric values from. | |
642 | |
643 Returns: | |
644 A list of floating point numbers found, [] if none were found. | |
645 """ | |
646 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1]) | |
647 | |
648 text_lines = text.split('\n') | |
649 values_list = [] | |
650 | |
651 for current_line in text_lines: | |
652 if metric_formatted in current_line: | |
653 current_line = current_line[len(metric_formatted):] | |
654 | |
655 try: | |
656 histogram_values = eval(current_line) | |
657 | |
658 for b in histogram_values['buckets']: | |
659 average_for_bucket = float(b['high'] + b['low']) * 0.5 | |
660 # Extends the list with N-elements with the average for that bucket. | |
661 values_list.extend([average_for_bucket] * b['count']) | |
662 except Exception: | |
663 pass | |
664 | |
665 return values_list | |
666 | |
667 | |
def _TryParseResultValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] string pairs.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  single_result_re = re.compile(
      metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (not single_result_match is None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (not multi_results_match is None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (not mean_stddev_match is None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list
                 if bisect_utils.IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [
      ['times', 't'],
      ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'],
      ['warm_times', 'page_load_time'],
  ]

  if metric in metrics_to_sum and values_list:
    # Every entry is already a float here, so sum() is equivalent to the
    # original reduce() (which is no longer a builtin in Python 3).
    values_list = [sum(values_list)]

  return values_list
733 | |
734 | |
def _ParseMetricValuesFromOutput(metric, text):
  """Parses performance test output and retrieves results for a metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Prefer RESULT-style lines; fall back to HISTOGRAM-style output.
  values = _TryParseResultValuesFromOutput(metric, text)
  if values:
    return values
  return _TryParseHistogramValuesFromOutput(metric, text)
752 | |
753 | |
def _GenerateProfileIfNecessary(command_args):
  """Checks the command line of the performance test for dependencies on
  profile generation, and runs tools/perf/generate_profile as necessary.

  Args:
    command_args: Command line being passed to performance test, as a list.

  Returns:
    False if profile generation was necessary and failed, otherwise True.
  """
  if '--profile-dir' in ' '.join(command_args):
    # If we were using python 2.7+, we could just use the argparse
    # module's parse_known_args to grab --profile-dir. Since some of the
    # bots still run 2.6, have to grab the arguments manually.
    arg_dict = {}
    args_to_parse = ['--profile-dir', '--browser']

    for arg_to_parse in args_to_parse:
      for i, current_arg in enumerate(command_args):
        if arg_to_parse in current_arg:
          current_arg_split = current_arg.split('=')

          # Check 2 cases, --arg=<val> and --arg <val>.
          if len(current_arg_split) == 2:
            arg_dict[arg_to_parse] = current_arg_split[1]
          elif i + 1 < len(command_args):
            arg_dict[arg_to_parse] = command_args[i + 1]

    path_to_generate = os.path.join('tools', 'perf', 'generate_profile')

    # dict.has_key() is deprecated (and removed in Python 3); use "in".
    if '--profile-dir' in arg_dict and '--browser' in arg_dict:
      profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
      return not bisect_utils.RunProcess(['python', path_to_generate,
          '--profile-type-to-generate', profile_type,
          '--browser', arg_dict['--browser'], '--output-dir', profile_path])
    return False
  return True
791 | |
792 | |
793 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data): | |
794 """Adds new revisions to the revision_data dictionary and initializes them. | |
795 | |
796 Args: | |
797 revisions: List of revisions to add. | |
798 depot: Depot that's currently in use (src, webkit, etc...) | |
799 sort: Sorting key for displaying revisions. | |
800 revision_data: A dictionary to add the new revisions into. | |
801 Existing revisions will have their sort keys adjusted. | |
802 """ | |
803 num_depot_revisions = len(revisions) | |
804 | |
805 for _, v in revision_data.iteritems(): | |
806 if v['sort'] > sort: | |
807 v['sort'] += num_depot_revisions | |
808 | |
809 for i in xrange(num_depot_revisions): | |
810 r = revisions[i] | |
811 revision_data[r] = { | |
812 'revision' : r, | |
813 'depot' : depot, | |
814 'value' : None, | |
815 'perf_time' : 0, | |
816 'build_time' : 0, | |
817 'passed' : '?', | |
818 'sort' : i + sort + 1, | |
819 } | |
820 | |
821 | |
def _PrintThankYou():
  """Writes the bisect results thank-you blurb to stdout."""
  sys.stdout.write('%s\n' % RESULTS_THANKYOU)
824 | |
825 | |
826 def _PrintTableRow(column_widths, row_data): | |
827 """Prints out a row in a formatted table that has columns aligned. | |
828 | |
829 Args: | |
830 column_widths: A list of column width numbers. | |
831 row_data: A list of items for each column in this row. | |
832 """ | |
833 assert len(column_widths) == len(row_data) | |
834 text = '' | |
835 for i in xrange(len(column_widths)): | |
836 current_row_data = row_data[i].center(column_widths[i], ' ') | |
837 text += ('%%%ds' % column_widths[i]) % current_row_data | |
838 print text | |
839 | |
840 | |
841 def _PrintStepTime(revision_data_sorted): | |
842 """Prints information about how long various steps took. | |
843 | |
844 Args: | |
845 revision_data_sorted: The sorted list of revision data dictionaries.""" | |
846 step_perf_time_avg = 0.0 | |
847 step_build_time_avg = 0.0 | |
848 step_count = 0.0 | |
849 for _, current_data in revision_data_sorted: | |
850 if current_data['value']: | |
851 step_perf_time_avg += current_data['perf_time'] | |
852 step_build_time_avg += current_data['build_time'] | |
853 step_count += 1 | |
854 if step_count: | |
855 step_perf_time_avg = step_perf_time_avg / step_count | |
856 step_build_time_avg = step_build_time_avg / step_count | |
857 print | |
858 print 'Average build time : %s' % datetime.timedelta( | |
859 seconds=int(step_build_time_avg)) | |
860 print 'Average test time : %s' % datetime.timedelta( | |
861 seconds=int(step_perf_time_avg)) | |
862 | |
863 | |
864 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good): | |
865 """Compiles a list of other possible regressions from the revision data. | |
866 | |
867 Args: | |
868 revision_data_sorted: Sorted list of (revision, revision data) pairs. | |
869 bad_greater_than_good: Whether the result value at the "bad" revision is | |
870 numerically greater than the result value at the "good" revision. | |
871 | |
872 Returns: | |
873 A list of [current_rev, previous_rev, confidence] for other places where | |
874 there may have been a regression. | |
875 """ | |
876 other_regressions = [] | |
877 previous_values = [] | |
878 previous_id = None | |
879 for current_id, current_data in revision_data_sorted: | |
880 current_values = current_data['value'] | |
881 if current_values: | |
882 current_values = current_values['values'] | |
883 if previous_values: | |
884 confidence = ConfidenceScore(previous_values, [current_values]) | |
885 mean_of_prev_runs = math_utils.Mean(sum(previous_values, [])) | |
886 mean_of_current_runs = math_utils.Mean(current_values) | |
887 | |
888 # Check that the potential regression is in the same direction as | |
889 # the overall regression. If the mean of the previous runs < the | |
890 # mean of the current runs, this local regression is in same | |
891 # direction. | |
892 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs | |
893 is_same_direction = (prev_less_than_current if | |
894 bad_greater_than_good else not prev_less_than_current) | |
895 | |
896 # Only report potential regressions with high confidence. | |
897 if is_same_direction and confidence > 50: | |
898 other_regressions.append([current_id, previous_id, confidence]) | |
899 previous_values.append(current_values) | |
900 previous_id = current_id | |
901 return other_regressions | |
902 | |
903 | |
904 class BisectPerformanceMetrics(object): | |
905 """This class contains functionality to perform a bisection of a range of | |
906 revisions to narrow down where performance regressions may have occurred. | |
907 | |
908 The main entry-point is the Run method. | |
909 """ | |
910 | |
911 def __init__(self, source_control, opts): | |
912 super(BisectPerformanceMetrics, self).__init__() | |
913 | |
914 self.opts = opts | |
915 self.source_control = source_control | |
916 self.src_cwd = os.getcwd() | |
917 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros') | |
918 self.depot_cwd = {} | |
919 self.cleanup_commands = [] | |
920 self.warnings = [] | |
921 self.builder = builder.Builder.FromOpts(opts) | |
922 | |
923 for d in DEPOT_NAMES: | |
924 # The working directory of each depot is just the path to the depot, but | |
925 # since we're already in 'src', we can skip that part. | |
926 | |
927 self.depot_cwd[d] = os.path.join( | |
928 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:]) | |
929 | |
930 def PerformCleanup(self): | |
931 """Performs cleanup when script is finished.""" | |
932 os.chdir(self.src_cwd) | |
933 for c in self.cleanup_commands: | |
934 if c[0] == 'mv': | |
935 shutil.move(c[1], c[2]) | |
936 else: | |
937 assert False, 'Invalid cleanup command.' | |
938 | |
939 def GetRevisionList(self, depot, bad_revision, good_revision): | |
940 """Retrieves a list of all the commits between the bad revision and | |
941 last known good revision.""" | |
942 | |
943 revision_work_list = [] | |
944 | |
945 if depot == 'cros': | |
946 revision_range_start = good_revision | |
947 revision_range_end = bad_revision | |
948 | |
949 cwd = os.getcwd() | |
950 self.ChangeToDepotWorkingDirectory('cros') | |
951 | |
952 # Print the commit timestamps for every commit in the revision time | |
953 # range. We'll sort them and bisect by that. There is a remote chance that | |
954 # 2 (or more) commits will share the exact same timestamp, but it's | |
955 # probably safe to ignore that case. | |
956 cmd = ['repo', 'forall', '-c', | |
957 'git log --format=%%ct --before=%d --after=%d' % ( | |
958 revision_range_end, revision_range_start)] | |
959 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd) | |
960 | |
961 assert not return_code, ('An error occurred while running ' | |
962 '"%s"' % ' '.join(cmd)) | |
963 | |
964 os.chdir(cwd) | |
965 | |
966 revision_work_list = list(set( | |
967 [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)])) | |
968 revision_work_list = sorted(revision_work_list, reverse=True) | |
969 else: | |
970 cwd = self._GetDepotDirectory(depot) | |
971 revision_work_list = self.source_control.GetRevisionList(bad_revision, | |
972 good_revision, cwd=cwd) | |
973 | |
974 return revision_work_list | |
975 | |
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk revision to its v8_bleeding_edge counterpart.

    Args:
      revision: A git hash in the V8 (trunk) repository.

    Returns:
      The corresponding v8_bleeding_edge git hash if one can be determined,
      otherwise None.
    """
    commit_position = self.source_control.GetCommitPosition(revision)

    if bisect_utils.IsStringInt(commit_position):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      # 1. trunk revision N has description "Version X.Y.Z"
      # 2. bleeding_edge revision (N-1) has description "Prepare push to
      #    trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't have the expected format; fall through to the
            # older "Prepare push to trunk" heuristic below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          git_revision = self.source_control.ResolveToRevision(
              int(commit_position) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
1029 | |
1030 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True): | |
1031 cwd = self._GetDepotDirectory('v8') | |
1032 cmd = ['log', '--format=%ct', '-1', revision] | |
1033 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | |
1034 commit_time = int(output) | |
1035 commits = [] | |
1036 | |
1037 if search_forward: | |
1038 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time, | |
1039 'origin/master'] | |
1040 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | |
1041 output = output.split() | |
1042 commits = output | |
1043 commits = reversed(commits) | |
1044 else: | |
1045 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time, | |
1046 'origin/master'] | |
1047 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | |
1048 output = output.split() | |
1049 commits = output | |
1050 | |
1051 bleeding_edge_revision = None | |
1052 | |
1053 for c in commits: | |
1054 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c) | |
1055 if bleeding_edge_revision: | |
1056 break | |
1057 | |
1058 return bleeding_edge_revision | |
1059 | |
  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which may
    be needed if the bisect recurses into those depots later.

    Args:
      depot: Name of depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    try:
      # Minimal stand-ins for the gclient-provided DEPS helpers so the file
      # can be exec'd directly: Var() resolves against the file's own "vars"
      # dict, From() is a no-op.
      deps_data = {
          'Var': lambda _: deps_data["vars"][_],
          'From': lambda *args: None,
      }

      # Prefer the git-specific DEPS file when present, else fall back.
      deps_file = bisect_utils.FILE_DEPS_GIT
      if not os.path.exists(deps_file):
        deps_file = bisect_utils.FILE_DEPS
      execfile(deps_file, {}, deps_data)
      deps_data = deps_data['deps']

      # Matches the "<repo>.git@<hash>" suffix of a deps entry URL.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        # Skip depots pinned to a platform other than the current one.
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        # Only consider depots that the current depot may recurse into.
        if (depot_data.get('recurse') and depot in depot_data.get('from')):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            # Remember the depot's working dir (strip the 'src/' prefix).
            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
                depot_data_src[4:])
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Could not parse revision for %s while bisecting '
                              '%s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      # NOTE(review): presumably execfile of a DEPS file can raise
      # ImportError when the file imports modules unavailable here — confirm.
      # In that case, fall back to parsing the revisions out of the raw text.
      deps_file_contents = ReadStringFromFile(deps_file)
      parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          # Map the DEPS variable name back to its depot name.
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results
1120 | |
1121 def _Get3rdPartyRevisions(self, depot): | |
1122 """Parses the DEPS file to determine WebKit/v8/etc... versions. | |
1123 | |
1124 Args: | |
1125 depot: A depot name. Should be in the DEPOT_NAMES list. | |
1126 | |
1127 Returns: | |
1128 A dict in the format {depot: revision} if successful, otherwise None. | |
1129 """ | |
1130 cwd = os.getcwd() | |
1131 self.ChangeToDepotWorkingDirectory(depot) | |
1132 | |
1133 results = {} | |
1134 | |
1135 if depot == 'chromium' or depot == 'android-chrome': | |
1136 results = self._ParseRevisionsFromDEPSFile(depot) | |
1137 os.chdir(cwd) | |
1138 | |
1139 if depot == 'cros': | |
1140 cmd = [ | |
1141 bisect_utils.CROS_SDK_PATH, | |
1142 '--', | |
1143 'portageq-%s' % self.opts.cros_board, | |
1144 'best_visible', | |
1145 '/build/%s' % self.opts.cros_board, | |
1146 'ebuild', | |
1147 CROS_CHROMEOS_PATTERN | |
1148 ] | |
1149 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd) | |
1150 | |
1151 assert not return_code, ('An error occurred while running ' | |
1152 '"%s"' % ' '.join(cmd)) | |
1153 | |
1154 if len(output) > CROS_CHROMEOS_PATTERN: | |
1155 output = output[len(CROS_CHROMEOS_PATTERN):] | |
1156 | |
1157 if len(output) > 1: | |
1158 output = output.split('_')[0] | |
1159 | |
1160 if len(output) > 3: | |
1161 contents = output.split('.') | |
1162 | |
1163 version = contents[2] | |
1164 | |
1165 if contents[3] != '0': | |
1166 warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' % | |
1167 (version, contents[3], version)) | |
1168 if not warningText in self.warnings: | |
1169 self.warnings.append(warningText) | |
1170 | |
1171 cwd = os.getcwd() | |
1172 self.ChangeToDepotWorkingDirectory('chromium') | |
1173 cmd = ['log', '-1', '--format=%H', | |
1174 '--author=chrome-release@google.com', | |
1175 '--grep=to %s' % version, 'origin/master'] | |
1176 return_code = bisect_utils.CheckRunGit(cmd) | |
1177 os.chdir(cwd) | |
1178 | |
1179 results['chromium'] = output.strip() | |
1180 | |
1181 if depot == 'v8': | |
1182 # We can't try to map the trunk revision to bleeding edge yet, because | |
1183 # we don't know which direction to try to search in. Have to wait until | |
1184 # the bisect has narrowed the results down to 2 v8 rolls. | |
1185 results['v8_bleeding_edge'] = None | |
1186 | |
1187 return results | |
1188 | |
1189 def BackupOrRestoreOutputDirectory(self, restore=False, build_type='Release'): | |
1190 """Backs up or restores build output directory based on restore argument. | |
1191 | |
1192 Args: | |
1193 restore: Indicates whether to restore or backup. Default is False(Backup) | |
1194 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.) | |
1195 | |
1196 Returns: | |
1197 Path to backup or restored location as string. otherwise None if it fails. | |
1198 """ | |
1199 build_dir = os.path.abspath( | |
1200 builder.GetBuildOutputDirectory(self.opts, self.src_cwd)) | |
1201 source_dir = os.path.join(build_dir, build_type) | |
1202 destination_dir = os.path.join(build_dir, '%s.bak' % build_type) | |
1203 if restore: | |
1204 source_dir, destination_dir = destination_dir, source_dir | |
1205 if os.path.exists(source_dir): | |
1206 RmTreeAndMkDir(destination_dir, skip_makedir=True) | |
1207 shutil.move(source_dir, destination_dir) | |
1208 return destination_dir | |
1209 return None | |
1210 | |
  def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
                                 patch_sha, out_dir):
    """Checks and downloads build archive for a given revision.

    Checks for build archive with Git hash or SVN revision. If either of the
    file exists, then downloads the archive file.

    Args:
      revision: A Git hash revision.
      gs_bucket: Cloud storage bucket name
      target_arch: 32 or 64 bit build target
      patch_sha: SHA1 hex digest of a DEPS patch (used while bisecting 3rd
          party repositories); passed through when building the remote path.
      out_dir: Build output directory where downloaded file is stored.

    Returns:
      Downloaded archive file path if exists, otherwise None.
    """
    # Source archive file path on cloud storage using Git revision.
    source_file = GetRemoteBuildPath(
        revision, self.opts.target_platform, target_arch, patch_sha)
    downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    if not downloaded_archive:
      # Get commit position for the given SHA.
      commit_position = self.source_control.GetCommitPosition(revision)
      if commit_position:
        # Source archive file path on cloud storage using SVN revision.
        source_file = GetRemoteBuildPath(
            commit_position, self.opts.target_platform, target_arch, patch_sha)
        return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    return downloaded_archive
1241 | |
  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
    """Downloads the build archive for the given revision.

    Args:
      revision: The Git revision to download or build.
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      True if download succeeds, otherwise False.
    """
    patch_sha = None
    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' and add patch_sha evaluated above to it.
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    # Get Build output directory
    abs_build_dir = os.path.abspath(
        builder.GetBuildOutputDirectory(self.opts, self.src_cwd))

    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
        revision, self.opts.gs_bucket, self.opts.target_arch,
        patch_sha, abs_build_dir)

    # Downloaded archive file path, downloads build archive for given revision.
    downloaded_file = fetch_build_func()

    # When build archive doesn't exists, post a build request to tryserver
    # and wait for the build to be produced.
    if not downloaded_file:
      downloaded_file = self.PostBuildRequestAndWait(
          revision, fetch_build=fetch_build_func, patch=patch)
      if not downloaded_file:
        return False

    # Generic name for the archive, created when archive file is extracted.
    output_dir = os.path.join(
        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
    # Unzip build archive directory.
    try:
      RmTreeAndMkDir(output_dir, skip_makedir=True)
      # Back up the current build output so it can be restored on failure.
      self.BackupOrRestoreOutputDirectory(restore=False)
      # Build output directory based on target(e.g. out/Release, out/Debug).
      target_build_output_dir = os.path.join(abs_build_dir, build_type)
      ExtractZip(downloaded_file, abs_build_dir)
      if not os.path.exists(output_dir):
        # Due to recipe changes, the builds extract folder contains
        # out/Release instead of full-build-<platform>/Release.
        if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
          output_dir = os.path.join(abs_build_dir, 'out', build_type)
        else:
          raise IOError('Missing extracted folder %s ' % output_dir)

      print 'Moving build from %s to %s' % (
          output_dir, target_build_output_dir)
      shutil.move(output_dir, target_build_output_dir)
      return True
    except Exception as e:
      print 'Something went wrong while extracting archive file: %s' % e
      # Extraction failed; put the backed-up output directory back.
      self.BackupOrRestoreOutputDirectory(restore=True)
      # Cleanup any leftovers from unzipping.
      if os.path.exists(output_dir):
        RmTreeAndMkDir(output_dir, skip_makedir=True)
    finally:
      # Delete downloaded archive
      if os.path.exists(downloaded_file):
        os.remove(downloaded_file)
    # Only reached on extraction failure; the success path returns above.
    return False
1314 | |
  def PostBuildRequestAndWait(self, git_revision, fetch_build, patch=None):
    """POSTs the build request job to the try server instance.

    A try job build request is posted to tryserver.chromium.perf master,
    and waits for the binaries to be produced and archived on cloud storage.
    Once the build is ready and stored onto cloud, build archive is downloaded
    into the output folder.

    Args:
      git_revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.
    """
    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and build time in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if bisect_utils.IsWindowsHost():
        if bisect_utils.Is64BitWindows() and target_arch == 'x64':
          # NOTE(review): this branch returns the same builder as the general
          # Windows case below, making the x64 special-case dead code — was a
          # dedicated x64 builder intended? Verify against master.cfg.
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if bisect_utils.IsLinuxHost():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if bisect_utils.IsMacHost():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
    if not fetch_build:
      # NOTE(review): returns False here although the docstring promises a
      # path or None; both are falsy, so truthiness-checking callers are
      # unaffected.
      return False

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to try server builders.
    # This ID is added to "Reason" property of the build.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (git_revision, patch, time.time()))

    # Creates a try job description.
    # Always use Git hash to post build request since Commit positions are
    # not supported by builders to build.
    job_args = {
        'revision': 'src@%s' % git_revision,
        'bot': bot_name,
        'name': build_request_id,
    }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if request_build.PostTryJob(builder_host, builder_port, job_args):
      target_file, error_msg = _WaitUntilBuildIsReady(
          fetch_build, bot_name, builder_host, builder_port, build_request_id,
          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, git_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % git_revision
    return None
1381 | |
1382 def IsDownloadable(self, depot): | |
1383 """Checks if build can be downloaded based on target platform and depot.""" | |
1384 if (self.opts.target_platform in ['chromium', 'android'] and | |
1385 self.opts.gs_bucket): | |
1386 return (depot == 'chromium' or | |
1387 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or | |
1388 'v8' in DEPOT_DEPS_NAME[depot]['from']) | |
1389 return False | |
1390 | |
1391 def UpdateDepsContents(self, deps_contents, depot, git_revision, deps_key): | |
1392 """Returns modified version of DEPS file contents. | |
1393 | |
1394 Args: | |
1395 deps_contents: DEPS file content. | |
1396 depot: Current depot being bisected. | |
1397 git_revision: A git hash to be updated in DEPS. | |
1398 deps_key: Key in vars section of DEPS file to be searched. | |
1399 | |
1400 Returns: | |
1401 Updated DEPS content as string if deps key is found, otherwise None. | |
1402 """ | |
1403 # Check whether the depot and revision pattern in DEPS file vars | |
1404 # e.g. for webkit the format is "webkit_revision": "12345". | |
1405 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key, | |
1406 re.MULTILINE) | |
1407 new_data = None | |
1408 if re.search(deps_revision, deps_contents): | |
1409 commit_position = self.source_control.GetCommitPosition( | |
1410 git_revision, self._GetDepotDirectory(depot)) | |
1411 if not commit_position: | |
1412 print 'Could not determine commit position for %s' % git_revision | |
1413 return None | |
1414 # Update the revision information for the given depot | |
1415 new_data = re.sub(deps_revision, str(commit_position), deps_contents) | |
1416 else: | |
1417 # Check whether the depot and revision pattern in DEPS file vars | |
1418 # e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..". | |
1419 deps_revision = re.compile( | |
1420 r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key, | |
1421 re.MULTILINE) | |
1422 if re.search(deps_revision, deps_contents): | |
1423 new_data = re.sub(deps_revision, git_revision, deps_contents) | |
1424 if new_data: | |
1425 # For v8_bleeding_edge revisions change V8 branch in order | |
1426 # to fetch bleeding edge revision. | |
1427 if depot == 'v8_bleeding_edge': | |
1428 new_data = _UpdateV8Branch(new_data) | |
1429 if not new_data: | |
1430 return None | |
1431 return new_data | |
1432 | |
  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method search DEPS for a particular pattern in which depot revision
    is specified (e.g "webkit_revision": "123456"). If a match is found then
    it resolves the given git hash to SVN revision and replace it in DEPS file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      print 'DEPS update not supported for Depot: %s', depot
      return False

    # Hack for Angle repository. In the DEPS file, "vars" dictionary variable
    # contains "angle_revision" key that holds git hash instead of SVN revision.
    # And sometime "angle_revision" key is not specified in "vars" variable.
    # In such cases check, "deps" dictionary variable that matches
    # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
    if depot == 'angle':
      return _UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      updated_deps_content = self.UpdateDepsContents(
          deps_contents, depot, revision, deps_var)
      # Write changes to DEPS file
      if updated_deps_content:
        WriteStringToFile(updated_deps_content, deps_file)
        return True
    except IOError, e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
    # No deps key matched (or the read/write failed above).
    return False
1476 | |
  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with git hash of chromium revision and DEPS patch text.

    Raises:
      RuntimeError: The DEPS file is missing, HEAD could not be resolved,
          the DEPS checkout failed, or the DEPS update failed.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
    # Get current chromium revision (git hash).
    cmd = ['rev-parse', 'HEAD']
    chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(
          bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          # Diff only the DEPS file, with src/ prefixes so the patch can be
          # applied from the directory above src.
          diff_command = [
              'diff',
              '--src-prefix=src/',
              '--dst-prefix=src/',
              '--no-ext-diff',
              bisect_utils.FILE_DEPS,
          ]
          diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError(
              'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
      else:
        raise RuntimeError(
            'DEPS checkout Failed for chromium revision : [%s]' % chromium_sha)
    # Depot doesn't hang off chromium or v8; nothing to patch.
    return (None, None)
1518 | |
  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Args:
      depot: Current depot being bisected.
      revision: A git hash of the revision to fetch from cloud storage;
          when None (or the build isn't downloadable) the currently synced
          revision is built locally instead.

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True

    build_success = False
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for dependency repository.
        # Note: CreateDEPSPatch returns the chromium revision the patch
        # applies to, so 'revision' refers to chromium from here on.
        revision, deps_patch = self.CreateDEPSPatch(depot, revision)
      if self.DownloadCurrentBuild(revision, patch=deps_patch):
        if deps_patch:
          # Reverts the changes to DEPS file.
          self.source_control.CheckoutFileAtRevision(
              bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
        build_success = True
    else:
      # These codes are executed when bisect bots builds binaries locally.
      build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success
1549 | |
1550 def RunGClientHooks(self): | |
1551 """Runs gclient with runhooks command. | |
1552 | |
1553 Returns: | |
1554 True if gclient reports no errors. | |
1555 """ | |
1556 if self.opts.debug_ignore_build: | |
1557 return True | |
1558 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd) | |
1559 | |
1560 def _IsBisectModeUsingMetric(self): | |
1561 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV] | |
1562 | |
1563 def _IsBisectModeReturnCode(self): | |
1564 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE] | |
1565 | |
1566 def _IsBisectModeStandardDeviation(self): | |
1567 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] | |
1568 | |
1569 def GetCompatibleCommand(self, command_to_run, revision, depot): | |
1570 # Prior to crrev.com/274857 *only* android-chromium-testshell | |
1571 # Then until crrev.com/276628 *both* (android-chromium-testshell and | |
1572 # android-chrome-shell) work. After that rev 276628 *only* | |
1573 # android-chrome-shell works. bisect-perf-regression.py script should | |
1574 # handle these cases and set appropriate browser type based on revision. | |
1575 if self.opts.target_platform in ['android']: | |
1576 # When its a third_party depot, get the chromium revision. | |
1577 if depot != 'chromium': | |
1578 revision = bisect_utils.CheckRunGit( | |
1579 ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip() | |
1580 commit_position = self.source_control.GetCommitPosition(revision, | |
1581 cwd=self.src_cwd) | |
1582 if not commit_position: | |
1583 return command_to_run | |
1584 cmd_re = re.compile('--browser=(?P<browser_type>\S+)') | |
1585 matches = cmd_re.search(command_to_run) | |
1586 if bisect_utils.IsStringInt(commit_position) and matches: | |
1587 cmd_browser = matches.group('browser_type') | |
1588 if commit_position <= 274857 and cmd_browser == 'android-chrome-shell': | |
1589 return command_to_run.replace(cmd_browser, | |
1590 'android-chromium-testshell') | |
1591 elif (commit_position >= 276628 and | |
1592 cmd_browser == 'android-chromium-testshell'): | |
1593 return command_to_run.replace(cmd_browser, | |
1594 'android-chrome-shell') | |
1595 return command_to_run | |
1596 | |
  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.
          This is the result chart name and trace name, separated by slash.
          May be None for perf try jobs.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
      The arguments reset_on_first_run, upload_on_last_run and results_label
      are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
    """
    success_code, failure_code = 0, -1

    if self.opts.debug_ignore_perf_test:
      # Debug mode: skip running the test and return zeroed fake results.
      fake_results = {
          'mean': 0.0,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': [0.0]
      }
      return (fake_results, success_code)

    # For Windows platform set posix=False, to parse windows paths correctly.
    # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
    # refer to http://bugs.python.org/issue1724822. By default posix=True.
    args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())

    if not _GenerateProfileIfNecessary(args):
      err_text = 'Failed to generate profile for performance test.'
      return (err_text, failure_code)

    # If running a Telemetry test for Chrome OS, insert the remote IP and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % bisect_utils.CROS_TEST_KEY_PATH)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    # Repeat the test up to repeat_test_count times; the loop also stops
    # early when the max_time_minutes budget is exhausted (see below).
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      try:
        output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
            current_args, cwd=self.src_cwd)
      except OSError, e:
        # ENOENT typically means the test binary path is wrong; give the
        # user a hint before bailing out. Any other OSError is re-raised.
        if e.errno == errno.ENOENT:
          err_text = ('Something went wrong running the performance test. '
                      'Please review the command line:\n\n')
          if 'src/' in ' '.join(args):
            err_text += ('Check that you haven\'t accidentally specified a '
                         'path with src/ in the command.\n\n')
          err_text += ' '.join(args)
          err_text += '\n'

          return (err_text, failure_code)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      if metric and self._IsBisectModeUsingMetric():
        metric_values += _ParseMetricValuesFromOutput(metric, output)
        # If we're bisecting on a metric (ie, changes in the mean or
        # standard deviation) and no metric values are produced, bail out.
        if not metric_values:
          break
      elif self._IsBisectModeReturnCode():
        # Return-code mode: record the exit code of each run.
        metric_values.append(return_code)

      elapsed_minutes = (time.time() - start_time) / 60.0
      if elapsed_minutes >= self.opts.max_time_minutes:
        break

    if metric and len(metric_values) == 0:
      err_text = 'Metric %s was not found in the test output.' % metric
      # TODO(qyearsley): Consider also getting and displaying a list of metrics
      # that were found in the output here.
      return (err_text, failure_code, output_of_all_runs)

    # If we're bisecting on return codes, we're really just looking for zero vs
    # non-zero.
    values = {}
    if self._IsBisectModeReturnCode():
      # If any of the return codes is non-zero, output 1.
      overall_return_code = 0 if (
          all(current_value == 0 for current_value in metric_values)) else 1

      values = {
          'mean': overall_return_code,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': metric_values,
      }

      print 'Results of performance test: Command returned with %d' % (
          overall_return_code)
      print
    elif metric:
      # Need to get the average value if there were multiple values.
      truncated_mean = math_utils.TruncatedMean(
          metric_values, self.opts.truncate_percent)
      standard_err = math_utils.StandardError(metric_values)
      standard_dev = math_utils.StandardDeviation(metric_values)

      if self._IsBisectModeStandardDeviation():
        # In std-dev mode the reported values collapse to the single
        # standard-deviation figure.
        metric_values = [standard_dev]

      values = {
          'mean': truncated_mean,
          'std_err': standard_err,
          'std_dev': standard_dev,
          'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
    return (values, success_code, output_of_all_runs)
1738 | |
1739 def _FindAllRevisionsToSync(self, revision, depot): | |
1740 """Finds all dependent revisions and depots that need to be synced. | |
1741 | |
1742 For example skia is broken up into 3 git mirrors over skia/src, | |
1743 skia/gyp, and skia/include. To sync skia/src properly, one has to find | |
1744 the proper revisions in skia/gyp and skia/include. | |
1745 | |
1746 This is only useful in the git workflow, as an SVN depot may be split into | |
1747 multiple mirrors. | |
1748 | |
1749 Args: | |
1750 revision: The revision to sync to. | |
1751 depot: The depot in use at the moment (probably skia). | |
1752 | |
1753 Returns: | |
1754 A list of [depot, revision] pairs that need to be synced. | |
1755 """ | |
1756 revisions_to_sync = [[depot, revision]] | |
1757 | |
1758 is_base = ((depot == 'chromium') or (depot == 'cros') or | |
1759 (depot == 'android-chrome')) | |
1760 | |
1761 # Some SVN depots were split into multiple git depots, so we need to | |
1762 # figure out for each mirror which git revision to grab. There's no | |
1763 # guarantee that the SVN revision will exist for each of the dependent | |
1764 # depots, so we have to grep the git logs and grab the next earlier one. | |
1765 if (not is_base | |
1766 and DEPOT_DEPS_NAME[depot]['depends'] | |
1767 and self.source_control.IsGit()): | |
1768 commit_position = self.source_control.GetCommitPosition(revision) | |
1769 | |
1770 for d in DEPOT_DEPS_NAME[depot]['depends']: | |
1771 self.ChangeToDepotWorkingDirectory(d) | |
1772 | |
1773 dependant_rev = self.source_control.ResolveToRevision( | |
1774 commit_position, d, DEPOT_DEPS_NAME, -1000) | |
1775 | |
1776 if dependant_rev: | |
1777 revisions_to_sync.append([d, dependant_rev]) | |
1778 | |
1779 num_resolved = len(revisions_to_sync) | |
1780 num_needed = len(DEPOT_DEPS_NAME[depot]['depends']) | |
1781 | |
1782 self.ChangeToDepotWorkingDirectory(depot) | |
1783 | |
1784 if not ((num_resolved - 1) == num_needed): | |
1785 return None | |
1786 | |
1787 return revisions_to_sync | |
1788 | |
1789 def PerformPreBuildCleanup(self): | |
1790 """Performs cleanup between runs.""" | |
1791 print 'Cleaning up between runs.' | |
1792 print | |
1793 | |
1794 # Leaving these .pyc files around between runs may disrupt some perf tests. | |
1795 for (path, _, files) in os.walk(self.src_cwd): | |
1796 for cur_file in files: | |
1797 if cur_file.endswith('.pyc'): | |
1798 path_to_file = os.path.join(path, cur_file) | |
1799 os.remove(path_to_file) | |
1800 | |
1801 def PerformCrosChrootCleanup(self): | |
1802 """Deletes the chroot. | |
1803 | |
1804 Returns: | |
1805 True if successful. | |
1806 """ | |
1807 cwd = os.getcwd() | |
1808 self.ChangeToDepotWorkingDirectory('cros') | |
1809 cmd = [bisect_utils.CROS_SDK_PATH, '--delete'] | |
1810 return_code = bisect_utils.RunProcess(cmd) | |
1811 os.chdir(cwd) | |
1812 return not return_code | |
1813 | |
1814 def CreateCrosChroot(self): | |
1815 """Creates a new chroot. | |
1816 | |
1817 Returns: | |
1818 True if successful. | |
1819 """ | |
1820 cwd = os.getcwd() | |
1821 self.ChangeToDepotWorkingDirectory('cros') | |
1822 cmd = [bisect_utils.CROS_SDK_PATH, '--create'] | |
1823 return_code = bisect_utils.RunProcess(cmd) | |
1824 os.chdir(cwd) | |
1825 return not return_code | |
1826 | |
1827 def _PerformPreSyncCleanup(self, depot): | |
1828 """Performs any necessary cleanup before syncing. | |
1829 | |
1830 Args: | |
1831 depot: Depot name. | |
1832 | |
1833 Returns: | |
1834 True if successful. | |
1835 """ | |
1836 if depot == 'chromium' or depot == 'android-chrome': | |
1837 # Removes third_party/libjingle. At some point, libjingle was causing | |
1838 # issues syncing when using the git workflow (crbug.com/266324). | |
1839 os.chdir(self.src_cwd) | |
1840 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'): | |
1841 return False | |
1842 # Removes third_party/skia. At some point, skia was causing | |
1843 # issues syncing when using the git workflow (crbug.com/377951). | |
1844 if not bisect_utils.RemoveThirdPartyDirectory('skia'): | |
1845 return False | |
1846 elif depot == 'cros': | |
1847 return self.PerformCrosChrootCleanup() | |
1848 return True | |
1849 | |
1850 def _RunPostSync(self, depot): | |
1851 """Performs any work after syncing. | |
1852 | |
1853 Args: | |
1854 depot: Depot name. | |
1855 | |
1856 Returns: | |
1857 True if successful. | |
1858 """ | |
1859 if self.opts.target_platform == 'android': | |
1860 if not builder.SetupAndroidBuildEnvironment(self.opts, | |
1861 path_to_src=self.src_cwd): | |
1862 return False | |
1863 | |
1864 if depot == 'cros': | |
1865 return self.CreateCrosChroot() | |
1866 else: | |
1867 return self.RunGClientHooks() | |
1868 return True | |
1869 | |
1870 def ShouldSkipRevision(self, depot, revision): | |
1871 """Checks whether a particular revision can be safely skipped. | |
1872 | |
1873 Some commits can be safely skipped (such as a DEPS roll), since the tool | |
1874 is git based those changes would have no effect. | |
1875 | |
1876 Args: | |
1877 depot: The depot being bisected. | |
1878 revision: Current revision we're synced to. | |
1879 | |
1880 Returns: | |
1881 True if we should skip building/testing this revision. | |
1882 """ | |
1883 if depot == 'chromium': | |
1884 if self.source_control.IsGit(): | |
1885 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision] | |
1886 output = bisect_utils.CheckRunGit(cmd) | |
1887 | |
1888 files = output.splitlines() | |
1889 | |
1890 if len(files) == 1 and files[0] == 'DEPS': | |
1891 return True | |
1892 | |
1893 return False | |
1894 | |
  def RunTest(self, revision, depot, command, metric, skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command: The command to execute the performance test.
      metric: The performance metric being tested.
      skippable: If True, revisions that only touch the DEPS file may be
          skipped (see ShouldSkipRevision).

    Returns:
      On success, a tuple containing the results of the performance test:
      (values, status, external revisions, test seconds, build seconds).
      Otherwise, a tuple with the error message and a BUILD_RESULT_* status.
    """
    # Decide which sync program to use.
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    # Decide what depots will need to be synced to what revisions.
    revisions_to_sync = self._FindAllRevisionsToSync(revision, depot)
    if not revisions_to_sync:
      return ('Failed to resolve dependent depots.', BUILD_RESULT_FAIL)

    if not self._PerformPreSyncCleanup(depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    # Do the syncing for all depots.
    if not self.opts.debug_ignore_sync:
      if not self._SyncAllRevisions(revisions_to_sync, sync_client):
        return ('Failed to sync: [%s]' % str(revision), BUILD_RESULT_FAIL)

    # Try to do any post-sync steps. This may include "gclient runhooks".
    if not self._RunPostSync(depot):
      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)

    # Skip this revision if it can be skipped.
    if skippable and self.ShouldSkipRevision(depot, revision):
      return ('Skipped revision: [%s]' % str(revision),
              BUILD_RESULT_SKIPPED)

    # Obtain a build for this revision. This may be done by requesting a build
    # from another builder, waiting for it and downloading it.
    start_build_time = time.time()
    build_success = self.BuildCurrentRevision(depot, revision)
    if not build_success:
      return ('Failed to build revision: [%s]' % str(revision),
              BUILD_RESULT_FAIL)
    after_build_time = time.time()

    # Possibly alter the command (e.g. browser type for old Android revisions).
    command = self.GetCompatibleCommand(command, revision, depot)

    # Run the command and get the results.
    results = self.RunPerformanceTestAndParseResults(command, metric)

    # Restore build output directory once the tests are done, to avoid
    # any discrepancies.
    if self.IsDownloadable(depot) and revision:
      self.BackupOrRestoreOutputDirectory(restore=True)

    # A value other than 0 indicates that the test couldn't be run, and results
    # should also include an error message.
    if results[1] != 0:
      return results

    external_revisions = self._Get3rdPartyRevisions(depot)

    if not external_revisions is None:
      # Append the external revisions plus the test and build durations
      # (in seconds) to the successful results.
      return (results[0], results[1], external_revisions,
              time.time() - after_build_time, after_build_time -
              start_build_time)
    else:
      return ('Failed to parse DEPS file for external revisions.',
              BUILD_RESULT_FAIL)
1971 | |
1972 def _SyncAllRevisions(self, revisions_to_sync, sync_client): | |
1973 """Syncs multiple depots to particular revisions. | |
1974 | |
1975 Args: | |
1976 revisions_to_sync: A list of (depot, revision) pairs to be synced. | |
1977 sync_client: Program used to sync, e.g. "gclient", "repo". Can be None. | |
1978 | |
1979 Returns: | |
1980 True if successful, False otherwise. | |
1981 """ | |
1982 for depot, revision in revisions_to_sync: | |
1983 self.ChangeToDepotWorkingDirectory(depot) | |
1984 | |
1985 if sync_client: | |
1986 self.PerformPreBuildCleanup() | |
1987 | |
1988 # When using gclient to sync, you need to specify the depot you | |
1989 # want so that all the dependencies sync properly as well. | |
1990 # i.e. gclient sync src@<SHA1> | |
1991 if sync_client == 'gclient': | |
1992 revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'], revision) | |
1993 | |
1994 sync_success = self.source_control.SyncToRevision(revision, sync_client) | |
1995 if not sync_success: | |
1996 return False | |
1997 | |
1998 return True | |
1999 | |
2000 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value): | |
2001 """Given known good and bad values, decide if the current_value passed | |
2002 or failed. | |
2003 | |
2004 Args: | |
2005 current_value: The value of the metric being checked. | |
2006 known_bad_value: The reference value for a "failed" run. | |
2007 known_good_value: The reference value for a "passed" run. | |
2008 | |
2009 Returns: | |
2010 True if the current_value is closer to the known_good_value than the | |
2011 known_bad_value. | |
2012 """ | |
2013 if self.opts.bisect_mode == BISECT_MODE_STD_DEV: | |
2014 dist_to_good_value = abs(current_value['std_dev'] - | |
2015 known_good_value['std_dev']) | |
2016 dist_to_bad_value = abs(current_value['std_dev'] - | |
2017 known_bad_value['std_dev']) | |
2018 else: | |
2019 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean']) | |
2020 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean']) | |
2021 | |
2022 return dist_to_good_value < dist_to_bad_value | |
2023 | |
2024 def _GetDepotDirectory(self, depot_name): | |
2025 if depot_name == 'chromium': | |
2026 return self.src_cwd | |
2027 elif depot_name == 'cros': | |
2028 return self.cros_cwd | |
2029 elif depot_name in DEPOT_NAMES: | |
2030 return self.depot_cwd[depot_name] | |
2031 else: | |
2032 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one ' | |
2033 'was added without proper support?' % depot_name) | |
2034 | |
2035 def ChangeToDepotWorkingDirectory(self, depot_name): | |
2036 """Given a depot, changes to the appropriate working directory. | |
2037 | |
2038 Args: | |
2039 depot_name: The name of the depot (see DEPOT_NAMES). | |
2040 """ | |
2041 os.chdir(self._GetDepotDirectory(depot_name)) | |
2042 | |
2043 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data): | |
2044 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'], | |
2045 search_forward=True) | |
2046 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'], | |
2047 search_forward=False) | |
2048 min_revision_data['external']['v8_bleeding_edge'] = r1 | |
2049 max_revision_data['external']['v8_bleeding_edge'] = r2 | |
2050 | |
2051 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable( | |
2052 min_revision_data['revision']) | |
2053 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable( | |
2054 max_revision_data['revision'])): | |
2055 self.warnings.append( | |
2056 'Trunk revisions in V8 did not map directly to bleeding_edge. ' | |
2057 'Attempted to expand the range to find V8 rolls which did map ' | |
2058 'directly to bleeding_edge revisions, but results might not be ' | |
2059 'valid.') | |
2060 | |
2061 def _FindNextDepotToBisect( | |
2062 self, current_depot, min_revision_data, max_revision_data): | |
2063 """Decides which depot the script should dive into next (if any). | |
2064 | |
2065 Args: | |
2066 current_depot: Current depot being bisected. | |
2067 min_revision_data: Data about the earliest revision in the bisect range. | |
2068 max_revision_data: Data about the latest revision in the bisect range. | |
2069 | |
2070 Returns: | |
2071 Name of the depot to bisect next, or None. | |
2072 """ | |
2073 external_depot = None | |
2074 for next_depot in DEPOT_NAMES: | |
2075 if DEPOT_DEPS_NAME[next_depot].has_key('platform'): | |
2076 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name: | |
2077 continue | |
2078 | |
2079 if not (DEPOT_DEPS_NAME[next_depot]['recurse'] | |
2080 and min_revision_data['depot'] | |
2081 in DEPOT_DEPS_NAME[next_depot]['from']): | |
2082 continue | |
2083 | |
2084 if current_depot == 'v8': | |
2085 # We grab the bleeding_edge info here rather than earlier because we | |
2086 # finally have the revision range. From that we can search forwards and | |
2087 # backwards to try to match trunk revisions to bleeding_edge. | |
2088 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data) | |
2089 | |
2090 if (min_revision_data['external'].get(next_depot) == | |
2091 max_revision_data['external'].get(next_depot)): | |
2092 continue | |
2093 | |
2094 if (min_revision_data['external'].get(next_depot) and | |
2095 max_revision_data['external'].get(next_depot)): | |
2096 external_depot = next_depot | |
2097 break | |
2098 | |
2099 return external_depot | |
2100 | |
2101 def PrepareToBisectOnDepot( | |
2102 self, current_depot, end_revision, start_revision, previous_revision): | |
2103 """Changes to the appropriate directory and gathers a list of revisions | |
2104 to bisect between |start_revision| and |end_revision|. | |
2105 | |
2106 Args: | |
2107 current_depot: The depot we want to bisect. | |
2108 end_revision: End of the revision range. | |
2109 start_revision: Start of the revision range. | |
2110 previous_revision: The last revision we synced to on |previous_depot|. | |
2111 | |
2112 Returns: | |
2113 A list containing the revisions between |start_revision| and | |
2114 |end_revision| inclusive. | |
2115 """ | |
2116 # Change into working directory of external library to run | |
2117 # subsequent commands. | |
2118 self.ChangeToDepotWorkingDirectory(current_depot) | |
2119 | |
2120 # V8 (and possibly others) is merged in periodically. Bisecting | |
2121 # this directory directly won't give much good info. | |
2122 if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'): | |
2123 config_path = os.path.join(self.src_cwd, '..') | |
2124 if bisect_utils.RunGClientAndCreateConfig(self.opts, | |
2125 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path): | |
2126 return [] | |
2127 if bisect_utils.RunGClient( | |
2128 ['sync', '--revision', previous_revision], cwd=self.src_cwd): | |
2129 return [] | |
2130 | |
2131 if current_depot == 'v8_bleeding_edge': | |
2132 self.ChangeToDepotWorkingDirectory('chromium') | |
2133 | |
2134 shutil.move('v8', 'v8.bak') | |
2135 shutil.move('v8_bleeding_edge', 'v8') | |
2136 | |
2137 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge']) | |
2138 self.cleanup_commands.append(['mv', 'v8.bak', 'v8']) | |
2139 | |
2140 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8') | |
2141 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak') | |
2142 | |
2143 self.ChangeToDepotWorkingDirectory(current_depot) | |
2144 | |
2145 depot_revision_list = self.GetRevisionList(current_depot, | |
2146 end_revision, | |
2147 start_revision) | |
2148 | |
2149 self.ChangeToDepotWorkingDirectory('chromium') | |
2150 | |
2151 return depot_revision_list | |
2152 | |
2153 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot): | |
2154 """Gathers reference values by running the performance tests on the | |
2155 known good and bad revisions. | |
2156 | |
2157 Args: | |
2158 good_rev: The last known good revision where the performance regression | |
2159 has not occurred yet. | |
2160 bad_rev: A revision where the performance regression has already occurred. | |
2161 cmd: The command to execute the performance test. | |
2162 metric: The metric being tested for regression. | |
2163 | |
2164 Returns: | |
2165 A tuple with the results of building and running each revision. | |
2166 """ | |
2167 bad_run_results = self.RunTest(bad_rev, target_depot, cmd, metric) | |
2168 | |
2169 good_run_results = None | |
2170 | |
2171 if not bad_run_results[1]: | |
2172 good_run_results = self.RunTest(good_rev, target_depot, cmd, metric) | |
2173 | |
2174 return (bad_run_results, good_run_results) | |
2175 | |
2176 def PrintRevisionsToBisectMessage(self, revision_list, depot): | |
2177 if self.opts.output_buildbot_annotations: | |
2178 step_name = 'Bisection Range: [%s - %s]' % ( | |
2179 revision_list[len(revision_list)-1], revision_list[0]) | |
2180 bisect_utils.OutputAnnotationStepStart(step_name) | |
2181 | |
2182 print | |
2183 print 'Revisions to bisect on [%s]:' % depot | |
2184 for revision_id in revision_list: | |
2185 print ' -> %s' % (revision_id, ) | |
2186 print | |
2187 | |
2188 if self.opts.output_buildbot_annotations: | |
2189 bisect_utils.OutputAnnotationStepClosed() | |
2190 | |
  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision,
                                 good_svn_revision=None):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.

    Args:
      bad_revision: First known bad git revision.
      good_revision: Last known good git revision.
      good_svn_revision: Last known good svn revision.

    Returns:
      A tuple with the new bad and good revisions.
    """
    # DONOT perform nudge because at revision 291563 .DEPS.git was removed
    # and source contain only DEPS file for dependency changes.
    # NOTE(review): good_svn_revision may be None here; this comparison relies
    # on Python 2 ordering (None < any int, so the check is False) -- it would
    # raise TypeError on Python 3. Confirm before porting.
    if good_svn_revision >= 291563:
      return (bad_revision, good_revision)

    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS, good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = bisect_utils.CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
                 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
                 'origin/master', '--', bisect_utils.FILE_DEPS_GIT]
          output = bisect_utils.CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append('Detected change to DEPS and modified '
                                 'revision range to include change to .DEPS.git')
            # Replace the bad endpoint with the matching .DEPS.git commit.
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                                 'matching change to .DEPS.git')
    return (bad_revision, good_revision)
2242 | |
2243 def CheckIfRevisionsInProperOrder( | |
2244 self, target_depot, good_revision, bad_revision): | |
2245 """Checks that |good_revision| is an earlier revision than |bad_revision|. | |
2246 | |
2247 Args: | |
2248 good_revision: Number/tag of the known good revision. | |
2249 bad_revision: Number/tag of the known bad revision. | |
2250 | |
2251 Returns: | |
2252 True if the revisions are in the proper order (good earlier than bad). | |
2253 """ | |
2254 if self.source_control.IsGit() and target_depot != 'cros': | |
2255 cwd = self._GetDepotDirectory(target_depot) | |
2256 | |
2257 cmd = ['log', '--format=%ct', '-1', good_revision] | |
2258 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | |
2259 good_commit_time = int(output) | |
2260 | |
2261 cmd = ['log', '--format=%ct', '-1', bad_revision] | |
2262 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | |
2263 bad_commit_time = int(output) | |
2264 | |
2265 return good_commit_time <= bad_commit_time | |
2266 else: | |
2267 # CrOS and SVN use integers. | |
2268 return int(good_revision) <= int(bad_revision) | |
2269 | |
2270 def CanPerformBisect(self, good_revision, bad_revision): | |
2271 """Checks whether a given revision is bisectable. | |
2272 | |
2273 Checks for following: | |
2274 1. Non-bisectable revsions for android bots (refer to crbug.com/385324). | |
2275 2. Non-bisectable revsions for Windows bots (refer to crbug.com/405274). | |
2276 | |
2277 Args: | |
2278 good_revision: Known good revision. | |
2279 bad_revision: Known bad revision. | |
2280 | |
2281 Returns: | |
2282 A dictionary indicating the result. If revision is not bisectable, | |
2283 this will contain the field "error", otherwise None. | |
2284 """ | |
2285 if self.opts.target_platform == 'android': | |
2286 revision_to_check = self.source_control.GetCommitPosition(good_revision) | |
2287 if (bisect_utils.IsStringInt(good_revision) | |
2288 and good_revision < 265549): | |
2289 return {'error': ( | |
2290 'Bisect cannot continue for the given revision range.\n' | |
2291 'It is impossible to bisect Android regressions ' | |
2292 'prior to r265549, which allows the bisect bot to ' | |
2293 'rely on Telemetry to do apk installation of the most recently ' | |
2294 'built local ChromeShell(refer to crbug.com/385324).\n' | |
2295 'Please try bisecting revisions greater than or equal to r265549.')} | |
2296 | |
2297 if bisect_utils.IsWindowsHost(): | |
2298 good_revision = self.source_control.GetCommitPosition(good_revision) | |
2299 bad_revision = self.source_control.GetCommitPosition(bad_revision) | |
2300 if (bisect_utils.IsStringInt(good_revision) and | |
2301 bisect_utils.IsStringInt(bad_revision)): | |
2302 if (289987 <= good_revision < 290716 or | |
2303 289987 <= bad_revision < 290716): | |
2304 return {'error': ('Oops! Revision between r289987 and r290716 are ' | |
2305 'marked as dead zone for Windows due to ' | |
2306 'crbug.com/405274. Please try another range.')} | |
2307 | |
2308 return None | |
2309 | |
  def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
    """Given known good and bad revisions, run a binary search on all
    intermediate revisions to determine the CL where the performance regression
    occurred.

    Args:
      command_to_run: Specify the command to execute the performance test.
      good_revision_in: Number/tag of the known good revision.
      bad_revision_in: Number/tag of the known bad revision.
      metric: The performance metric to monitor.

    Returns:
      A dict with 2 members, 'revision_data' and 'error'. On success,
      'revision_data' will contain a dict mapping revision ids to
      data about that revision. Each piece of revision data consists of a
      dict with the following keys:

      'passed': Represents whether the performance test was successful at
          that revision. Possible values include: 1 (passed), 0 (failed),
          '?' (skipped), 'F' (build failed).
      'depot': The depot that this revision is from (i.e. WebKit)
      'external': If the revision is a 'src' revision, 'external' contains
          the revisions of each of the external libraries.
      'sort': A sort value for sorting the dict in order of commits.

      For example:
      {
        'error':None,
        'revision_data':
        {
          'CL #1':
          {
            'passed': False,
            'depot': 'chromium',
            'external': None,
            'sort': 0
          }
        }
      }

      If an error occurred, the 'error' field will contain the message and
      'revision_data' will be empty.
    """
    results = {
        'revision_data' : {},
        'error' : None,
    }

    # Choose depot to bisect first
    target_depot = 'chromium'
    if self.opts.target_platform == 'cros':
      target_depot = 'cros'
    elif self.opts.target_platform == 'android-chrome':
      target_depot = 'android-chrome'

    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(target_depot)

    # If they passed SVN revisions, we can try match them to git SHA1 hashes.
    # The +/-100 search-direction hints tell ResolveToRevision which way to
    # scan when the exact revision is not found.
    bad_revision = self.source_control.ResolveToRevision(
        bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100)
    good_revision = self.source_control.ResolveToRevision(
        good_revision_in, target_depot, DEPOT_DEPS_NAME, -100)

    os.chdir(cwd)
    if bad_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % bad_revision_in
      return results

    if good_revision is None:
      results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % good_revision_in
      return results

    # Check that they didn't accidentally swap good and bad revisions.
    if not self.CheckIfRevisionsInProperOrder(
        target_depot, good_revision, bad_revision):
      results['error'] = ('bad_revision < good_revision, did you swap these '
                          'by mistake?')
      return results
    bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
        bad_revision, good_revision, good_revision_in)
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

    cannot_bisect = self.CanPerformBisect(good_revision, bad_revision)
    if cannot_bisect:
      results['error'] = cannot_bisect.get('error')
      return results

    print 'Gathering revision range for bisection.'
    # Retrieve a list of revisions to do bisection on.
    src_revision_list = self.GetRevisionList(
        target_depot, bad_revision, good_revision)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

    if src_revision_list:
      # revision_data will store information about a revision such as the
      # depot it came from, the webkit/V8 revision at that time,
      # performance timing, build state, etc...
      revision_data = results['revision_data']

      # revision_list is the list we're binary searching through at the moment.
      revision_list = []

      sort_key_ids = 0

      for current_revision_id in src_revision_list:
        sort_key_ids += 1

        revision_data[current_revision_id] = {
            'value' : None,
            'passed' : '?',
            'depot' : target_depot,
            'external' : None,
            'perf_time' : 0,
            'build_time' : 0,
            'sort' : sort_key_ids,
        }
        revision_list.append(current_revision_id)

      min_revision = 0
      max_revision = len(revision_list) - 1

      self.PrintRevisionsToBisectMessage(revision_list, target_depot)

      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

      print 'Gathering reference values for bisection.'

      # Perform the performance tests on the good and bad revisions, to get
      # reference values.
      bad_results, good_results = self.GatherReferenceValues(good_revision,
                                                             bad_revision,
                                                             command_to_run,
                                                             metric,
                                                             target_depot)

      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepClosed()

      # Index [1] of each result tuple holds an error flag/message; a truthy
      # value means the reference run could not be completed.
      if bad_results[1]:
        results['error'] = ('An error occurred while building and running '
            'the \'bad\' reference value. The bisect cannot continue without '
            'a working \'bad\' revision to start from.\n\nError: %s' %
            bad_results[0])
        return results

      if good_results[1]:
        results['error'] = ('An error occurred while building and running '
            'the \'good\' reference value. The bisect cannot continue without '
            'a working \'good\' revision to start from.\n\nError: %s' %
            good_results[0])
        return results


      # We need these reference values to determine if later runs should be
      # classified as pass or fail.
      known_bad_value = bad_results[0]
      known_good_value = good_results[0]

      # Can just mark the good and bad revisions explicitly here since we
      # already know the results.
      bad_revision_data = revision_data[revision_list[0]]
      bad_revision_data['external'] = bad_results[2]
      bad_revision_data['perf_time'] = bad_results[3]
      bad_revision_data['build_time'] = bad_results[4]
      bad_revision_data['passed'] = False
      bad_revision_data['value'] = known_bad_value

      good_revision_data = revision_data[revision_list[max_revision]]
      good_revision_data['external'] = good_results[2]
      good_revision_data['perf_time'] = good_results[3]
      good_revision_data['build_time'] = good_results[4]
      good_revision_data['passed'] = True
      good_revision_data['value'] = known_good_value

      next_revision_depot = target_depot

      # Main binary-search loop; terminates when the window closes to two
      # adjacent tested revisions and no external depot remains to descend
      # into.
      while True:
        if not revision_list:
          break

        min_revision_data = revision_data[revision_list[min_revision]]
        max_revision_data = revision_data[revision_list[max_revision]]

        if max_revision - min_revision <= 1:
          current_depot = min_revision_data['depot']
          if min_revision_data['passed'] == '?':
            next_revision_index = min_revision
          elif max_revision_data['passed'] == '?':
            next_revision_index = max_revision
          elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
            previous_revision = revision_list[min_revision]
            # If there were changes to any of the external libraries we track,
            # should bisect the changes there as well.
            external_depot = self._FindNextDepotToBisect(
                current_depot, min_revision_data, max_revision_data)
            # If there was no change in any of the external depots, the search
            # is over.
            if not external_depot:
              if current_depot == 'v8':
                self.warnings.append('Unfortunately, V8 bisection couldn\'t '
                    'continue any further. The script can only bisect into '
                    'V8\'s bleeding_edge repository if both the current and '
                    'previous revisions in trunk map directly to revisions in '
                    'bleeding_edge.')
              break

            earliest_revision = max_revision_data['external'][external_depot]
            latest_revision = min_revision_data['external'][external_depot]

            new_revision_list = self.PrepareToBisectOnDepot(
                external_depot, latest_revision, earliest_revision,
                previous_revision)

            if not new_revision_list:
              results['error'] = ('An error occurred attempting to retrieve '
                                  'revision range: [%s..%s]' %
                                  (earliest_revision, latest_revision))
              return results

            _AddRevisionsIntoRevisionData(
                new_revision_list, external_depot, min_revision_data['sort'],
                revision_data)

            # Reset the bisection and perform it on the newly inserted
            # changelists.
            revision_list = new_revision_list
            min_revision = 0
            max_revision = len(revision_list) - 1
            sort_key_ids += len(revision_list)

            print ('Regression in metric %s appears to be the result of '
                   'changes in [%s].' % (metric, external_depot))

            self.PrintRevisionsToBisectMessage(revision_list, external_depot)

            continue
          else:
            break
        else:
          next_revision_index = (int((max_revision - min_revision) / 2) +
                                 min_revision)

        next_revision_id = revision_list[next_revision_index]
        next_revision_data = revision_data[next_revision_id]
        next_revision_depot = next_revision_data['depot']

        self.ChangeToDepotWorkingDirectory(next_revision_depot)

        if self.opts.output_buildbot_annotations:
          step_name = 'Working on [%s]' % next_revision_id
          bisect_utils.OutputAnnotationStepStart(step_name)

        print 'Working on revision: [%s]' % next_revision_id

        # skippable=True lets RunTest report an unbuildable/untestable
        # revision instead of aborting the whole bisect.
        run_results = self.RunTest(
            next_revision_id, next_revision_depot, command_to_run, metric,
            skippable=True)

        # If the build is successful, check whether or not the metric
        # had regressed.
        if not run_results[1]:
          if len(run_results) > 2:
            next_revision_data['external'] = run_results[2]
            next_revision_data['perf_time'] = run_results[3]
            next_revision_data['build_time'] = run_results[4]

          passed_regression = self._CheckIfRunPassed(run_results[0],
                                                     known_good_value,
                                                     known_bad_value)

          next_revision_data['passed'] = passed_regression
          next_revision_data['value'] = run_results[0]

          if passed_regression:
            max_revision = next_revision_index
          else:
            min_revision = next_revision_index
        else:
          if run_results[1] == BUILD_RESULT_SKIPPED:
            next_revision_data['passed'] = 'Skipped'
          elif run_results[1] == BUILD_RESULT_FAIL:
            next_revision_data['passed'] = 'Build Failed'

          print run_results[0]

          # If the build is broken, remove it and redo search.
          revision_list.pop(next_revision_index)

          max_revision -= 1

        if self.opts.output_buildbot_annotations:
          self._PrintPartialResults(results)
          bisect_utils.OutputAnnotationStepClosed()
    else:
      # Weren't able to sync and retrieve the revision range.
      results['error'] = ('An error occurred attempting to retrieve revision '
                          'range: [%s..%s]' % (good_revision, bad_revision))

    return results
2614 | |
2615 def _PrintPartialResults(self, results_dict): | |
2616 revision_data = results_dict['revision_data'] | |
2617 revision_data_sorted = sorted(revision_data.iteritems(), | |
2618 key = lambda x: x[1]['sort']) | |
2619 results_dict = self._GetResultsDict(revision_data, revision_data_sorted) | |
2620 | |
2621 self._PrintTestedCommitsTable(revision_data_sorted, | |
2622 results_dict['first_working_revision'], | |
2623 results_dict['last_broken_revision'], | |
2624 100, final_step=False) | |
2625 | |
2626 def _ConfidenceLevelStatus(self, results_dict): | |
2627 if not results_dict['confidence']: | |
2628 return None | |
2629 confidence_status = 'Successful with %(level)s confidence%(warning)s.' | |
2630 if results_dict['confidence'] >= HIGH_CONFIDENCE: | |
2631 level = 'high' | |
2632 else: | |
2633 level = 'low' | |
2634 warning = ' and warnings' | |
2635 if not self.warnings: | |
2636 warning = '' | |
2637 return confidence_status % {'level': level, 'warning': warning} | |
2638 | |
2639 def _GetViewVCLinkFromDepotAndHash(self, cl, depot): | |
2640 info = self.source_control.QueryRevisionInfo(cl, | |
2641 self._GetDepotDirectory(depot)) | |
2642 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'): | |
2643 try: | |
2644 # Format is "git-svn-id: svn://....@123456 <other data>" | |
2645 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i] | |
2646 svn_revision = svn_line[0].split('@') | |
2647 svn_revision = svn_revision[1].split(' ')[0] | |
2648 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision | |
2649 except IndexError: | |
2650 return '' | |
2651 return '' | |
2652 | |
2653 def _PrintRevisionInfo(self, cl, info, depot=None): | |
2654 email_info = '' | |
2655 if not info['email'].startswith(info['author']): | |
2656 email_info = '\nEmail : %s' % info['email'] | |
2657 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot) | |
2658 if commit_link: | |
2659 commit_info = '\nLink : %s' % commit_link | |
2660 else: | |
2661 commit_info = ('\nFailed to parse SVN revision from body:\n%s' % | |
2662 info['body']) | |
2663 print RESULTS_REVISION_INFO % { | |
2664 'subject': info['subject'], | |
2665 'author': info['author'], | |
2666 'email_info': email_info, | |
2667 'commit_info': commit_info, | |
2668 'cl': cl, | |
2669 'cl_date': info['date'] | |
2670 } | |
2671 | |
2672 def _PrintTestedCommitsHeader(self): | |
2673 if self.opts.bisect_mode == BISECT_MODE_MEAN: | |
2674 _PrintTableRow( | |
2675 [20, 70, 14, 12, 13], | |
2676 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State']) | |
2677 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: | |
2678 _PrintTableRow( | |
2679 [20, 70, 14, 12, 13], | |
2680 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State']) | |
2681 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: | |
2682 _PrintTableRow( | |
2683 [20, 70, 14, 13], | |
2684 ['Depot', 'Commit SHA', 'Return Code', 'State']) | |
2685 else: | |
2686 assert False, 'Invalid bisect_mode specified.' | |
2687 | |
2688 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str): | |
2689 if self.opts.bisect_mode == BISECT_MODE_MEAN: | |
2690 std_error = '+-%.02f' % current_data['value']['std_err'] | |
2691 mean = '%.02f' % current_data['value']['mean'] | |
2692 _PrintTableRow( | |
2693 [20, 70, 12, 14, 13], | |
2694 [current_data['depot'], cl_link, mean, std_error, state_str]) | |
2695 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: | |
2696 std_error = '+-%.02f' % current_data['value']['std_err'] | |
2697 mean = '%.02f' % current_data['value']['mean'] | |
2698 _PrintTableRow( | |
2699 [20, 70, 12, 14, 13], | |
2700 [current_data['depot'], cl_link, std_error, mean, state_str]) | |
2701 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: | |
2702 mean = '%d' % current_data['value']['mean'] | |
2703 _PrintTableRow( | |
2704 [20, 70, 14, 13], | |
2705 [current_data['depot'], cl_link, mean, state_str]) | |
2706 | |
  def _PrintTestedCommitsTable(
      self, revision_data_sorted, first_working_revision, last_broken_revision,
      confidence, final_step=True):
    """Prints a table of all revisions that have a test result.

    Args:
      revision_data_sorted: (id, data) pairs sorted by their 'sort' key.
      first_working_revision: Id of the first revision that passed.
      last_broken_revision: Id of the last revision that failed.
      confidence: Confidence score; falsy disables good/bad labelling.
      final_step: True for the final results table, False for partial output.
    """
    print
    if final_step:
      print '===== TESTED COMMITS ====='
    else:
      print '===== PARTIAL RESULTS ====='
    self._PrintTestedCommitsHeader()
    # state tracks where we are relative to the suspected range:
    # 0 = bad side, 1 = at the boundary (suspected CL), 2 = good side.
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
                                                      current_data['depot'])
        if not cl_link:
          cl_link = current_id
        self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
2746 | |
2747 def _PrintReproSteps(self): | |
2748 """Prints out a section of the results explaining how to run the test. | |
2749 | |
2750 This message includes the command used to run the test. | |
2751 """ | |
2752 command = '$ ' + self.opts.command | |
2753 if bisect_utils.IsTelemetryCommand(self.opts.command): | |
2754 command += ('\nAlso consider passing --profiler=list to see available ' | |
2755 'profilers.') | |
2756 print REPRO_STEPS_LOCAL | |
2757 if bisect_utils.IsTelemetryCommand(self.opts.command): | |
2758 telemetry_command = re.sub(r'--browser=[^\s]+', | |
2759 '--browser=<bot-name>', | |
2760 command) | |
2761 print REPRO_STEPS_TRYJOB_TELEMETRY % {'command': telemetry_command} | |
2762 else: | |
2763 print REPRO_STEPS_TRYJOB | |
2764 | |
2765 def _PrintOtherRegressions(self, other_regressions, revision_data): | |
2766 """Prints a section of the results about other potential regressions.""" | |
2767 print | |
2768 print 'Other regressions may have occurred:' | |
2769 print ' %8s %70s %10s' % ('Depot'.center(8, ' '), | |
2770 'Range'.center(70, ' '), 'Confidence'.center(10, ' ')) | |
2771 for regression in other_regressions: | |
2772 current_id, previous_id, confidence = regression | |
2773 current_data = revision_data[current_id] | |
2774 previous_data = revision_data[previous_id] | |
2775 | |
2776 current_link = self._GetViewVCLinkFromDepotAndHash(current_id, | |
2777 current_data['depot']) | |
2778 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id, | |
2779 previous_data['depot']) | |
2780 | |
2781 # If we can't map it to a viewable URL, at least show the original hash. | |
2782 if not current_link: | |
2783 current_link = current_id | |
2784 if not previous_link: | |
2785 previous_link = previous_id | |
2786 | |
2787 print ' %8s %70s %s' % ( | |
2788 current_data['depot'], current_link, | |
2789 ('%d%%' % confidence).center(10, ' ')) | |
2790 print ' %8s %70s' % ( | |
2791 previous_data['depot'], previous_link) | |
2792 print | |
2793 | |
  def _GetResultsDict(self, revision_data, revision_data_sorted):
    """Computes summary statistics for a finished (or partial) bisect.

    Args:
      revision_data: Dict mapping revision ids to per-revision result dicts.
      revision_data_sorted: (id, data) pairs sorted by their 'sort' key.

    Returns:
      A dict with the suspected range endpoints, culprit revision info,
      regression size/std-err and a confidence score.
    """
    # Find range where it possibly broke.
    first_working_revision = None
    first_working_revision_index = -1
    last_broken_revision = None
    last_broken_revision_index = -1

    culprit_revisions = []
    other_regressions = []
    regression_size = 0.0
    regression_std_err = 0.0
    confidence = 0.0

    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
          first_working_revision_index = i

      if not v['passed']:
        last_broken_revision = k
        last_broken_revision_index = i

    if last_broken_revision != None and first_working_revision != None:
      # Collect the per-run value lists on each side of the break.
      broken_means = []
      for i in xrange(0, last_broken_revision_index + 1):
        if revision_data_sorted[i][1]['value']:
          broken_means.append(revision_data_sorted[i][1]['value']['values'])

      working_means = []
      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
        if revision_data_sorted[i][1]['value']:
          working_means.append(revision_data_sorted[i][1]['value']['values'])

      # Flatten the lists to calculate mean of all values.
      working_mean = sum(working_means, [])
      broken_mean = sum(broken_means, [])

      # Calculate the approximate size of the regression
      mean_of_bad_runs = math_utils.Mean(broken_mean)
      mean_of_good_runs = math_utils.Mean(working_mean)

      regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
                                                        mean_of_bad_runs)
      # NaN happens when one side is zero; report it as a qualitative change.
      if math.isnan(regression_size):
        regression_size = 'zero-to-nonzero'

      regression_std_err = math.fabs(math_utils.PooledStandardError(
          [working_mean, broken_mean]) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

      # Give a "confidence" in the bisect. At the moment we use how distinct the
      # values are before and after the last broken revision, and how noisy the
      # overall graph is.
      confidence = ConfidenceScore(working_means, broken_means)

      culprit_revisions = []

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])

      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)

        changes = []
        assert not return_code, ('An error occurred while running '
                                 '"%s"' % ' '.join(cmd))
        last_depot = None
        cwd = os.getcwd()
        for l in output.split('\n'):
          if l:
            # Output will be in form:
            # /path_to_depot
            # /path_to_other_depot
            # <SHA1>
            # /path_again
            # <SHA1>
            # etc.
            if l[0] == '/':
              last_depot = l
            else:
              contents = l.split(' ')
              if len(contents) > 1:
                changes.append([last_depot, contents[0]])
        for c in changes:
          os.chdir(c[0])
          info = self.source_control.QueryRevisionInfo(c[1])
          culprit_revisions.append((c[1], info, None))
      else:
        # Every revision between the break and the first working one is a
        # potential culprit.
        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
          k, v = revision_data_sorted[i]
          if k == first_working_revision:
            break
          self.ChangeToDepotWorkingDirectory(v['depot'])
          info = self.source_control.QueryRevisionInfo(k)
          culprit_revisions.append((k, info, v['depot']))
      os.chdir(cwd)

      # Check for any other possible regression ranges.
      other_regressions = _FindOtherRegressions(
          revision_data_sorted, mean_of_bad_runs > mean_of_good_runs)

    return {
        'first_working_revision': first_working_revision,
        'last_broken_revision': last_broken_revision,
        'culprit_revisions': culprit_revisions,
        'other_regressions': other_regressions,
        'regression_size': regression_size,
        'regression_std_err': regression_std_err,
        'confidence': confidence,
    }
2912 | |
2913 def _CheckForWarnings(self, results_dict): | |
2914 if len(results_dict['culprit_revisions']) > 1: | |
2915 self.warnings.append('Due to build errors, regression range could ' | |
2916 'not be narrowed down to a single commit.') | |
2917 if self.opts.repeat_test_count == 1: | |
2918 self.warnings.append('Tests were only set to run once. This may ' | |
2919 'be insufficient to get meaningful results.') | |
2920 if 0 < results_dict['confidence'] < HIGH_CONFIDENCE: | |
2921 self.warnings.append('Confidence is not high. Try bisecting again ' | |
2922 'with increased repeat_count, larger range, or ' | |
2923 'on another metric.') | |
2924 if not results_dict['confidence']: | |
2925 self.warnings.append('Confidence score is 0%. Try bisecting again on ' | |
2926 'another platform or another metric.') | |
2927 | |
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    self._CheckForWarnings(results_dict)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      # 'passed' may be a bool or a status string ('?', 'Skipped', ...);
      # only the bools are translated here.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print ' %20s %40s %s' % (current_data['depot'],
                               current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    self._PrintBanner(results_dict)
    self._PrintWarnings()

    if results_dict['culprit_revisions'] and results_dict['confidence']:
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
                                    revision_data)
    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  results_dict['confidence'])
    _PrintStepTime(revision_data_sorted)
    self._PrintReproSteps()
    _PrintThankYou()
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
2984 | |
2985 def _PrintBanner(self, results_dict): | |
2986 if self._IsBisectModeReturnCode(): | |
2987 metrics = 'N/A' | |
2988 change = 'Yes' | |
2989 else: | |
2990 metrics = '/'.join(self.opts.metric) | |
2991 change = '%.02f%% (+/-%.02f%%)' % ( | |
2992 results_dict['regression_size'], results_dict['regression_std_err']) | |
2993 | |
2994 if results_dict['culprit_revisions'] and results_dict['confidence']: | |
2995 status = self._ConfidenceLevelStatus(results_dict) | |
2996 else: | |
2997 status = 'Failure, could not reproduce.' | |
2998 change = 'Bisect could not reproduce a change.' | |
2999 | |
3000 print RESULTS_BANNER % { | |
3001 'status': status, | |
3002 'command': self.opts.command, | |
3003 'metrics': metrics, | |
3004 'change': change, | |
3005 'confidence': results_dict['confidence'], | |
3006 } | |
3007 | |
3008 def _PrintWarnings(self): | |
3009 """Prints a list of warning strings if there are any.""" | |
3010 if not self.warnings: | |
3011 return | |
3012 print | |
3013 print 'WARNINGS:' | |
3014 for w in set(self.warnings): | |
3015 print ' ! %s' % w | |
3016 | |
3017 | |
def _IsPlatformSupported():
  """Checks that this platform and build system are supported.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  supported = ['posix', 'nt']
  return os.name in supported
3030 | |
3031 | |
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  # Use the "as" form (valid in Python 2.6+ and 3.x); the old comma form is
  # a syntax error in Python 3.
  except OSError as e:
    # A concurrent deletion (ENOENT) is harmless; anything else is a failure.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
3054 | |
3055 | |
def RemoveBuildFiles(build_type):
  """Removes build output directories left over from previous runs.

  Returns:
    True only if both the out/ and build/ trees were cleared successfully.
  """
  out_dir = os.path.join('out', build_type)
  build_dir = os.path.join('build', build_type)
  # Short-circuits just like the original nested ifs: build/ is only
  # touched when clearing out/ succeeded.
  return bool(RmTreeAndMkDir(out_dir) and RmTreeAndMkDir(build_dir))
3062 | |
3063 | |
3064 class BisectOptions(object): | |
3065 """Options to be used when running bisection.""" | |
3066 def __init__(self): | |
3067 super(BisectOptions, self).__init__() | |
3068 | |
3069 self.target_platform = 'chromium' | |
3070 self.build_preference = None | |
3071 self.good_revision = None | |
3072 self.bad_revision = None | |
3073 self.use_goma = None | |
3074 self.goma_dir = None | |
3075 self.cros_board = None | |
3076 self.cros_remote_ip = None | |
3077 self.repeat_test_count = 20 | |
3078 self.truncate_percent = 25 | |
3079 self.max_time_minutes = 20 | |
3080 self.metric = None | |
3081 self.command = None | |
3082 self.output_buildbot_annotations = None | |
3083 self.no_custom_deps = False | |
3084 self.working_directory = None | |
3085 self.extra_src = None | |
3086 self.debug_ignore_build = None | |
3087 self.debug_ignore_sync = None | |
3088 self.debug_ignore_perf_test = None | |
3089 self.gs_bucket = None | |
3090 self.target_arch = 'ia32' | |
3091 self.target_build_type = 'Release' | |
3092 self.builder_host = None | |
3093 self.builder_port = None | |
3094 self.bisect_mode = BISECT_MODE_MEAN | |
3095 | |
3096 @staticmethod | |
3097 def _CreateCommandLineParser(): | |
3098 """Creates a parser with bisect options. | |
3099 | |
3100 Returns: | |
3101 An instance of optparse.OptionParser. | |
3102 """ | |
3103 usage = ('%prog [options] [-- chromium-options]\n' | |
3104 'Perform binary search on revision history to find a minimal ' | |
3105 'range of revisions where a performance metric regressed.\n') | |
3106 | |
3107 parser = optparse.OptionParser(usage=usage) | |
3108 | |
3109 group = optparse.OptionGroup(parser, 'Bisect options') | |
3110 group.add_option('-c', '--command', | |
3111 type='str', | |
3112 help='A command to execute your performance test at' + | |
3113 ' each point in the bisection.') | |
3114 group.add_option('-b', '--bad_revision', | |
3115 type='str', | |
3116 help='A bad revision to start bisection. ' + | |
3117 'Must be later than good revision. May be either a git' + | |
3118 ' or svn revision.') | |
3119 group.add_option('-g', '--good_revision', | |
3120 type='str', | |
3121 help='A revision to start bisection where performance' + | |
3122 ' test is known to pass. Must be earlier than the ' + | |
3123 'bad revision. May be either a git or svn revision.') | |
3124 group.add_option('-m', '--metric', | |
3125 type='str', | |
3126 help='The desired metric to bisect on. For example ' + | |
3127 '"vm_rss_final_b/vm_rss_f_b"') | |
3128 group.add_option('-r', '--repeat_test_count', | |
3129 type='int', | |
3130 default=20, | |
3131 help='The number of times to repeat the performance ' | |
3132 'test. Values will be clamped to range [1, 100]. ' | |
3133 'Default value is 20.') | |
3134 group.add_option('--max_time_minutes', | |
3135 type='int', | |
3136 default=20, | |
3137 help='The maximum time (in minutes) to take running the ' | |
3138 'performance tests. The script will run the performance ' | |
3139 'tests according to --repeat_test_count, so long as it ' | |
3140 'doesn\'t exceed --max_time_minutes. Values will be ' | |
3141 'clamped to range [1, 60].' | |
3142 'Default value is 20.') | |
3143 group.add_option('-t', '--truncate_percent', | |
3144 type='int', | |
3145 default=25, | |
3146 help='The highest/lowest % are discarded to form a ' | |
3147 'truncated mean. Values will be clamped to range [0, ' | |
3148 '25]. Default value is 25 (highest/lowest 25% will be ' | |
3149 'discarded).') | |
3150 group.add_option('--bisect_mode', | |
3151 type='choice', | |
3152 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV, | |
3153 BISECT_MODE_RETURN_CODE], | |
3154 default=BISECT_MODE_MEAN, | |
3155 help='The bisect mode. Choices are to bisect on the ' | |
3156 'difference in mean, std_dev, or return_code.') | |
3157 parser.add_option_group(group) | |
3158 | |
3159 group = optparse.OptionGroup(parser, 'Build options') | |
3160 group.add_option('-w', '--working_directory', | |
3161 type='str', | |
3162 help='Path to the working directory where the script ' | |
3163 'will do an initial checkout of the chromium depot. The ' | |
3164 'files will be placed in a subdirectory "bisect" under ' | |
3165 'working_directory and that will be used to perform the ' | |
3166 'bisection. This parameter is optional, if it is not ' | |
3167 'supplied, the script will work from the current depot.') | |
3168 group.add_option('--build_preference', | |
3169 type='choice', | |
3170 choices=['msvs', 'ninja', 'make'], | |
3171 help='The preferred build system to use. On linux/mac ' | |
3172 'the options are make/ninja. On Windows, the options ' | |
3173 'are msvs/ninja.') | |
3174 group.add_option('--target_platform', | |
3175 type='choice', | |
3176 choices=['chromium', 'cros', 'android', 'android-chrome'], | |
3177 default='chromium', | |
3178 help='The target platform. Choices are "chromium" ' | |
3179 '(current platform), "cros", or "android". If you ' | |
3180 'specify something other than "chromium", you must be ' | |
3181 'properly set up to build that platform.') | |
3182 group.add_option('--no_custom_deps', | |
3183 dest='no_custom_deps', | |
3184 action='store_true', | |
3185 default=False, | |
3186 help='Run the script with custom_deps or not.') | |
3187 group.add_option('--extra_src', | |
3188 type='str', | |
3189 help='Path to a script which can be used to modify ' | |
3190 'the bisect script\'s behavior.') | |
3191 group.add_option('--cros_board', | |
3192 type='str', | |
3193 help='The cros board type to build.') | |
3194 group.add_option('--cros_remote_ip', | |
3195 type='str', | |
3196 help='The remote machine to image to.') | |
3197 group.add_option('--use_goma', | |
3198 action='store_true', | |
3199 help='Add a bunch of extra threads for goma, and enable ' | |
3200 'goma') | |
3201 group.add_option('--goma_dir', | |
3202 help='Path to goma tools (or system default if not ' | |
3203 'specified).') | |
3204 group.add_option('--output_buildbot_annotations', | |
3205 action='store_true', | |
3206 help='Add extra annotation output for buildbot.') | |
3207 group.add_option('--gs_bucket', | |
3208 default='', | |
3209 dest='gs_bucket', | |
3210 type='str', | |
3211 help=('Name of Google Storage bucket to upload or ' | |
3212 'download build. e.g., chrome-perf')) | |
3213 group.add_option('--target_arch', | |
3214 type='choice', | |
3215 choices=['ia32', 'x64', 'arm'], | |
3216 default='ia32', | |
3217 dest='target_arch', | |
3218 help=('The target build architecture. Choices are "ia32" ' | |
3219 '(default), "x64" or "arm".')) | |
3220 group.add_option('--target_build_type', | |
3221 type='choice', | |
3222 choices=['Release', 'Debug'], | |
3223 default='Release', | |
3224 help='The target build type. Choices are "Release" ' | |
3225 '(default), or "Debug".') | |
3226 group.add_option('--builder_host', | |
3227 dest='builder_host', | |
3228 type='str', | |
3229 help=('Host address of server to produce build by posting' | |
3230 ' try job request.')) | |
3231 group.add_option('--builder_port', | |
3232 dest='builder_port', | |
3233 type='int', | |
3234 help=('HTTP port of the server to produce build by posting' | |
3235 ' try job request.')) | |
3236 parser.add_option_group(group) | |
3237 | |
3238 group = optparse.OptionGroup(parser, 'Debug options') | |
3239 group.add_option('--debug_ignore_build', | |
3240 action='store_true', | |
3241 help='DEBUG: Don\'t perform builds.') | |
3242 group.add_option('--debug_ignore_sync', | |
3243 action='store_true', | |
3244 help='DEBUG: Don\'t perform syncs.') | |
3245 group.add_option('--debug_ignore_perf_test', | |
3246 action='store_true', | |
3247 help='DEBUG: Don\'t perform performance tests.') | |
3248 parser.add_option_group(group) | |
3249 return parser | |
3250 | |
  def ParseCommandLine(self):
    """Parses and validates the command line, copying options onto |self|.

    On success, every parsed option becomes an attribute of this instance.
    The metric is normalized from a 'chart/trace' string into a two-element
    list (unless bisecting on return code), and numeric options are clamped
    to their documented ranges.

    Raises:
      RuntimeError: If a required option is missing or a value is invalid.
          The message includes the parser's full help text.
    """
    parser = self._CreateCommandLineParser()
    opts, _ = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      # --metric is only required when comparing perf values; it may be
      # omitted when bisecting on return code.
      if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
        raise RuntimeError('missing required parameter: --metric')

      if opts.gs_bucket:
        # Using prebuilt archives requires a reachable storage bucket plus
        # a builder host/port to post try-job requests to.
        if not cloud_storage.List(opts.gs_bucket):
          raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
        if not opts.builder_host:
          raise RuntimeError('Must specify try server host name using '
                             '--builder_host when gs_bucket is used.')
        if not opts.builder_port:
          raise RuntimeError('Must specify try server port number using '
                             '--builder_port when gs_bucket is used.')
      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print 'Sudo is required to build cros:'
        print
        bisect_utils.RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      # Split 'chart/trace' into a [chart, trace] list for later lookups.
      if opts.bisect_mode != BISECT_MODE_RETURN_CODE:
        metric_values = opts.metric.split('/')
        if len(metric_values) != 2:
          raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
        opts.metric = metric_values

      # Clamp numeric options to the ranges documented in the parser help;
      # truncate_percent is converted from a percentage to a fraction.
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # Copy every parsed option onto this instance; the assert guards
      # against parser options that __init__ doesn't declare.
      for k, v in opts.__dict__.iteritems():
        assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
        setattr(self, k, v)
    except RuntimeError, e:
      # Re-raise with the parser's help text appended so the user sees
      # usage information alongside the error.
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)
3313 | |
3314 @staticmethod | |
3315 def FromDict(values): | |
3316 """Creates an instance of BisectOptions from a dictionary. | |
3317 | |
3318 Args: | |
3319 values: a dict containing options to set. | |
3320 | |
3321 Returns: | |
3322 An instance of BisectOptions. | |
3323 """ | |
3324 opts = BisectOptions() | |
3325 for k, v in values.iteritems(): | |
3326 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k | |
3327 setattr(opts, k, v) | |
3328 | |
3329 if opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE: | |
3330 metric_values = opts.metric.split('/') | |
3331 if len(metric_values) != 2: | |
3332 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric) | |
3333 opts.metric = metric_values | |
3334 | |
3335 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) | |
3336 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) | |
3337 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) | |
3338 opts.truncate_percent = opts.truncate_percent / 100.0 | |
3339 | |
3340 return opts | |
3341 | |
3342 | |
def main():
  """Runs the bisection based on command-line options.

  Returns:
    0 if the bisection completed successfully, 1 on any RuntimeError
    (bad options, unsupported platform/workflow, or a failed bisect).
  """
  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    if opts.extra_src:
      # --extra_src names a script that can customize bisect behavior,
      # e.g. by registering additional depots to bisect into.
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError('Invalid or missing --extra_src.')
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      # The checkout created above lives under <working_directory>/bisect;
      # move into its src directory for the rest of the run.
      os.chdir(os.path.join(os.getcwd(), 'src'))

    if not RemoveBuildFiles(opts.target_build_type):
      raise RuntimeError('Something went wrong removing the build files.')

    if not _IsPlatformSupported():
      raise RuntimeError('Sorry, this platform isn\'t supported yet.')

    # Check what source control method is being used, and create a
    # SourceControl object if possible.
    source_control = source_control_module.DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError(
          'Sorry, only the git workflow is supported at the moment.')

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError('You must switch to master branch to run bisection.')
    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always clean up temporary bisect state, even when the run fails.
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
3403 | |
3404 | |
if __name__ == '__main__':
  # Propagate main()'s return value (0 on success, 1 on error) as the
  # process exit code so shell callers and buildbot can detect failure.
  sys.exit(main())
OLD | NEW |