OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Performance Test Bisect Tool | 6 """Performance Test Bisect Tool |
7 | 7 |
8 This script bisects a series of changelists using binary search. It starts at | 8 This script bisects a series of changelists using binary search. It starts at |
9 a bad revision where a performance metric has regressed, and asks for a last | 9 a bad revision where a performance metric has regressed, and asks for a last |
10 known-good revision. It will then binary search across this revision range by | 10 known-good revision. It will then binary search across this revision range by |
11 syncing, building, and running a performance test. If the change is | 11 syncing, building, and running a performance test. If the change is |
12 suspected to occur as a result of WebKit/V8 changes, the script will | 12 suspected to occur as a result of WebKit/V8 changes, the script will |
13 further bisect changes to those depots and attempt to narrow down the revision | 13 further bisect changes to those depots and attempt to narrow down the revision |
14 range. | 14 range. |
15 | 15 |
16 | 16 Example usage using SVN revisions: |
17 An example usage (using svn cl's): | |
18 | 17 |
19 ./tools/bisect-perf-regression.py -c\ | 18 ./tools/bisect-perf-regression.py -c\ |
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | 19 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ |
21 -g 168222 -b 168232 -m shutdown/simple-user-quit | 20 -g 168222 -b 168232 -m shutdown/simple-user-quit |
22 | 21 |
23 Be aware that if you're using the git workflow and specify an svn revision, | 22 Be aware that if you're using the git workflow and specify an SVN revision, |
24 the script will attempt to find the git SHA1 where svn changes up to that | 23 the script will attempt to find the git SHA1 where SVN changes up to that |
25 revision were merged in. | 24 revision were merged in. |
26 | 25 |
27 | 26 Example usage using git hashes: |
28 An example usage (using git hashes): | |
29 | 27 |
30 ./tools/bisect-perf-regression.py -c\ | 28 ./tools/bisect-perf-regression.py -c\ |
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | 29 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ |
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ | 30 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ |
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ | 31 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ |
34 -m shutdown/simple-user-quit | 32 -m shutdown/simple-user-quit |
35 """ | 33 """ |
36 | 34 |
37 import copy | 35 import copy |
38 import datetime | 36 import datetime |
(...skipping 113 matching lines...)
152 | 150 |
153 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() | 151 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() |
154 | 152 |
155 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome' | 153 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome' |
156 | 154 |
157 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision. | 155 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision. |
158 BUILD_RESULT_SUCCEED = 0 | 156 BUILD_RESULT_SUCCEED = 0 |
159 BUILD_RESULT_FAIL = 1 | 157 BUILD_RESULT_FAIL = 1 |
160 BUILD_RESULT_SKIPPED = 2 | 158 BUILD_RESULT_SKIPPED = 2 |
161 | 159 |
162 # Maximum time in seconds to wait after posting build request to tryserver. | 160 # Maximum time in seconds to wait after posting build request to the try server. |
163 # TODO: Change these values based on the actual time taken by buildbots on | 161 # TODO: Change these values based on the actual time taken by buildbots on |
164 # the tryserver. | 162 # the try server. |
165 MAX_MAC_BUILD_TIME = 14400 | 163 MAX_MAC_BUILD_TIME = 14400 |
166 MAX_WIN_BUILD_TIME = 14400 | 164 MAX_WIN_BUILD_TIME = 14400 |
167 MAX_LINUX_BUILD_TIME = 14400 | 165 MAX_LINUX_BUILD_TIME = 14400 |
168 | 166 |
169 # The confidence percentage at which confidence can be consider "high". | 167 # The confidence percentage at which confidence can be considered "high". |
170 HIGH_CONFIDENCE = 95 | 168 HIGH_CONFIDENCE = 95 |
171 | 169 |
172 # Patch template to add a new file, DEPS.sha under src folder. | 170 # Patch template to add a new file, DEPS.sha, under the src folder. |
173 # This file contains SHA1 value of the DEPS changes made while bisecting | 171 # This file contains the SHA1 of the DEPS changes made while bisecting |
174 # dependency repositories. This patch send along with DEPS patch to tryserver. | 172 # dependency repositories. This patch is sent to the try server along with |
175 # When a build requested is posted with a patch, bisect builders on tryserver, | 173 # the DEPS patch. Once the bisect builders on the try server produce a |
176 # once build is produced, it reads SHA value from this file and appends it | 174 # build, they read the SHA1 from this file and append it to the build |
177 # to build archive filename. | 175 # archive file name. |
178 DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha | 176 DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha |
179 new file mode 100644 | 177 new file mode 100644 |
180 --- /dev/null | 178 --- /dev/null |
181 +++ src/DEPS.sha | 179 +++ src/DEPS.sha |
182 @@ -0,0 +1 @@ | 180 @@ -0,0 +1 @@ |
183 +%(deps_sha)s | 181 +%(deps_sha)s |
184 """ | 182 """ |
185 | 183 |
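The single %(deps_sha)s placeholder above is filled with the digest of the locally modified DEPS contents; roughly (an illustrative sketch only, using the GetSHA1HexDigest helper defined further down and a hypothetical deps_contents string):

# deps_contents is assumed to hold the text of the edited DEPS file.
deps_sha_patch = DEPS_SHA_PATCH % {'deps_sha': GetSHA1HexDigest(deps_contents)}
# The resulting patch adds src/DEPS.sha containing that SHA1 and is sent to the
# try server together with the DEPS patch itself.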
(...skipping 24 matching lines...)
210 Author : %(author)s%(email_info)s%(commit_info)s | 208 Author : %(author)s%(email_info)s%(commit_info)s |
211 Commit : %(cl)s | 209 Commit : %(cl)s |
212 Date : %(cl_date)s""" | 210 Date : %(cl_date)s""" |
213 | 211 |
214 REPRO_STEPS_LOCAL = """ | 212 REPRO_STEPS_LOCAL = """ |
215 ==== INSTRUCTIONS TO REPRODUCE ==== | 213 ==== INSTRUCTIONS TO REPRODUCE ==== |
216 To run locally: | 214 To run locally: |
217 $%(command)s""" | 215 $%(command)s""" |
218 | 216 |
219 REPRO_STEPS_TRYJOB = """ | 217 REPRO_STEPS_TRYJOB = """ |
220 To reproduce on Performance trybot: | 218 To reproduce on a performance try bot: |
221 1. Create new git branch or check out existing branch. | 219 1. Create a new git branch or check out an existing branch. |
222 2. Edit tools/run-perf-test.cfg (instructions in file) or \ | 220 2. Edit tools/run-perf-test.cfg (instructions in file) or \ |
223 third_party/WebKit/Tools/run-perf-test.cfg. | 221 third_party/WebKit/Tools/run-perf-test.cfg. |
224 a) Take care to strip any src/ directories from the head of \ | 222 a) Take care to strip any src/ directories from the head of \ |
225 relative path names. | 223 relative path names. |
226 b) On desktop, only --browser=release is supported, on android \ | 224 b) On desktop, only --browser=release is supported; on Android, use \ |
227 --browser=android-chromium-testshell. | 225 --browser=android-chromium-testshell. |
228 c) Test command to use: %(command)s | 226 c) Test command to use: %(command)s |
229 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \ | 227 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \ |
230 committed locally to run-perf-test.cfg. | 228 committed locally to run-perf-test.cfg. |
231 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository. | 229 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository. |
232 $ git cl upload --bypass-hooks | 230 $ git cl upload --bypass-hooks |
233 4. Send your try job to the tryserver. \ | 231 4. Send your try job to the try server. \ |
234 [Please make sure to use appropriate bot to reproduce] | 232 [Please make sure to use the appropriate bot to reproduce.] |
235 $ git cl try -m tryserver.chromium.perf -b <bot> | 233 $ git cl try -m tryserver.chromium.perf -b <bot> |
236 | 234 |
237 For more details please visit | 235 For more details please visit |
238 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots""" | 236 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots""" |
239 | 237 |
240 RESULTS_THANKYOU = """ | 238 RESULTS_THANKYOU = """ |
241 ===== THANK YOU FOR CHOOSING BISECT AIRLINES ===== | 239 ===== THANK YOU FOR CHOOSING BISECT AIRLINES ===== |
242 Visit http://www.chromium.org/developers/core-principles for Chrome's policy | 240 Visit http://www.chromium.org/developers/core-principles for Chrome's policy |
243 on perf regressions. | 241 on perf regressions. |
(...skipping 58 matching lines...)
302 def GetSHA1HexDigest(contents): | 300 def GetSHA1HexDigest(contents): |
303 """Returns SHA1 hex digest of the given string.""" | 301 """Returns SHA1 hex digest of the given string.""" |
304 return hashlib.sha1(contents).hexdigest() | 302 return hashlib.sha1(contents).hexdigest() |
305 | 303 |
306 | 304 |
307 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None): | 305 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None): |
308 """Gets the archive file name for the given revision.""" | 306 """Gets the archive file name for the given revision.""" |
309 def PlatformName(): | 307 def PlatformName(): |
310 """Return a string to be used in paths for the platform.""" | 308 """Return a string to be used in paths for the platform.""" |
311 if bisect_utils.IsWindowsHost(): | 309 if bisect_utils.IsWindowsHost(): |
312 # Build archive for x64 is still stored with 'win32'suffix | 310 # Build archive for x64 is still stored with the "win32" suffix. |
313 # (chromium_utils.PlatformName()). | 311 # See chromium_utils.PlatformName(). |
314 if bisect_utils.Is64BitWindows() and target_arch == 'x64': | 312 if bisect_utils.Is64BitWindows() and target_arch == 'x64': |
315 return 'win32' | 313 return 'win32' |
316 return 'win32' | 314 return 'win32' |
317 if bisect_utils.IsLinuxHost(): | 315 if bisect_utils.IsLinuxHost(): |
318 # Android builds too are archived with full-build-linux* prefix. | 316 # Android builds are also archived with the "full-build-linux" prefix. |
319 return 'linux' | 317 return 'linux' |
320 if bisect_utils.IsMacHost(): | 318 if bisect_utils.IsMacHost(): |
321 return 'mac' | 319 return 'mac' |
322 raise NotImplementedError('Unknown platform "%s".' % sys.platform) | 320 raise NotImplementedError('Unknown platform "%s".' % sys.platform) |
323 | 321 |
324 base_name = 'full-build-%s' % PlatformName() | 322 base_name = 'full-build-%s' % PlatformName() |
325 if not build_revision: | 323 if not build_revision: |
326 return base_name | 324 return base_name |
327 if patch_sha: | 325 if patch_sha: |
328 build_revision = '%s_%s' % (build_revision , patch_sha) | 326 build_revision = '%s_%s' % (build_revision, patch_sha) |
329 return '%s_%s.zip' % (base_name, build_revision) | 327 return '%s_%s.zip' % (base_name, build_revision) |
330 | 328 |
331 | 329 |
332 def GetRemoteBuildPath(build_revision, target_platform='chromium', | 330 def GetRemoteBuildPath(build_revision, target_platform='chromium', |
333 target_arch='ia32', patch_sha=None): | 331 target_arch='ia32', patch_sha=None): |
334 """Compute the url to download the build from.""" | 332 """Returns the URL to download the build from.""" |
335 def GetGSRootFolderName(target_platform): | 333 def GetGSRootFolderName(target_platform): |
336 """Gets Google Cloud Storage root folder names""" | 334 """Returns the Google Cloud Storage root folder name.""" |
337 if bisect_utils.IsWindowsHost(): | 335 if bisect_utils.IsWindowsHost(): |
338 if bisect_utils.Is64BitWindows() and target_arch == 'x64': | 336 if bisect_utils.Is64BitWindows() and target_arch == 'x64': |
339 return 'Win x64 Builder' | 337 return 'Win x64 Builder' |
340 return 'Win Builder' | 338 return 'Win Builder' |
341 if bisect_utils.IsLinuxHost(): | 339 if bisect_utils.IsLinuxHost(): |
342 if target_platform == 'android': | 340 if target_platform == 'android': |
343 return 'android_perf_rel' | 341 return 'android_perf_rel' |
344 return 'Linux Builder' | 342 return 'Linux Builder' |
345 if bisect_utils.IsMacHost(): | 343 if bisect_utils.IsMacHost(): |
346 return 'Mac Builder' | 344 return 'Mac Builder' |
347 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform) | 345 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform) |
348 | 346 |
349 base_filename = GetZipFileName( | 347 base_filename = GetZipFileName( |
350 build_revision, target_arch, patch_sha) | 348 build_revision, target_arch, patch_sha) |
351 builder_folder = GetGSRootFolderName(target_platform) | 349 builder_folder = GetGSRootFolderName(target_platform) |
352 return '%s/%s' % (builder_folder, base_filename) | 350 return '%s/%s' % (builder_folder, base_filename) |
353 | 351 |
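For instance, on a 64-bit Windows host bisecting an x64 build, the two helpers above would combine roughly as follows (the revision and patch SHA values are placeholders):

GetZipFileName('b732f23b', 'x64', 'deadbeef')
# -> 'full-build-win32_b732f23b_deadbeef.zip'
GetRemoteBuildPath('b732f23b', 'chromium', 'x64', 'deadbeef')
# -> 'Win x64 Builder/full-build-win32_b732f23b_deadbeef.zip'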
354 | 352 |
355 def FetchFromCloudStorage(bucket_name, source_path, destination_path): | 353 def FetchFromCloudStorage(bucket_name, source_path, destination_path): |
356 """Fetches file(s) from the Google Cloud Storage. | 354 """Fetches file(s) from the Google Cloud Storage. |
357 | 355 |
358 Args: | 356 Args: |
359 bucket_name: Google Storage bucket name. | 357 bucket_name: Google Storage bucket name. |
360 source_path: Source file path. | 358 source_path: Source file path. |
361 destination_path: Destination file path. | 359 destination_path: Destination file path. |
362 | 360 |
363 Returns: | 361 Returns: |
364 Downloaded file path if exisits, otherwise None. | 362 The downloaded file path if it exists, otherwise None. |
365 """ | 363 """ |
366 target_file = os.path.join(destination_path, os.path.basename(source_path)) | 364 target_file = os.path.join(destination_path, os.path.basename(source_path)) |
367 try: | 365 try: |
368 if cloud_storage.Exists(bucket_name, source_path): | 366 if cloud_storage.Exists(bucket_name, source_path): |
369 print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path) | 367 print 'Fetching file from gs://%s/%s ...' % (bucket_name, source_path) |
370 cloud_storage.Get(bucket_name, source_path, destination_path) | 368 cloud_storage.Get(bucket_name, source_path, destination_path) |
371 if os.path.exists(target_file): | 369 if os.path.exists(target_file): |
372 return target_file | 370 return target_file |
373 else: | 371 else: |
374 print ('File gs://%s/%s not found in cloud storage.' % ( | 372 print ('File gs://%s/%s not found in cloud storage.' % ( |
375 bucket_name, source_path)) | 373 bucket_name, source_path)) |
376 except Exception as e: | 374 except Exception as e: |
377 print 'Something went wrong while fetching file from cloud: %s' % e | 375 print 'Something went wrong while fetching file from cloud: %s' % e |
378 if os.path.exists(target_file): | 376 if os.path.exists(target_file): |
379 os.remove(target_file) | 377 os.remove(target_file) |
380 return None | 378 return None |
381 | 379 |
382 | 380 |
383 # This is copied from Chromium's project build/scripts/common/chromium_utils.py. | 381 # This was copied from build/scripts/common/chromium_utils.py. |
384 def MaybeMakeDirectory(*path): | 382 def MaybeMakeDirectory(*path): |
385 """Creates an entire path, if it doesn't already exist.""" | 383 """Creates an entire path, if it doesn't already exist.""" |
386 file_path = os.path.join(*path) | 384 file_path = os.path.join(*path) |
387 try: | 385 try: |
388 os.makedirs(file_path) | 386 os.makedirs(file_path) |
389 except OSError as e: | 387 except OSError as e: |
390 if e.errno != errno.EEXIST: | 388 if e.errno != errno.EEXIST: |
391 return False | 389 return False |
392 return True | 390 return True |
393 | 391 |
394 | 392 |
395 # This is copied from Chromium's project build/scripts/common/chromium_utils.py. | 393 # This was copied from build/scripts/common/chromium_utils.py. |
396 def ExtractZip(filename, output_dir, verbose=True): | 394 def ExtractZip(filename, output_dir, verbose=True): |
397 """ Extract the zip archive in the output directory.""" | 395 """ Extract the zip archive in the output directory.""" |
398 MaybeMakeDirectory(output_dir) | 396 MaybeMakeDirectory(output_dir) |
399 | 397 |
400 # On Linux and Mac, we use the unzip command as it will | 398 # On Linux and Mac, we use the unzip command as it will |
401 # handle links and file bits (executable), which is much | 399 # handle links and file bits (executable), which is much |
402 # easier then trying to do that with ZipInfo options. | 400 # easier than trying to do that with ZipInfo options. |
403 # | 401 # |
404 # The Mac Version of unzip unfortunately does not support Zip64, whereas | 402 # The Mac version of unzip unfortunately does not support Zip64, whereas |
405 # the python module does, so we have to fallback to the python zip module | 403 # the python module does, so we have to fall back to the python zip module |
406 # on Mac if the filesize is greater than 4GB. | 404 # on Mac if the file size is greater than 4GB. |
407 # | 405 # |
408 # On Windows, try to use 7z if it is installed, otherwise fall back to python | 406 # On Windows, try to use 7z if it is installed, otherwise fall back to python |
409 # zip module and pray we don't have files larger than 512MB to unzip. | 407 # zip module and pray we don't have files larger than 512MB to unzip. |
410 unzip_cmd = None | 408 unzip_cmd = None |
411 if ((bisect_utils.IsMacHost() | 409 if ((bisect_utils.IsMacHost() |
412 and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024) | 410 and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024) |
413 or bisect_utils.IsLinuxHost()): | 411 or bisect_utils.IsLinuxHost()): |
414 unzip_cmd = ['unzip', '-o'] | 412 unzip_cmd = ['unzip', '-o'] |
415 elif (bisect_utils.IsWindowsHost() | 413 elif (bisect_utils.IsWindowsHost() |
416 and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')): | 414 and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')): |
(...skipping 16 matching lines...)
433 if verbose: | 431 if verbose: |
434 print 'Extracting %s' % name | 432 print 'Extracting %s' % name |
435 zf.extract(name, output_dir) | 433 zf.extract(name, output_dir) |
436 if bisect_utils.IsMacHost(): | 434 if bisect_utils.IsMacHost(): |
437 # Restore permission bits. | 435 # Restore permission bits. |
438 os.chmod(os.path.join(output_dir, name), | 436 os.chmod(os.path.join(output_dir, name), |
439 zf.getinfo(name).external_attr >> 16L) | 437 zf.getinfo(name).external_attr >> 16L) |
440 | 438 |
441 | 439 |
442 def WriteStringToFile(text, file_name): | 440 def WriteStringToFile(text, file_name): |
| 441 """Writes text to a file, raising an RuntimeError on failure.""" |
443 try: | 442 try: |
444 with open(file_name, 'wb') as f: | 443 with open(file_name, 'wb') as f: |
445 f.write(text) | 444 f.write(text) |
446 except IOError: | 445 except IOError: |
447 raise RuntimeError('Error writing to file [%s]' % file_name ) | 446 raise RuntimeError('Error writing to file [%s]' % file_name) |
448 | 447 |
449 | 448 |
450 def ReadStringFromFile(file_name): | 449 def ReadStringFromFile(file_name): |
| 450 """Writes text to a file, raising an RuntimeError on failure.""" |
451 try: | 451 try: |
452 with open(file_name) as f: | 452 with open(file_name) as f: |
453 return f.read() | 453 return f.read() |
454 except IOError: | 454 except IOError: |
455 raise RuntimeError('Error reading file [%s]' % file_name ) | 455 raise RuntimeError('Error reading file [%s]' % file_name) |
456 | 456 |
457 | 457 |
458 def ChangeBackslashToSlashInPatch(diff_text): | 458 def ChangeBackslashToSlashInPatch(diff_text): |
459 """Formats file paths in the given text to unix-style paths.""" | 459 """Formats file paths in the given patch text to Unix-style paths.""" |
460 if diff_text: | 460 if not diff_text: |
461 diff_lines = diff_text.split('\n') | 461 return None |
462 for i in range(len(diff_lines)): | 462 diff_lines = diff_text.split('\n') |
463 if (diff_lines[i].startswith('--- ') or | 463 for i in range(len(diff_lines)): |
464 diff_lines[i].startswith('+++ ')): | 464 line = diff_lines[i] |
465 diff_lines[i] = diff_lines[i].replace('\\', '/') | 465 if line.startswith('--- ') or line.startswith('+++ '): |
466 return '\n'.join(diff_lines) | 466 diff_lines[i] = line.replace('\\', '/') |
467 return None | 467 return '\n'.join(diff_lines) |
468 | 468 |
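For instance, only the '--- '/'+++ ' header lines of a Windows-style patch are rewritten; hunk lines are left untouched (illustrative input and output):

win_patch = '--- src\\tools\\foo.cc\n+++ src\\tools\\foo.cc\n@@ -1 +1 @@\n'
ChangeBackslashToSlashInPatch(win_patch)
# -> '--- src/tools/foo.cc\n+++ src/tools/foo.cc\n@@ -1 +1 @@\n'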
469 | 469 |
470 def _ParseRevisionsFromDEPSFileManually(deps_file_contents): | 470 def _ParseRevisionsFromDEPSFileManually(deps_file_contents): |
471 """Parses the vars section of the DEPS file with regex. | 471 """Parses the vars section of the DEPS file using regular expressions. |
472 | 472 |
473 Args: | 473 Args: |
474 deps_file_contents: The DEPS file contents as a string. | 474 deps_file_contents: The DEPS file contents as a string. |
475 | 475 |
476 Returns: | 476 Returns: |
477 A dict in the format {depot:revision} if successful, otherwise None. | 477 A dictionary in the format {depot: revision} if successful, otherwise None. |
478 """ | 478 """ |
479 # We'll parse the "vars" section of the DEPS file. | 479 # We'll parse the "vars" section of the DEPS file. |
480 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE) | 480 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE) |
481 re_results = rxp.search(deps_file_contents) | 481 re_results = rxp.search(deps_file_contents) |
482 | 482 |
483 if not re_results: | 483 if not re_results: |
484 return None | 484 return None |
485 | 485 |
486 # We should be left with a series of entries in the vars component of | 486 # We should be left with a series of entries in the vars component of |
487 # the DEPS file with the following format: | 487 # the DEPS file with the following format: |
488 # 'depot_name': 'revision', | 488 # 'depot_name': 'revision', |
489 vars_body = re_results.group('vars_body') | 489 vars_body = re_results.group('vars_body') |
490 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'", | 490 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'", |
491 re.MULTILINE) | 491 re.MULTILINE) |
492 re_results = rxp.findall(vars_body) | 492 re_results = rxp.findall(vars_body) |
493 | 493 |
494 return dict(re_results) | 494 return dict(re_results) |
495 | 495 |
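Given a vars block of the expected shape, the fallback parser returns the raw revision strings keyed by DEPS variable name (a made-up snippet for illustration):

deps_text = """
vars = {
  'webkit_revision': 'a1b2c3d4',
  'v8_revision': '1234',
}
"""
_ParseRevisionsFromDEPSFileManually(deps_text)
# -> {'webkit_revision': 'a1b2c3d4', 'v8_revision': '1234'}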
496 | 496 |
497 def _WaitUntilBuildIsReady( | 497 def _WaitUntilBuildIsReady( |
498 fetch_build, bot_name, builder_host, builder_port, build_request_id, | 498 fetch_build, bot_name, builder_host, builder_port, build_request_id, |
499 max_timeout): | 499 max_timeout): |
500 """Waits until build is produced by bisect builder on tryserver. | 500 """Waits until build is produced by bisect builder on try server. |
501 | 501 |
502 Args: | 502 Args: |
503 fetch_build: Function to check and download build from cloud storage. | 503 fetch_build: Function to check and download build from cloud storage. |
504 bot_name: Builder bot name on tryserver. | 504 bot_name: Builder bot name on try server. |
505 builder_host Tryserver hostname. | 505 builder_host: Try server host name. |
506 builder_port: Tryserver port. | 506 builder_port: Try server port. |
507 build_request_id: A unique ID of the build request posted to tryserver. | 507 build_request_id: A unique ID of the build request posted to the try server. |
508 max_timeout: Maximum time to wait for the build. | 508 max_timeout: Maximum time in seconds to wait for the build. |
509 | 509 |
510 Returns: | 510 Returns: |
511 Downloaded archive file path if exists, otherwise None. | 511 A tuple (downloaded archive file path or None, status message). |
512 """ | 512 """ |
513 # Build number on the tryserver. | 513 # Build number on the try server. |
514 build_num = None | 514 build_num = None |
515 # Interval to check build on cloud storage. | 515 # Interval in seconds between checks for the build on cloud storage. |
516 poll_interval = 60 | 516 poll_interval = 60 |
517 # Interval to check build status on tryserver. | 517 # Interval in seconds between build status checks on the try server. |
518 status_check_interval = 600 | 518 status_check_interval = 600 |
519 last_status_check = time.time() | 519 last_status_check = time.time() |
520 start_time = time.time() | 520 start_time = time.time() |
521 while True: | 521 while True: |
522 # Checks for build on gs://chrome-perf and download if exists. | 522 # Check for the build on gs://chrome-perf and download it if it exists. |
523 res = fetch_build() | 523 res = fetch_build() |
524 if res: | 524 if res: |
525 return (res, 'Build successfully found') | 525 return (res, 'Build successfully found') |
526 elapsed_status_check = time.time() - last_status_check | 526 elapsed_status_check = time.time() - last_status_check |
527 # To avoid overloading tryserver with status check requests, we check | 527 # To avoid overloading the try server with status check requests, we check |
528 # build status for every 10 mins. | 528 # the build status only every 10 minutes. |
529 if elapsed_status_check > status_check_interval: | 529 if elapsed_status_check > status_check_interval: |
530 last_status_check = time.time() | 530 last_status_check = time.time() |
531 if not build_num: | 531 if not build_num: |
532 # Get the build number on tryserver for the current build. | 532 # Get the build number on the try server for the current build. |
533 build_num = bisect_builder.GetBuildNumFromBuilder( | 533 build_num = bisect_builder.GetBuildNumFromBuilder( |
534 build_request_id, bot_name, builder_host, builder_port) | 534 build_request_id, bot_name, builder_host, builder_port) |
535 # Check the status of build using the build number. | 535 # Check the status of build using the build number. |
536 # Note: Build is treated as PENDING if build number is not found | 536 # Note: The build is treated as PENDING if the build number is not found |
537 # on the the tryserver. | 537 # on the try server. |
538 build_status, status_link = bisect_builder.GetBuildStatus( | 538 build_status, status_link = bisect_builder.GetBuildStatus( |
539 build_num, bot_name, builder_host, builder_port) | 539 build_num, bot_name, builder_host, builder_port) |
540 if build_status == bisect_builder.FAILED: | 540 if build_status == bisect_builder.FAILED: |
541 return (None, 'Failed to produce build, log: %s' % status_link) | 541 return (None, 'Failed to produce build, log: %s' % status_link) |
542 elapsed_time = time.time() - start_time | 542 elapsed_time = time.time() - start_time |
543 if elapsed_time > max_timeout: | 543 if elapsed_time > max_timeout: |
544 return (None, 'Timed out: %ss without build' % max_timeout) | 544 return (None, 'Timed out: %ss without build' % max_timeout) |
545 | 545 |
546 print 'Time elapsed: %ss without build.' % elapsed_time | 546 print 'Time elapsed: %ss without build.' % elapsed_time |
547 time.sleep(poll_interval) | 547 time.sleep(poll_interval) |
(...skipping 228 matching lines...)
776 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): | 776 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): |
777 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) | 777 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) |
778 return not bisect_utils.RunProcess(['python', path_to_generate, | 778 return not bisect_utils.RunProcess(['python', path_to_generate, |
779 '--profile-type-to-generate', profile_type, | 779 '--profile-type-to-generate', profile_type, |
780 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) | 780 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) |
781 return False | 781 return False |
782 return True | 782 return True |
783 | 783 |
784 | 784 |
785 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data): | 785 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data): |
786 """Adds new revisions to the revision_data dict and initializes them. | 786 """Adds new revisions to the revision_data dictionary and initializes them. |
787 | 787 |
788 Args: | 788 Args: |
789 revisions: List of revisions to add. | 789 revisions: List of revisions to add. |
790 depot: Depot that's currently in use (src, webkit, etc...) | 790 depot: Depot that's currently in use (src, webkit, etc...) |
791 sort: Sorting key for displaying revisions. | 791 sort: Sorting key for displaying revisions. |
792 revision_data: A dict to add the new revisions into. Existing revisions | 792 revision_data: A dictionary to add the new revisions into. |
793 will have their sort keys offset. | 793 Existing revisions will have their sort keys adjusted. |
794 """ | 794 """ |
795 num_depot_revisions = len(revisions) | 795 num_depot_revisions = len(revisions) |
796 | 796 |
797 for _, v in revision_data.iteritems(): | 797 for _, v in revision_data.iteritems(): |
798 if v['sort'] > sort: | 798 if v['sort'] > sort: |
799 v['sort'] += num_depot_revisions | 799 v['sort'] += num_depot_revisions |
800 | 800 |
801 for i in xrange(num_depot_revisions): | 801 for i in xrange(num_depot_revisions): |
802 r = revisions[i] | 802 r = revisions[i] |
803 revision_data[r] = { | 803 revision_data[r] = { |
(...skipping 46 matching lines...)
850 print 'Average build time : %s' % datetime.timedelta( | 850 print 'Average build time : %s' % datetime.timedelta( |
851 seconds=int(step_build_time_avg)) | 851 seconds=int(step_build_time_avg)) |
852 print 'Average test time : %s' % datetime.timedelta( | 852 print 'Average test time : %s' % datetime.timedelta( |
853 seconds=int(step_perf_time_avg)) | 853 seconds=int(step_perf_time_avg)) |
854 | 854 |
855 | 855 |
856 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good): | 856 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good): |
857 """Compiles a list of other possible regressions from the revision data. | 857 """Compiles a list of other possible regressions from the revision data. |
858 | 858 |
859 Args: | 859 Args: |
860 revision_data_sorted: Sorted list of (revision, revision data dict) pairs. | 860 revision_data_sorted: Sorted list of (revision, revision data) pairs. |
861 bad_greater_than_good: Whether the result value at the "bad" revision is | 861 bad_greater_than_good: Whether the result value at the "bad" revision is |
862 numerically greater than the result value at the "good" revision. | 862 numerically greater than the result value at the "good" revision. |
863 | 863 |
864 Returns: | 864 Returns: |
865 A list of [current_rev, previous_rev, confidence] for other places where | 865 A list of [current_rev, previous_rev, confidence] for other places where |
866 there may have been a regression. | 866 there may have been a regression. |
867 """ | 867 """ |
868 other_regressions = [] | 868 other_regressions = [] |
869 previous_values = [] | 869 previous_values = [] |
870 previous_id = None | 870 previous_id = None |
(...skipping 209 matching lines...)
1080 if (depot_data.get('recurse') and depot in depot_data.get('from')): | 1080 if (depot_data.get('recurse') and depot in depot_data.get('from')): |
1081 depot_data_src = depot_data.get('src') or depot_data.get('src_old') | 1081 depot_data_src = depot_data.get('src') or depot_data.get('src_old') |
1082 src_dir = deps_data.get(depot_data_src) | 1082 src_dir = deps_data.get(depot_data_src) |
1083 if src_dir: | 1083 if src_dir: |
1084 self.depot_cwd[depot_name] = os.path.join(self.src_cwd, | 1084 self.depot_cwd[depot_name] = os.path.join(self.src_cwd, |
1085 depot_data_src[4:]) | 1085 depot_data_src[4:]) |
1086 re_results = rxp.search(src_dir) | 1086 re_results = rxp.search(src_dir) |
1087 if re_results: | 1087 if re_results: |
1088 results[depot_name] = re_results.group('revision') | 1088 results[depot_name] = re_results.group('revision') |
1089 else: | 1089 else: |
1090 warning_text = ('Couldn\'t parse revision for %s while bisecting ' | 1090 warning_text = ('Could not parse revision for %s while bisecting ' |
1091 '%s' % (depot_name, depot)) | 1091 '%s' % (depot_name, depot)) |
1092 if not warning_text in self.warnings: | 1092 if warning_text not in self.warnings: |
1093 self.warnings.append(warning_text) | 1093 self.warnings.append(warning_text) |
1094 else: | 1094 else: |
1095 results[depot_name] = None | 1095 results[depot_name] = None |
1096 return results | 1096 return results |
1097 except ImportError: | 1097 except ImportError: |
1098 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT) | 1098 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT) |
1099 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents) | 1099 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents) |
1100 results = {} | 1100 results = {} |
1101 for depot_name, depot_revision in parse_results.iteritems(): | 1101 for depot_name, depot_revision in parse_results.iteritems(): |
1102 depot_revision = depot_revision.strip('@') | 1102 depot_revision = depot_revision.strip('@') |
1103 print depot_name, depot_revision | 1103 print depot_name, depot_revision |
1104 for current_name, current_data in DEPOT_DEPS_NAME.iteritems(): | 1104 for current_name, current_data in DEPOT_DEPS_NAME.iteritems(): |
1105 if (current_data.has_key('deps_var') and | 1105 if (current_data.has_key('deps_var') and |
1106 current_data['deps_var'] == depot_name): | 1106 current_data['deps_var'] == depot_name): |
1107 src_name = current_name | 1107 src_name = current_name |
1108 results[src_name] = depot_revision | 1108 results[src_name] = depot_revision |
1109 break | 1109 break |
1110 return results | 1110 return results |
1111 | 1111 |
1112 def _Get3rdPartyRevisions(self, depot): | 1112 def _Get3rdPartyRevisions(self, depot): |
1113 """Parses the DEPS file to determine WebKit/v8/etc... versions. | 1113 """Parses the DEPS file to determine WebKit/v8/etc... versions. |
1114 | 1114 |
| 1115 Args: |
| 1116 depot: A depot name. Should be in the DEPOT_NAMES list. |
| 1117 |
1115 Returns: | 1118 Returns: |
1116 A dict in the format {depot:revision} if successful, otherwise None. | 1119 A dict in the format {depot: revision} if successful, otherwise None. |
1117 """ | 1120 """ |
1118 cwd = os.getcwd() | 1121 cwd = os.getcwd() |
1119 self.ChangeToDepotWorkingDirectory(depot) | 1122 self.ChangeToDepotWorkingDirectory(depot) |
1120 | 1123 |
1121 results = {} | 1124 results = {} |
1122 | 1125 |
1123 if depot == 'chromium' or depot == 'android-chrome': | 1126 if depot == 'chromium' or depot == 'android-chrome': |
1124 results = self._ParseRevisionsFromDEPSFile(depot) | 1127 results = self._ParseRevisionsFromDEPSFile(depot) |
1125 os.chdir(cwd) | 1128 os.chdir(cwd) |
1126 | 1129 |
(...skipping 167 matching lines...)
1294 # Cleanup any leftovers from unzipping. | 1297 # Cleanup any leftovers from unzipping. |
1295 if os.path.exists(output_dir): | 1298 if os.path.exists(output_dir): |
1296 RmTreeAndMkDir(output_dir, skip_makedir=True) | 1299 RmTreeAndMkDir(output_dir, skip_makedir=True) |
1297 finally: | 1300 finally: |
1298 # Delete downloaded archive | 1301 # Delete downloaded archive |
1299 if os.path.exists(downloaded_file): | 1302 if os.path.exists(downloaded_file): |
1300 os.remove(downloaded_file) | 1303 os.remove(downloaded_file) |
1301 return False | 1304 return False |
1302 | 1305 |
1303 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None): | 1306 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None): |
1304 """POSTs the build request job to the tryserver instance. | 1307 """POSTs the build request job to the try server instance. |
1305 | 1308 |
1306 A try job build request is posted to tryserver.chromium.perf master, | 1309 A try job build request is posted to the tryserver.chromium.perf master, |
1307 and waits for the binaries to be produced and archived on cloud storage. | 1310 and this method waits for the binaries to be produced and archived on |
1308 Once the build is ready and stored onto cloud, build archive is downloaded | 1311 cloud storage. Once the build is ready, the archive is downloaded into |
1309 into the output folder. | 1312 the output folder. |
1310 | 1313 |
1311 Args: | 1314 Args: |
1312 revision: A Git hash revision. | 1315 revision: A Git hash revision. |
1313 fetch_build: Function to check and download build from cloud storage. | 1316 fetch_build: Function to check and download build from cloud storage. |
1314 patch: A DEPS patch (used while bisecting 3rd party repositories). | 1317 patch: A DEPS patch (used while bisecting 3rd party repositories). |
(...skipping 23 matching lines...)
1338 if bisect_utils.IsMacHost(): | 1341 if bisect_utils.IsMacHost(): |
1339 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME) | 1342 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME) |
1340 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform) | 1343 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform) |
1341 if not fetch_build: | 1344 if not fetch_build: |
1342 return False | 1345 return False |
1343 | 1346 |
1344 bot_name, build_timeout = GetBuilderNameAndBuildTime( | 1347 bot_name, build_timeout = GetBuilderNameAndBuildTime( |
1345 self.opts.target_platform, self.opts.target_arch) | 1348 self.opts.target_platform, self.opts.target_arch) |
1346 builder_host = self.opts.builder_host | 1349 builder_host = self.opts.builder_host |
1347 builder_port = self.opts.builder_port | 1350 builder_port = self.opts.builder_port |
1348 # Create a unique ID for each build request posted to tryserver builders. | 1351 # Create a unique ID for each build request posted to try server builders. |
1349 # This ID is added to "Reason" property in build's json. | 1352 # This ID is added to the "Reason" property of the build. |
1350 build_request_id = GetSHA1HexDigest( | 1353 build_request_id = GetSHA1HexDigest( |
1351 '%s-%s-%s' % (svn_revision, patch, time.time())) | 1354 '%s-%s-%s' % (svn_revision, patch, time.time())) |
1352 | 1355 |
1353 # Creates a try job description. | 1356 # Creates a try job description. |
1354 job_args = { | 1357 job_args = { |
1355 'revision': 'src@%s' % svn_revision, | 1358 'revision': 'src@%s' % svn_revision, |
1356 'bot': bot_name, | 1359 'bot': bot_name, |
1357 'name': build_request_id, | 1360 'name': build_request_id, |
1358 } | 1361 } |
1359 # Update patch information if supplied. | 1362 # Update patch information if supplied. |
1360 if patch: | 1363 if patch: |
1361 job_args['patch'] = patch | 1364 job_args['patch'] = patch |
1362 # Posts job to build the revision on the server. | 1365 # Posts job to build the revision on the server. |
1363 if bisect_builder.PostTryJob(builder_host, builder_port, job_args): | 1366 if bisect_builder.PostTryJob(builder_host, builder_port, job_args): |
1364 target_file, error_msg = _WaitUntilBuildIsReady( | 1367 target_file, error_msg = _WaitUntilBuildIsReady( |
1365 fetch_build, bot_name, builder_host, builder_port, build_request_id, | 1368 fetch_build, bot_name, builder_host, builder_port, build_request_id, |
1366 build_timeout) | 1369 build_timeout) |
1367 if not target_file: | 1370 if not target_file: |
1368 print '%s [revision: %s]' % (error_msg, svn_revision) | 1371 print '%s [revision: %s]' % (error_msg, svn_revision) |
1369 return None | 1372 return None |
1370 return target_file | 1373 return target_file |
1371 print 'Failed to post build request for revision: [%s]' % svn_revision | 1374 print 'Failed to post build request for revision: [%s]' % svn_revision |
1372 return None | 1375 return None |
1373 | 1376 |
1374 def IsDownloadable(self, depot): | 1377 def IsDownloadable(self, depot): |
1375 """Checks if build is downloadable based on target platform and depot.""" | 1378 """Checks if build can be downloaded based on target platform and depot.""" |
1376 if (self.opts.target_platform in ['chromium', 'android'] and | 1379 if (self.opts.target_platform in ['chromium', 'android'] and |
1377 self.opts.gs_bucket): | 1380 self.opts.gs_bucket): |
1378 return (depot == 'chromium' or | 1381 return (depot == 'chromium' or |
1379 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or | 1382 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or |
1380 'v8' in DEPOT_DEPS_NAME[depot]['from']) | 1383 'v8' in DEPOT_DEPS_NAME[depot]['from']) |
1381 return False | 1384 return False |
1382 | 1385 |
1383 def UpdateDeps(self, revision, depot, deps_file): | 1386 def UpdateDeps(self, revision, depot, deps_file): |
1384 """Updates DEPS file with new revision of dependency repository. | 1387 """Updates DEPS file with new revision of dependency repository. |
1385 | 1388 |
(...skipping 144 matching lines...)
1530 def _IsBisectModeReturnCode(self): | 1533 def _IsBisectModeReturnCode(self): |
1531 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE] | 1534 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE] |
1532 | 1535 |
1533 def _IsBisectModeStandardDeviation(self): | 1536 def _IsBisectModeStandardDeviation(self): |
1534 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] | 1537 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] |
1535 | 1538 |
1536 def GetCompatibleCommand(self, command_to_run, revision, depot): | 1539 def GetCompatibleCommand(self, command_to_run, revision, depot): |
1537 # Prior to crrev.com/274857 *only* android-chromium-testshell | 1540 # Prior to crrev.com/274857, *only* android-chromium-testshell worked. |
1538 # Then until crrev.com/276628 *both* (android-chromium-testshell and | 1541 # Then, until crrev.com/276628, *both* android-chromium-testshell and |
1539 # android-chrome-shell) work. After that rev 276628 *only* | 1542 # android-chrome-shell worked. After rev 276628, *only* |
1540 # android-chrome-shell works. bisect-perf-reggresion.py script should | 1543 # android-chrome-shell works. The bisect-perf-regression.py script should |
1541 # handle these cases and set appropriate browser type based on revision. | 1544 # handle these cases and set the appropriate browser type based on revision. |
1542 if self.opts.target_platform in ['android']: | 1545 if self.opts.target_platform in ['android']: |
1543 # When its a third_party depot, get the chromium revision. | 1546 # When it's a third_party depot, get the chromium revision. |
1544 if depot != 'chromium': | 1547 if depot != 'chromium': |
1545 revision = bisect_utils.CheckRunGit( | 1548 revision = bisect_utils.CheckRunGit( |
1546 ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip() | 1549 ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip() |
1547 svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd) | 1550 svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd) |
1548 if not svn_revision: | 1551 if not svn_revision: |
1549 return command_to_run | 1552 return command_to_run |
1550 cmd_re = re.compile('--browser=(?P<browser_type>\S+)') | 1553 cmd_re = re.compile('--browser=(?P<browser_type>\S+)') |
(...skipping 145 matching lines...)
1696 'std_dev': standard_dev, | 1699 'std_dev': standard_dev, |
1697 'values': metric_values, | 1700 'values': metric_values, |
1698 } | 1701 } |
1699 | 1702 |
1700 print 'Results of performance test: %12f %12f' % ( | 1703 print 'Results of performance test: %12f %12f' % ( |
1701 truncated_mean, standard_err) | 1704 truncated_mean, standard_err) |
1702 print | 1705 print |
1703 return (values, success_code, output_of_all_runs) | 1706 return (values, success_code, output_of_all_runs) |
1704 | 1707 |
1705 def FindAllRevisionsToSync(self, revision, depot): | 1708 def FindAllRevisionsToSync(self, revision, depot): |
1706 """Finds all dependant revisions and depots that need to be synced for a | 1709 """Finds all dependent revisions and depots that need to be synced. |
1707 given revision. This is only useful in the git workflow, as an svn depot | |
1708 may be split into multiple mirrors. | |
1709 | 1710 |
1710 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and | 1711 For example, skia is broken up into 3 git mirrors over skia/src, |
1711 skia/include. To sync skia/src properly, one has to find the proper | 1712 skia/gyp, and skia/include. To sync skia/src properly, one has to find |
1712 revisions in skia/gyp and skia/include. | 1713 the proper revisions in skia/gyp and skia/include. |
| 1714 |
| 1715 This is only useful in the git workflow, as an SVN depot may be split into |
| 1716 multiple mirrors. |
1713 | 1717 |
1714 Args: | 1718 Args: |
1715 revision: The revision to sync to. | 1719 revision: The revision to sync to. |
1716 depot: The depot in use at the moment (probably skia). | 1720 depot: The depot in use at the moment (probably skia). |
1717 | 1721 |
1718 Returns: | 1722 Returns: |
1719 A list of [depot, revision] pairs that need to be synced. | 1723 A list of [depot, revision] pairs that need to be synced. |
1720 """ | 1724 """ |
1721 revisions_to_sync = [[depot, revision]] | 1725 revisions_to_sync = [[depot, revision]] |
1722 | 1726 |
1723 is_base = ((depot == 'chromium') or (depot == 'cros') or | 1727 is_base = ((depot == 'chromium') or (depot == 'cros') or |
1724 (depot == 'android-chrome')) | 1728 (depot == 'android-chrome')) |
1725 | 1729 |
1726 # Some SVN depots were split into multiple git depots, so we need to | 1730 # Some SVN depots were split into multiple git depots, so we need to |
1727 # figure out for each mirror which git revision to grab. There's no | 1731 # figure out for each mirror which git revision to grab. There's no |
1728 # guarantee that the SVN revision will exist for each of the dependant | 1732 # guarantee that the SVN revision will exist for each of the dependent |
1729 # depots, so we have to grep the git logs and grab the next earlier one. | 1733 # depots, so we have to grep the git logs and grab the next earlier one. |
1730 if (not is_base | 1734 if (not is_base |
1731 and DEPOT_DEPS_NAME[depot]['depends'] | 1735 and DEPOT_DEPS_NAME[depot]['depends'] |
1732 and self.source_control.IsGit()): | 1736 and self.source_control.IsGit()): |
1733 svn_rev = self.source_control.SVNFindRev(revision) | 1737 svn_rev = self.source_control.SVNFindRev(revision) |
1734 | 1738 |
1735 for d in DEPOT_DEPS_NAME[depot]['depends']: | 1739 for d in DEPOT_DEPS_NAME[depot]['depends']: |
1736 self.ChangeToDepotWorkingDirectory(d) | 1740 self.ChangeToDepotWorkingDirectory(d) |
1737 | 1741 |
1738 dependant_rev = self.source_control.ResolveToRevision( | 1742 dependant_rev = self.source_control.ResolveToRevision( |
1739 svn_rev, d, DEPOT_DEPS_NAME, -1000) | 1743 svn_rev, d, DEPOT_DEPS_NAME, -1000) |
1740 | 1744 |
1741 if dependant_rev: | 1745 if dependant_rev: |
1742 revisions_to_sync.append([d, dependant_rev]) | 1746 revisions_to_sync.append([d, dependant_rev]) |
1743 | 1747 |
1744 num_resolved = len(revisions_to_sync) | 1748 num_resolved = len(revisions_to_sync) |
1745 num_needed = len(DEPOT_DEPS_NAME[depot]['depends']) | 1749 num_needed = len(DEPOT_DEPS_NAME[depot]['depends']) |
1746 | 1750 |
1747 self.ChangeToDepotWorkingDirectory(depot) | 1751 self.ChangeToDepotWorkingDirectory(depot) |
1748 | 1752 |
1749 if not ((num_resolved - 1) == num_needed): | 1753 if not ((num_resolved - 1) == num_needed): |
1750 return None | 1754 return None |
1751 | 1755 |
1752 return revisions_to_sync | 1756 return revisions_to_sync |
1753 | 1757 |
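For the skia case described in the docstring, a successful resolution would look roughly like this (depot names taken from the docstring, hashes are placeholders):

revisions_to_sync = [
    ['skia/src', 'aaaaaaaa'],      # the revision being bisected
    ['skia/gyp', 'bbbbbbbb'],      # nearest earlier revision in that mirror
    ['skia/include', 'cccccccc'],
]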
1754 def PerformPreBuildCleanup(self): | 1758 def PerformPreBuildCleanup(self): |
1755 """Performs necessary cleanup between runs.""" | 1759 """Performs cleanup between runs.""" |
1756 print 'Cleaning up between runs.' | 1760 print 'Cleaning up between runs.' |
1757 print | 1761 print |
1758 | 1762 |
1759 # Having these pyc files around between runs can confuse the | 1763 # Leaving these .pyc files around between runs may disrupt some perf tests. |
1760 # perf tests and cause them to crash. | |
1761 for (path, _, files) in os.walk(self.src_cwd): | 1764 for (path, _, files) in os.walk(self.src_cwd): |
1762 for cur_file in files: | 1765 for cur_file in files: |
1763 if cur_file.endswith('.pyc'): | 1766 if cur_file.endswith('.pyc'): |
1764 path_to_file = os.path.join(path, cur_file) | 1767 path_to_file = os.path.join(path, cur_file) |
1765 os.remove(path_to_file) | 1768 os.remove(path_to_file) |
1766 | 1769 |
1767 def PerformWebkitDirectoryCleanup(self, revision): | 1770 def PerformWebkitDirectoryCleanup(self, revision): |
1768 """If the script is switching between Blink and WebKit during bisect, | 1771 """Cleans up the Webkit directory before syncing another revision. |
| 1772 |
| 1773 If the script is switching between Blink and WebKit during bisect, |
1769 its faster to just delete the directory rather than leave it up to git | 1774 it's faster to just delete the directory rather than leave it up to git |
1770 to sync. | 1775 to sync. |
1771 | 1776 |
1772 Returns: | 1777 Returns: |
1773 True if successful. | 1778 True if successful. |
1774 """ | 1779 """ |
1775 if not self.source_control.CheckoutFileAtRevision( | 1780 if not self.source_control.CheckoutFileAtRevision( |
1776 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd): | 1781 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd): |
1777 return False | 1782 return False |
1778 | 1783 |
(...skipping 11 matching lines...) Expand all Loading... |
1790 if self.was_blink != is_blink: | 1795 if self.was_blink != is_blink: |
1791 self.was_blink = is_blink | 1796 self.was_blink = is_blink |
1792 # Removes third_party/Webkit directory. | 1797 # Removes third_party/Webkit directory. |
1793 return bisect_utils.RemoveThirdPartyDirectory('Webkit') | 1798 return bisect_utils.RemoveThirdPartyDirectory('Webkit') |
1794 return True | 1799 return True |
1795 | 1800 |
1796 def PerformCrosChrootCleanup(self): | 1801 def PerformCrosChrootCleanup(self): |
1797 """Deletes the chroot. | 1802 """Deletes the chroot. |
1798 | 1803 |
1799 Returns: | 1804 Returns: |
1800 True if successful. | 1805 True if successful. |
1801 """ | 1806 """ |
1802 cwd = os.getcwd() | 1807 cwd = os.getcwd() |
1803 self.ChangeToDepotWorkingDirectory('cros') | 1808 self.ChangeToDepotWorkingDirectory('cros') |
1804 cmd = [bisect_utils.CROS_SDK_PATH, '--delete'] | 1809 cmd = [bisect_utils.CROS_SDK_PATH, '--delete'] |
1805 return_code = bisect_utils.RunProcess(cmd) | 1810 return_code = bisect_utils.RunProcess(cmd) |
1806 os.chdir(cwd) | 1811 os.chdir(cwd) |
1807 return not return_code | 1812 return not return_code |
1808 | 1813 |
1809 def CreateCrosChroot(self): | 1814 def CreateCrosChroot(self): |
1810 """Creates a new chroot. | 1815 """Creates a new chroot. |
1811 | 1816 |
1812 Returns: | 1817 Returns: |
1813 True if successful. | 1818 True if successful. |
1814 """ | 1819 """ |
1815 cwd = os.getcwd() | 1820 cwd = os.getcwd() |
1816 self.ChangeToDepotWorkingDirectory('cros') | 1821 self.ChangeToDepotWorkingDirectory('cros') |
1817 cmd = [bisect_utils.CROS_SDK_PATH, '--create'] | 1822 cmd = [bisect_utils.CROS_SDK_PATH, '--create'] |
1818 return_code = bisect_utils.RunProcess(cmd) | 1823 return_code = bisect_utils.RunProcess(cmd) |
1819 os.chdir(cwd) | 1824 os.chdir(cwd) |
1820 return not return_code | 1825 return not return_code |
1821 | 1826 |
1822 def PerformPreSyncCleanup(self, revision, depot): | 1827 def PerformPreSyncCleanup(self, revision, depot): |
1823 """Performs any necessary cleanup before syncing. | 1828 """Performs any necessary cleanup before syncing. |
1824 | 1829 |
1825 Returns: | 1830 Returns: |
1826 True if successful. | 1831 True if successful. |
1827 """ | 1832 """ |
1828 if depot == 'chromium' or depot == 'android-chrome': | 1833 if depot == 'chromium' or depot == 'android-chrome': |
1829 # Removes third_party/libjingle. At some point, libjingle was causing | 1834 # Removes third_party/libjingle. At some point, libjingle was causing |
1830 # issues syncing when using the git workflow (crbug.com/266324). | 1835 # issues syncing when using the git workflow (crbug.com/266324). |
1831 os.chdir(self.src_cwd) | 1836 os.chdir(self.src_cwd) |
1832 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'): | 1837 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'): |
1833 return False | 1838 return False |
1834 # Removes third_party/skia. At some point, skia was causing | 1839 # Removes third_party/skia. At some point, skia was causing |
1835 # issues syncing when using the git workflow (crbug.com/377951). | 1840 # issues syncing when using the git workflow (crbug.com/377951). |
1836 if not bisect_utils.RemoveThirdPartyDirectory('skia'): | 1841 if not bisect_utils.RemoveThirdPartyDirectory('skia'): |
1837 return False | 1842 return False |
1838 if depot == 'chromium': | 1843 if depot == 'chromium': |
1839 # The fast webkit cleanup doesn't work for android_chrome | 1844 # The fast webkit cleanup doesn't work for android_chrome |
1840 # The switch from Webkit to Blink that this deals with now happened | 1845 # The switch from Webkit to Blink that this deals with now happened |
1841 # quite a long time ago so this is unlikely to be a problem. | 1846 # quite a long time ago so this is unlikely to be a problem. |
1842 return self.PerformWebkitDirectoryCleanup(revision) | 1847 return self.PerformWebkitDirectoryCleanup(revision) |
1843 elif depot == 'cros': | 1848 elif depot == 'cros': |
1844 return self.PerformCrosChrootCleanup() | 1849 return self.PerformCrosChrootCleanup() |
1845 return True | 1850 return True |
1846 | 1851 |
1847 def RunPostSync(self, depot): | 1852 def RunPostSync(self, depot): |
1848 """Performs any work after syncing. | 1853 """Performs any work after syncing. |
1849 | 1854 |
1850 Returns: | 1855 Returns: |
1851 True if successful. | 1856 True if successful. |
1852 """ | 1857 """ |
1853 if self.opts.target_platform == 'android': | 1858 if self.opts.target_platform == 'android': |
1854 if not builder.SetupAndroidBuildEnvironment(self.opts, | 1859 if not builder.SetupAndroidBuildEnvironment(self.opts, |
1855 path_to_src=self.src_cwd): | 1860 path_to_src=self.src_cwd): |
1856 return False | 1861 return False |
1857 | 1862 |
1858 if depot == 'cros': | 1863 if depot == 'cros': |
1859 return self.CreateCrosChroot() | 1864 return self.CreateCrosChroot() |
1860 else: | 1865 else: |
1861 return self.RunGClientHooks() | 1866 return self.RunGClientHooks() |
1862 return True | 1867 return True |
1863 | 1868 |
1864 def ShouldSkipRevision(self, depot, revision): | 1869 def ShouldSkipRevision(self, depot, revision): |
1865 """Some commits can be safely skipped (such as a DEPS roll), since the tool | 1870 """Checks whether a particular revision can be safely skipped. |
| 1871 |
| 1872 Some commits can be safely skipped (such as a DEPS roll); since the tool |
1866 is git based those changes would have no effect. | 1873 is git-based, those changes would have no effect. |
1867 | 1874 |
1868 Args: | 1875 Args: |
1869 depot: The depot being bisected. | 1876 depot: The depot being bisected. |
1870 revision: Current revision we're synced to. | 1877 revision: Current revision we're synced to. |
1871 | 1878 |
1872 Returns: | 1879 Returns: |
1873 True if we should skip building/testing this revision. | 1880 True if we should skip building/testing this revision. |
1874 """ | 1881 """ |
1875 if depot == 'chromium': | 1882 if depot == 'chromium': |
(...skipping 24 matching lines...) Expand all Loading... |
1900 """ | 1907 """ |
1901 sync_client = None | 1908 sync_client = None |
1902 if depot == 'chromium' or depot == 'android-chrome': | 1909 if depot == 'chromium' or depot == 'android-chrome': |
1903 sync_client = 'gclient' | 1910 sync_client = 'gclient' |
1904 elif depot == 'cros': | 1911 elif depot == 'cros': |
1905 sync_client = 'repo' | 1912 sync_client = 'repo' |
1906 | 1913 |
1907 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot) | 1914 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot) |
1908 | 1915 |
1909 if not revisions_to_sync: | 1916 if not revisions_to_sync: |
1910 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL) | 1917 return ('Failed to resolve dependent depots.', BUILD_RESULT_FAIL) |
1911 | 1918 |
1912 if not self.PerformPreSyncCleanup(revision, depot): | 1919 if not self.PerformPreSyncCleanup(revision, depot): |
1913 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL) | 1920 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL) |
1914 | 1921 |
1915 success = True | 1922 success = True |
1916 | 1923 |
1917 if not self.opts.debug_ignore_sync: | 1924 if not self.opts.debug_ignore_sync: |
1918 for r in revisions_to_sync: | 1925 for r in revisions_to_sync: |
1919 self.ChangeToDepotWorkingDirectory(r[0]) | 1926 self.ChangeToDepotWorkingDirectory(r[0]) |
1920 | 1927 |
1921 if sync_client: | 1928 if sync_client: |
1922 self.PerformPreBuildCleanup() | 1929 self.PerformPreBuildCleanup() |
1923 | 1930 |
1924 # If you're using gclient to sync, you need to specify the depot you | 1931 # If you're using gclient to sync, you need to specify the depot you |
1925 # want so that all the dependencies sync properly as well. | 1932 # want so that all the dependencies sync properly as well. |
1926 # ie. gclient sync src@<SHA1> | 1933 # i.e. gclient sync src@<SHA1> |
1927 current_revision = r[1] | 1934 current_revision = r[1] |
1928 if sync_client == 'gclient': | 1935 if sync_client == 'gclient': |
1929 current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'], | 1936 current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'], |
1930 current_revision) | 1937 current_revision) |
1931 if not self.source_control.SyncToRevision(current_revision, | 1938 if not self.source_control.SyncToRevision(current_revision, |
1932 sync_client): | 1939 sync_client): |
1933 success = False | 1940 success = False |
1934 | 1941 |
1935 break | 1942 break |
1936 | 1943 |
1937 if success: | 1944 if success: |
1938 success = self.RunPostSync(depot) | 1945 success = self.RunPostSync(depot) |
1939 if success: | 1946 if success: |
1940 if skippable and self.ShouldSkipRevision(depot, revision): | 1947 if skippable and self.ShouldSkipRevision(depot, revision): |
1941 return ('Skipped revision: [%s]' % str(revision), | 1948 return ('Skipped revision: [%s]' % str(revision), |
1942 BUILD_RESULT_SKIPPED) | 1949 BUILD_RESULT_SKIPPED) |
1943 | 1950 |
1944 start_build_time = time.time() | 1951 start_build_time = time.time() |
1945 if self.BuildCurrentRevision(depot, revision): | 1952 if self.BuildCurrentRevision(depot, revision): |
1946 after_build_time = time.time() | 1953 after_build_time = time.time() |
1947 # Hack to support things that got changed. | 1954 # Hack to support things that got changed. |
1948 command_to_run = self.GetCompatibleCommand( | 1955 command_to_run = self.GetCompatibleCommand( |
1949 command_to_run, revision, depot) | 1956 command_to_run, revision, depot) |
1950 results = self.RunPerformanceTestAndParseResults(command_to_run, | 1957 results = self.RunPerformanceTestAndParseResults(command_to_run, |
1951 metric) | 1958 metric) |
1952 # Restore build output directory once the tests are done, to avoid | 1959 # Restore build output directory once the tests are done, to avoid |
1953 # any descrepancy. | 1960 # any discrepancies. |
1954 if self.IsDownloadable(depot) and revision: | 1961 if self.IsDownloadable(depot) and revision: |
1955 self.BackupOrRestoreOutputdirectory(restore=True) | 1962 self.BackupOrRestoreOutputdirectory(restore=True) |
1956 | 1963 |
1957 if results[1] == 0: | 1964 if results[1] == 0: |
1958 external_revisions = self._Get3rdPartyRevisions(depot) | 1965 external_revisions = self._Get3rdPartyRevisions(depot) |
1959 | 1966 |
1960 if not external_revisions is None: | 1967 if external_revisions is not None: |
1961 return (results[0], results[1], external_revisions, | 1968 return (results[0], results[1], external_revisions, |
1962 time.time() - after_build_time, after_build_time - | 1969 time.time() - after_build_time, after_build_time - |
1963 start_build_time) | 1970 start_build_time) |
(...skipping 266 matching lines...)
2230 | 2237 |
2231 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | 2238 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) |
2232 good_commit_time = int(output) | 2239 good_commit_time = int(output) |
2233 | 2240 |
2234 cmd = ['log', '--format=%ct', '-1', bad_revision] | 2241 cmd = ['log', '--format=%ct', '-1', bad_revision] |
2235 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) | 2242 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) |
2236 bad_commit_time = int(output) | 2243 bad_commit_time = int(output) |
2237 | 2244 |
2238 return good_commit_time <= bad_commit_time | 2245 return good_commit_time <= bad_commit_time |
2239 else: | 2246 else: |
2240 # Cros/svn use integers | 2247 # CrOS and SVN use integers. |
2241 return int(good_revision) <= int(bad_revision) | 2248 return int(good_revision) <= int(bad_revision) |
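# For reference, a stand-alone version of the ordering check above, assuming
# `git` is on PATH and the working directory is inside the bisected repo
# (the helper name is hypothetical):
import subprocess

def _commit_time_sketch(git_sha, repo_dir):
  # %ct is the committer timestamp in seconds since the epoch.
  output = subprocess.check_output(
      ['git', 'log', '--format=%ct', '-1', git_sha], cwd=repo_dir)
  return int(output)

# Proper order means the good commit is not newer than the bad one:
#   _commit_time_sketch(good_sha, repo) <= _commit_time_sketch(bad_sha, repo)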
2242 | 2249 |
2243 def CanPerformBisect(self, revision_to_check): | 2250 def CanPerformBisect(self, revision_to_check): |
2244 """Checks whether a given revision is bisectable. | 2251 """Checks whether a given revision is bisectable. |
2245 | 2252 |
2246 Note: At present it checks whether a given revision is bisectable on | 2253 Note: At present this only checks whether a given revision is |
2247 android bots(refer crbug.com/385324). | 2254 bisectable on Android bots (see crbug.com/385324). |
2248 | 2255 |
2249 Args: | 2256 Args: |
2250 revision_to_check: Known good revision. | 2257 revision_to_check: Known good revision. |
2251 | 2258 |
2252 Returns: | 2259 Returns: |
2253 A dictionary indicating the result. If revision is not bisectable, | 2260 A dictionary with the field "error" if the revision is not |
2254 this will contain the field "error", otherwise None. | 2261 bisectable, otherwise None. |
2255 """ | 2262 """ |
2256 if self.opts.target_platform == 'android': | 2263 if self.opts.target_platform == 'android': |
2257 revision_to_check = self.source_control.SVNFindRev(revision_to_check) | 2264 revision_to_check = self.source_control.SVNFindRev(revision_to_check) |
2258 if (bisect_utils.IsStringInt(revision_to_check) | 2265 if (bisect_utils.IsStringInt(revision_to_check) |
2259 and revision_to_check < 265549): | 2266 and revision_to_check < 265549): |
2260 return {'error': ( | 2267 return {'error': ( |
2261 'Bisect cannot conitnue for the given revision range.\n' | 2268 'Bisect cannot continue for the given revision range.\n' |
2262 'It is impossible to bisect Android regressions ' | 2269 'It is impossible to bisect Android regressions ' |
2263 'prior to r265549, which allows the bisect bot to ' | 2270 'prior to r265549, which allows the bisect bot to ' |
2264 'rely on Telemetry to do apk installation of the most recently ' | 2271 'rely on Telemetry to do apk installation of the most recently ' |
2265 'built local ChromeShell(refer to crbug.com/385324).\n' | 2272 'built local ChromeShell (refer to crbug.com/385324).\n' |
2266 'Please try bisecting revisions greater than or equal to r265549.')} | 2273 'Please try bisecting revisions greater than or equal to r265549.')} |
2267 return None | 2274 return None |
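# A hypothetical caller, showing how the contract above is meant to be
# consumed (`bisector` stands in for a constructed bisect object):
def _check_bisectable_sketch(bisector, good_revision):
  # CanPerformBisect returns None when bisect can proceed, otherwise a dict
  # whose 'error' field explains why it cannot.
  error_result = bisector.CanPerformBisect(good_revision)
  if error_result:
    print error_result['error']
    return False
  return True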
2268 | 2275 |
2269 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): | 2276 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): |
2270 """Given known good and bad revisions, run a binary search on all | 2277 """Given known good and bad revisions, run a binary search on all |
2271 intermediate revisions to determine the CL where the performance regression | 2278 intermediate revisions to determine the CL where the performance regression |
2272 occurred. | 2279 occurred. |
2273 | 2280 |
2274 Args: | 2281 Args: |
2275 command_to_run: Specify the command to execute the performance test. | 2282 command_to_run: The command to run the performance test. |
2276 good_revision: Number/tag of the known good revision. | 2283 good_revision_in: Number/tag of the known good revision. |
2277 bad_revision: Number/tag of the known bad revision. | 2284 bad_revision_in: Number/tag of the known bad revision. |
2278 metric: The performance metric to monitor. | 2285 metric: The performance metric to monitor. |
2279 | 2286 |
2280 Returns: | 2287 Returns: |
2281 A dict with 2 members, 'revision_data' and 'error'. On success, | 2288 A dict with 2 members, 'revision_data' and 'error'. On success, |
2282 'revision_data' will contain a dict mapping revision ids to | 2289 'revision_data' will contain a dict mapping revision ids to |
2283 data about that revision. Each piece of revision data consists of a | 2290 data about that revision. Each piece of revision data consists of a |
2284 dict with the following keys: | 2291 dict with the following keys: |
2285 | 2292 |
2286 'passed': Represents whether the performance test was successful at | 2293 'passed': Represents whether the performance test was successful at |
2287 that revision. Possible values include: 1 (passed), 0 (failed), | 2294 that revision. Possible values include: 1 (passed), 0 (failed), |
2288 '?' (skipped), 'F' (build failed). | 2295 '?' (skipped), 'F' (build failed). |
2289 'depot': The depot that this revision is from (ie. WebKit) | 2296 'depot': The depot that this revision is from (e.g. WebKit). |
2290 'external': If the revision is a 'src' revision, 'external' contains | 2297 'external': If the revision is a 'src' revision, 'external' contains |
2291 the revisions of each of the external libraries. | 2298 the revisions of each of the external libraries. |
2292 'sort': A sort value for sorting the dict in order of commits. | 2299 'sort': A sort value for sorting the dict in order of commits. |
2293 | 2300 |
2294 For example: | 2301 For example: |
2295 { | 2302 { |
2296 'error':None, | 2303 'error':None, |
2297 'revision_data': | 2304 'revision_data': |
2298 { | 2305 { |
2299 'CL #1': | 2306 'CL #1': |
2300 { | 2307 { |
2301 'passed':False, | 2308 'passed': False, |
2302 'depot':'chromium', | 2309 'depot': 'chromium', |
2303 'external':None, | 2310 'external': None, |
2304 'sort':0 | 2311 'sort': 0 |
2305 } | 2312 } |
2306 } | 2313 } |
2307 } | 2314 } |
2308 | 2315 |
2309 If an error occurred, the 'error' field will contain the message and | 2316 If an error occurred, the 'error' field will contain the message and |
2310 'revision_data' will be empty. | 2317 'revision_data' will be empty. |
2311 """ | 2318 """ |
2312 results = { | 2319 results = { |
2313 'revision_data' : {}, | 2320 'revision_data': {}, |
2314 'error' : None, | 2321 'error': None, |
2315 } | 2322 } |
2316 | 2323 |
2317 # Choose depot to bisect first | 2324 # Choose depot to bisect first |
2318 target_depot = 'chromium' | 2325 target_depot = 'chromium' |
2319 if self.opts.target_platform == 'cros': | 2326 if self.opts.target_platform == 'cros': |
2320 target_depot = 'cros' | 2327 target_depot = 'cros' |
2321 elif self.opts.target_platform == 'android-chrome': | 2328 elif self.opts.target_platform == 'android-chrome': |
2322 target_depot = 'android-chrome' | 2329 target_depot = 'android-chrome' |
2323 | 2330 |
2324 cwd = os.getcwd() | 2331 cwd = os.getcwd() |
2325 self.ChangeToDepotWorkingDirectory(target_depot) | 2332 self.ChangeToDepotWorkingDirectory(target_depot) |
2326 | 2333 |
2327 # If they passed SVN CL's, etc... we can try match them to git SHA1's. | 2334 # If they passed SVN revisions, try to match them to git SHA1 hashes. |
2328 bad_revision = self.source_control.ResolveToRevision( | 2335 bad_revision = self.source_control.ResolveToRevision( |
2329 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100) | 2336 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100) |
2330 good_revision = self.source_control.ResolveToRevision( | 2337 good_revision = self.source_control.ResolveToRevision( |
2331 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100) | 2338 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100) |
2332 | 2339 |
2333 os.chdir(cwd) | 2340 os.chdir(cwd) |
2334 | 2341 |
2335 if bad_revision is None: | 2342 if bad_revision is None: |
2336 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,) | 2343 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % bad_revision_in |
2337 return results | 2344 return results |
2338 | 2345 |
2339 if good_revision is None: | 2346 if good_revision is None: |
2340 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,) | 2347 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % good_revision_in |
2341 return results | 2348 return results |
2342 | 2349 |
2343 # Check that they didn't accidentally swap good and bad revisions. | 2350 # Check that they didn't accidentally swap good and bad revisions. |
2344 if not self.CheckIfRevisionsInProperOrder( | 2351 if not self.CheckIfRevisionsInProperOrder( |
2345 target_depot, good_revision, bad_revision): | 2352 target_depot, good_revision, bad_revision): |
2346 results['error'] = ('bad_revision < good_revision, did you swap these ' | 2353 results['error'] = ('bad_revision < good_revision, did you swap these ' |
2347 'by mistake?') | 2354 'by mistake?') |
2348 return results | 2355 return results |
2349 | 2356 |
2350 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange( | 2357 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange( |
(...skipping 264 matching lines...)
2615 return '' | 2622 return '' |
2616 | 2623 |
2617 def _PrintRevisionInfo(self, cl, info, depot=None): | 2624 def _PrintRevisionInfo(self, cl, info, depot=None): |
2618 email_info = '' | 2625 email_info = '' |
2619 if not info['email'].startswith(info['author']): | 2626 if not info['email'].startswith(info['author']): |
2620 email_info = '\nEmail : %s' % info['email'] | 2627 email_info = '\nEmail : %s' % info['email'] |
2621 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot) | 2628 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot) |
2622 if commit_link: | 2629 if commit_link: |
2623 commit_info = '\nLink : %s' % commit_link | 2630 commit_info = '\nLink : %s' % commit_link |
2624 else: | 2631 else: |
2625 commit_info = ('\nFailed to parse svn revision from body:\n%s' % | 2632 commit_info = ('\nFailed to parse SVN revision from body:\n%s' % |
2626 info['body']) | 2633 info['body']) |
2627 print RESULTS_REVISION_INFO % { | 2634 print RESULTS_REVISION_INFO % { |
2628 'subject': info['subject'], | 2635 'subject': info['subject'], |
2629 'author': info['author'], | 2636 'author': info['author'], |
2630 'email_info': email_info, | 2637 'email_info': email_info, |
2631 'commit_info': commit_info, | 2638 'commit_info': commit_info, |
2632 'cl': cl, | 2639 'cl': cl, |
2633 'cl_date': info['date'] | 2640 'cl_date': info['date'] |
2634 } | 2641 } |
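# Illustrative only: the real RESULTS_REVISION_INFO constant is defined near
# the top of this file and may differ, but the dict-keyed formatting above
# expects a template of roughly this shape:
_EXAMPLE_REVISION_TEMPLATE = (
    'Subject : %(subject)s\n'
    'Author  : %(author)s%(email_info)s%(commit_info)s\n'
    'Commit  : %(cl)s\n'
    'Date    : %(cl_date)s\n')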
2635 | 2642 |
(...skipping 417 matching lines...)
3053 | 3060 |
3054 @staticmethod | 3061 @staticmethod |
3055 def _CreateCommandLineParser(): | 3062 def _CreateCommandLineParser(): |
3056 """Creates a parser with bisect options. | 3063 """Creates a parser with bisect options. |
3057 | 3064 |
3058 Returns: | 3065 Returns: |
3059 An instance of optparse.OptionParser. | 3066 An instance of optparse.OptionParser. |
3060 """ | 3067 """ |
3061 usage = ('%prog [options] [-- chromium-options]\n' | 3068 usage = ('%prog [options] [-- chromium-options]\n' |
3062 'Perform binary search on revision history to find a minimal ' | 3069 'Perform binary search on revision history to find a minimal ' |
3063 'range of revisions where a peformance metric regressed.\n') | 3070 'range of revisions where a performance metric regressed.\n') |
3064 | 3071 |
3065 parser = optparse.OptionParser(usage=usage) | 3072 parser = optparse.OptionParser(usage=usage) |
3066 | 3073 |
3067 group = optparse.OptionGroup(parser, 'Bisect options') | 3074 group = optparse.OptionGroup(parser, 'Bisect options') |
3068 group.add_option('-c', '--command', | 3075 group.add_option('-c', '--command', |
3069 type='str', | 3076 type='str', |
3070 help='A command to execute your performance test at' + | 3077 help='A command to execute your performance test at' + |
3071 ' each point in the bisection.') | 3078 ' each point in the bisection.') |
3072 group.add_option('-b', '--bad_revision', | 3079 group.add_option('-b', '--bad_revision', |
3073 type='str', | 3080 type='str', |
(...skipping 147 matching lines...)
3221 if not opts.bad_revision: | 3228 if not opts.bad_revision: |
3222 raise RuntimeError('missing required parameter: --bad_revision') | 3229 raise RuntimeError('missing required parameter: --bad_revision') |
3223 | 3230 |
3224 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE: | 3231 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE: |
3225 raise RuntimeError('missing required parameter: --metric') | 3232 raise RuntimeError('missing required parameter: --metric') |
3226 | 3233 |
3227 if opts.gs_bucket: | 3234 if opts.gs_bucket: |
3228 if not cloud_storage.List(opts.gs_bucket): | 3235 if not cloud_storage.List(opts.gs_bucket): |
3229 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) | 3236 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) |
3230 if not opts.builder_host: | 3237 if not opts.builder_host: |
3231 raise RuntimeError('Must specify try server hostname, when ' | 3238 raise RuntimeError('Must specify try server host name using ' |
3232 'gs_bucket is used: --builder_host') | 3239 '--builder_host when gs_bucket is used.') |
3233 if not opts.builder_port: | 3240 if not opts.builder_port: |
3234 raise RuntimeError('Must specify try server port number, when ' | 3241 raise RuntimeError('Must specify try server port number using ' |
3235 'gs_bucket is used: --builder_port') | 3242 '--builder_port when gs_bucket is used.') |
3236 if opts.target_platform == 'cros': | 3243 if opts.target_platform == 'cros': |
3237 # Run sudo up front to make sure credentials are cached for later. | 3244 # Run sudo up front to make sure credentials are cached for later. |
3238 print 'Sudo is required to build cros:' | 3245 print 'Sudo is required to build cros:' |
3239 print | 3246 print |
3240 bisect_utils.RunProcess(['sudo', 'true']) | 3247 bisect_utils.RunProcess(['sudo', 'true']) |
3241 | 3248 |
3242 if not opts.cros_board: | 3249 if not opts.cros_board: |
3243 raise RuntimeError('missing required parameter: --cros_board') | 3250 raise RuntimeError('missing required parameter: --cros_board') |
3244 | 3251 |
3245 if not opts.cros_remote_ip: | 3252 if not opts.cros_remote_ip: |
(...skipping 18 matching lines...)
3264 setattr(self, k, v) | 3271 setattr(self, k, v) |
3265 except RuntimeError, e: | 3272 except RuntimeError, e: |
3266 output_string = StringIO.StringIO() | 3273 output_string = StringIO.StringIO() |
3267 parser.print_help(file=output_string) | 3274 parser.print_help(file=output_string) |
3268 error_message = '%s\n\n%s' % (e.message, output_string.getvalue()) | 3275 error_message = '%s\n\n%s' % (e.message, output_string.getvalue()) |
3269 output_string.close() | 3276 output_string.close() |
3270 raise RuntimeError(error_message) | 3277 raise RuntimeError(error_message) |
3271 | 3278 |
3272 @staticmethod | 3279 @staticmethod |
3273 def FromDict(values): | 3280 def FromDict(values): |
3274 """Creates an instance of BisectOptions with the values parsed from a | 3281 """Creates an instance of BisectOptions from a dictionary. |
3275 .cfg file. | |
3276 | 3282 |
3277 Args: | 3283 Args: |
3278 values: a dict containing options to set. | 3284 values: a dict containing options to set. |
3279 | 3285 |
3280 Returns: | 3286 Returns: |
3281 An instance of BisectOptions. | 3287 An instance of BisectOptions. |
3282 """ | 3288 """ |
3283 opts = BisectOptions() | 3289 opts = BisectOptions() |
3284 for k, v in values.iteritems(): | 3290 for k, v in values.iteritems(): |
3285 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k | 3291 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k |
(...skipping 70 matching lines...)
3356 # bugs. If you change this, please update the perf dashboard as well. | 3362 # bugs. If you change this, please update the perf dashboard as well. |
3357 bisect_utils.OutputAnnotationStepStart('Results') | 3363 bisect_utils.OutputAnnotationStepStart('Results') |
3358 print 'Error: %s' % e.message | 3364 print 'Error: %s' % e.message |
3359 if opts.output_buildbot_annotations: | 3365 if opts.output_buildbot_annotations: |
3360 bisect_utils.OutputAnnotationStepClosed() | 3366 bisect_utils.OutputAnnotationStepClosed() |
3361 return 1 | 3367 return 1 |
3362 | 3368 |
3363 | 3369 |
3364 if __name__ == '__main__': | 3370 if __name__ == '__main__': |
3365 sys.exit(main()) | 3371 sys.exit(main()) |