OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Performance Test Bisect Tool | 6 """Performance Test Bisect Tool |
7 | 7 |
8 This script bisects a series of changelists using binary search. It starts at | 8 This script bisects a series of changelists using binary search. It starts at |
9 a bad revision where a performance metric has regressed, and asks for a last | 9 a bad revision where a performance metric has regressed, and asks for a last |
10 known-good revision. It will then binary search across this revision range by | 10 known-good revision. It will then binary search across this revision range by |
(...skipping 18 matching lines...) Expand all Loading... | |
29 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ | 29 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ |
30 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ | 30 -g 1f6e67861535121c5c819c16a666f2436c207e7b\ |
31 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ | 31 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ |
32 -m shutdown/simple-user-quit | 32 -m shutdown/simple-user-quit |
33 """ | 33 """ |
34 | 34 |
35 import copy | 35 import copy |
36 import datetime | 36 import datetime |
37 import errno | 37 import errno |
38 import hashlib | 38 import hashlib |
39 import logging | |
39 import optparse | 40 import optparse |
40 import os | 41 import os |
41 import re | 42 import re |
42 import shlex | 43 import shlex |
43 import shutil | 44 import shutil |
44 import StringIO | 45 import StringIO |
45 import sys | 46 import sys |
46 import time | 47 import time |
47 import zipfile | 48 import zipfile |
48 | 49 |
49 sys.path.append(os.path.join( | 50 sys.path.append(os.path.join( |
50 os.path.dirname(__file__), os.path.pardir, 'telemetry')) | 51 os.path.dirname(__file__), os.path.pardir, 'telemetry')) |
51 | 52 |
52 from bisect_results import BisectResults | 53 from bisect_results import BisectResults |
53 from bisect_results import ConfidenceScore | 54 from bisect_results import ConfidenceScore |
54 import bisect_utils | 55 import bisect_utils |
55 import builder | 56 import builder |
56 import math_utils | 57 import math_utils |
57 import request_build | 58 import request_build |
58 import source_control | 59 import source_control |
59 from telemetry.util import cloud_storage | 60 from telemetry.util import cloud_storage |
60 | 61 |
62 | |
qyearsley
2014/10/20 18:26:00
I think this blank line isn't necessary according
RobertoCN
2014/10/20 21:01:03
Done.
| |
61 # Below is the map of "depot" names to information about each depot. Each depot | 63 # Below is the map of "depot" names to information about each depot. Each depot |
62 # is a repository, and in the process of bisecting, revision ranges in these | 64 # is a repository, and in the process of bisecting, revision ranges in these |
63 # repositories may also be bisected. | 65 # repositories may also be bisected. |
64 # | 66 # |
65 # Each depot information dictionary may contain: | 67 # Each depot information dictionary may contain: |
66 # src: Path to the working directory. | 68 # src: Path to the working directory. |
67 # recurse: True if this repository will get bisected. | 69 # recurse: True if this repository will get bisected. |
68 # depends: A list of other repositories that are actually part of the same | 70 # depends: A list of other repositories that are actually part of the same |
69 # repository in svn. If the repository has any dependent repositories | 71 # repository in svn. If the repository has any dependent repositories |
70 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then | 72 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then |
(...skipping 266 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
337 bucket_name: Google Storage bucket name. | 339 bucket_name: Google Storage bucket name. |
338 source_path: Source file path. | 340 source_path: Source file path. |
339 destination_path: Destination file path. | 341 destination_path: Destination file path. |
340 | 342 |
341 Returns: | 343 Returns: |
342 Downloaded file path if exists, otherwise None. | 344 Downloaded file path if exists, otherwise None. |
343 """ | 345 """ |
344 target_file = os.path.join(destination_path, os.path.basename(source_path)) | 346 target_file = os.path.join(destination_path, os.path.basename(source_path)) |
345 try: | 347 try: |
346 if cloud_storage.Exists(bucket_name, source_path): | 348 if cloud_storage.Exists(bucket_name, source_path): |
347 print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path) | 349 logging.info('Fetching file from gs//%s/%s ...', |
350 bucket_name, source_path) | |
348 cloud_storage.Get(bucket_name, source_path, destination_path) | 351 cloud_storage.Get(bucket_name, source_path, destination_path) |
349 if os.path.exists(target_file): | 352 if os.path.exists(target_file): |
350 return target_file | 353 return target_file |
351 else: | 354 else: |
352 print ('File gs://%s/%s not found in cloud storage.' % ( | 355 logging.info('File gs://%s/%s not found in cloud storage.', |
353 bucket_name, source_path)) | 356 bucket_name, source_path) |
354 except Exception as e: | 357 except Exception as e: |
355 print 'Something went wrong while fetching file from cloud: %s' % e | 358 logging.warn('Something went wrong while fetching file from cloud: %s', e) |
356 if os.path.exists(target_file): | 359 if os.path.exists(target_file): |
357 os.remove(target_file) | 360 os.remove(target_file) |
358 return None | 361 return None |
359 | 362 |
360 | 363 |
361 # This is copied from build/scripts/common/chromium_utils.py. | 364 # This is copied from build/scripts/common/chromium_utils.py. |
362 def MaybeMakeDirectory(*path): | 365 def MaybeMakeDirectory(*path): |
363 """Creates an entire path, if it doesn't already exist.""" | 366 """Creates an entire path, if it doesn't already exist.""" |
364 file_path = os.path.join(*path) | 367 file_path = os.path.join(*path) |
365 try: | 368 try: |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
402 command = unzip_cmd + [filepath] | 405 command = unzip_cmd + [filepath] |
403 result = bisect_utils.RunProcess(command) | 406 result = bisect_utils.RunProcess(command) |
404 os.chdir(saved_dir) | 407 os.chdir(saved_dir) |
405 if result: | 408 if result: |
406 raise IOError('unzip failed: %s => %s' % (str(command), result)) | 409 raise IOError('unzip failed: %s => %s' % (str(command), result)) |
407 else: | 410 else: |
408 assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost() | 411 assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost() |
409 zf = zipfile.ZipFile(filename) | 412 zf = zipfile.ZipFile(filename) |
410 for name in zf.namelist(): | 413 for name in zf.namelist(): |
411 if verbose: | 414 if verbose: |
412 print 'Extracting %s' % name | 415 logging.info('Extracting %s', name) |
413 zf.extract(name, output_dir) | 416 zf.extract(name, output_dir) |
414 if bisect_utils.IsMacHost(): | 417 if bisect_utils.IsMacHost(): |
415 # Restore permission bits. | 418 # Restore permission bits. |
416 os.chmod(os.path.join(output_dir, name), | 419 os.chmod(os.path.join(output_dir, name), |
417 zf.getinfo(name).external_attr >> 16L) | 420 zf.getinfo(name).external_attr >> 16L) |
418 | 421 |
419 | 422 |
420 def WriteStringToFile(text, file_name): | 423 def WriteStringToFile(text, file_name): |
421 """Writes text to a file, raising an RuntimeError on failure.""" | 424 """Writes text to a file, raising an RuntimeError on failure.""" |
422 try: | 425 try: |
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
516 # Note: Build is treated as PENDING if build number is not found | 519 # Note: Build is treated as PENDING if build number is not found |
517 # on the try server. | 520 # on the try server. |
518 build_status, status_link = request_build.GetBuildStatus( | 521 build_status, status_link = request_build.GetBuildStatus( |
519 build_num, bot_name, builder_host, builder_port) | 522 build_num, bot_name, builder_host, builder_port) |
520 if build_status == request_build.FAILED: | 523 if build_status == request_build.FAILED: |
521 return (None, 'Failed to produce build, log: %s' % status_link) | 524 return (None, 'Failed to produce build, log: %s' % status_link) |
522 elapsed_time = time.time() - start_time | 525 elapsed_time = time.time() - start_time |
523 if elapsed_time > max_timeout: | 526 if elapsed_time > max_timeout: |
524 return (None, 'Timed out: %ss without build' % max_timeout) | 527 return (None, 'Timed out: %ss without build' % max_timeout) |
525 | 528 |
526 print 'Time elapsed: %ss without build.' % elapsed_time | 529 logging.info('Time elapsed: %ss without build.', elapsed_time) |
527 time.sleep(poll_interval) | 530 time.sleep(poll_interval) |
528 # For some reason, mac bisect bots were not flushing stdout periodically. | 531 # For some reason, mac bisect bots were not flushing stdout periodically. |
529 # As a result buildbot command is timed-out. Flush stdout on all platforms | 532 # As a result buildbot command is timed-out. Flush stdout on all platforms |
530 # while waiting for build. | 533 # while waiting for build. |
531 sys.stdout.flush() | 534 sys.stdout.flush() |
532 | 535 |
533 | 536 |
534 def _UpdateV8Branch(deps_content): | 537 def _UpdateV8Branch(deps_content): |
535 """Updates V8 branch in DEPS file to process v8_bleeding_edge. | 538 """Updates V8 branch in DEPS file to process v8_bleeding_edge. |
536 | 539 |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
587 new_data = re.sub(angle_rev_pattern, revision, deps_contents) | 590 new_data = re.sub(angle_rev_pattern, revision, deps_contents) |
588 else: | 591 else: |
589 # Check whether the depot and revision pattern in DEPS file deps | 592 # Check whether the depot and revision pattern in DEPS file deps |
590 # variable. e.g., | 593 # variable. e.g., |
591 # "src/third_party/angle": Var("chromium_git") + | 594 # "src/third_party/angle": Var("chromium_git") + |
592 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",. | 595 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",. |
593 angle_rev_pattern = re.compile( | 596 angle_rev_pattern = re.compile( |
594 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE) | 597 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE) |
595 match = re.search(angle_rev_pattern, deps_contents) | 598 match = re.search(angle_rev_pattern, deps_contents) |
596 if not match: | 599 if not match: |
597 print 'Could not find angle revision information in DEPS file.' | 600 logging.info('Could not find angle revision information in DEPS file.') |
598 return False | 601 return False |
599 new_data = re.sub(angle_rev_pattern, revision, deps_contents) | 602 new_data = re.sub(angle_rev_pattern, revision, deps_contents) |
600 # Write changes to DEPS file | 603 # Write changes to DEPS file |
601 WriteStringToFile(new_data, deps_file) | 604 WriteStringToFile(new_data, deps_file) |
602 return True | 605 return True |
603 except IOError, e: | 606 except IOError, e: |
604 print 'Something went wrong while updating DEPS file, %s' % e | 607 logging.warn('Something went wrong while updating DEPS file, %s', e) |
605 return False | 608 return False |
606 | 609 |
607 | 610 |
608 def _TryParseHistogramValuesFromOutput(metric, text): | 611 def _TryParseHistogramValuesFromOutput(metric, text): |
609 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>. | 612 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>. |
610 | 613 |
611 Args: | 614 Args: |
612 metric: The metric as a list of [<trace>, <value>] strings. | 615 metric: The metric as a list of [<trace>, <value>] strings. |
613 text: The text to parse the metric values from. | 616 text: The text to parse the metric values from. |
614 | 617 |
(...skipping 318 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
933 '-n', bisect_job_name, | 936 '-n', bisect_job_name, |
934 '--svn_repo=%s' % SVN_REPO_URL, | 937 '--svn_repo=%s' % SVN_REPO_URL, |
935 '--diff=%s' % patch_content | 938 '--diff=%s' % patch_content |
936 ] | 939 ] |
937 # Execute try job to build revision. | 940 # Execute try job to build revision. |
938 output, returncode = bisect_utils.RunGit(try_cmd) | 941 output, returncode = bisect_utils.RunGit(try_cmd) |
939 | 942 |
940 if returncode: | 943 if returncode: |
941 raise RunGitError('Could not execute tryjob: %s.\n Error: %s' % ( | 944 raise RunGitError('Could not execute tryjob: %s.\n Error: %s' % ( |
942 'git %s' % ' '.join(try_cmd), output)) | 945 'git %s' % ' '.join(try_cmd), output)) |
943 print ('Try job successfully submitted.\n TryJob Details: %s\n%s' % ( | 946 logging.info('Try job successfully submitted.\n TryJob Details: %s\n%s', |
944 'git %s' % ' '.join(try_cmd), output)) | 947 'git %s' % ' '.join(try_cmd), output) |
945 finally: | 948 finally: |
946 # Delete patch file if exists | 949 # Delete patch file if exists |
947 try: | 950 try: |
948 os.remove(BISECT_PATCH_FILE) | 951 os.remove(BISECT_PATCH_FILE) |
949 except OSError as e: | 952 except OSError as e: |
950 if e.errno != errno.ENOENT: | 953 if e.errno != errno.ENOENT: |
951 raise | 954 raise |
952 # Checkout master branch and delete bisect-tryjob branch. | 955 # Checkout master branch and delete bisect-tryjob branch. |
953 bisect_utils.RunGit(['checkout', '-f', BISECT_MASTER_BRANCH]) | 956 bisect_utils.RunGit(['checkout', '-f', BISECT_MASTER_BRANCH]) |
954 bisect_utils.RunGit(['branch', '-D', BISECT_TRYJOB_BRANCH]) | 957 bisect_utils.RunGit(['branch', '-D', BISECT_TRYJOB_BRANCH]) |
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1150 self.warnings.append(warning_text) | 1153 self.warnings.append(warning_text) |
1151 else: | 1154 else: |
1152 results[depot_name] = None | 1155 results[depot_name] = None |
1153 return results | 1156 return results |
1154 except ImportError: | 1157 except ImportError: |
1155 deps_file_contents = ReadStringFromFile(deps_file) | 1158 deps_file_contents = ReadStringFromFile(deps_file) |
1156 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents) | 1159 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents) |
1157 results = {} | 1160 results = {} |
1158 for depot_name, depot_revision in parse_results.iteritems(): | 1161 for depot_name, depot_revision in parse_results.iteritems(): |
1159 depot_revision = depot_revision.strip('@') | 1162 depot_revision = depot_revision.strip('@') |
1160 print depot_name, depot_revision | 1163 logging.warn(depot_name, depot_revision) |
1161 for current_name, current_data in DEPOT_DEPS_NAME.iteritems(): | 1164 for current_name, current_data in DEPOT_DEPS_NAME.iteritems(): |
1162 if (current_data.has_key('deps_var') and | 1165 if (current_data.has_key('deps_var') and |
1163 current_data['deps_var'] == depot_name): | 1166 current_data['deps_var'] == depot_name): |
1164 src_name = current_name | 1167 src_name = current_name |
1165 results[src_name] = depot_revision | 1168 results[src_name] = depot_revision |
1166 break | 1169 break |
1167 return results | 1170 return results |
1168 | 1171 |
1169 def _Get3rdPartyRevisions(self, depot): | 1172 def _Get3rdPartyRevisions(self, depot): |
1170 """Parses the DEPS file to determine WebKit/v8/etc... versions. | 1173 """Parses the DEPS file to determine WebKit/v8/etc... versions. |
(...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1343 target_build_output_dir = os.path.join(abs_build_dir, build_type) | 1346 target_build_output_dir = os.path.join(abs_build_dir, build_type) |
1344 ExtractZip(downloaded_file, abs_build_dir) | 1347 ExtractZip(downloaded_file, abs_build_dir) |
1345 if not os.path.exists(output_dir): | 1348 if not os.path.exists(output_dir): |
1346 # Due to recipe changes, the builds extract folder contains | 1349 # Due to recipe changes, the builds extract folder contains |
1347 # out/Release instead of full-build-<platform>/Release. | 1350 # out/Release instead of full-build-<platform>/Release. |
1348 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)): | 1351 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)): |
1349 output_dir = os.path.join(abs_build_dir, 'out', build_type) | 1352 output_dir = os.path.join(abs_build_dir, 'out', build_type) |
1350 else: | 1353 else: |
1351 raise IOError('Missing extracted folder %s ' % output_dir) | 1354 raise IOError('Missing extracted folder %s ' % output_dir) |
1352 | 1355 |
1353 print 'Moving build from %s to %s' % ( | 1356 logging.info('Moving build from %s to %s', |
1354 output_dir, target_build_output_dir) | 1357 output_dir, target_build_output_dir) |
1355 shutil.move(output_dir, target_build_output_dir) | 1358 shutil.move(output_dir, target_build_output_dir) |
1356 return True | 1359 return True |
1357 except Exception as e: | 1360 except Exception as e: |
1358 print 'Something went wrong while extracting archive file: %s' % e | 1361 logging.info('Something went wrong while extracting archive file: %s', e) |
1359 self.BackupOrRestoreOutputDirectory(restore=True) | 1362 self.BackupOrRestoreOutputDirectory(restore=True) |
1360 # Cleanup any leftovers from unzipping. | 1363 # Cleanup any leftovers from unzipping. |
1361 if os.path.exists(output_dir): | 1364 if os.path.exists(output_dir): |
1362 RemoveDirectoryTree(output_dir) | 1365 RemoveDirectoryTree(output_dir) |
1363 finally: | 1366 finally: |
1364 # Delete downloaded archive | 1367 # Delete downloaded archive |
1365 if os.path.exists(downloaded_file): | 1368 if os.path.exists(downloaded_file): |
1366 os.remove(downloaded_file) | 1369 os.remove(downloaded_file) |
1367 return False | 1370 return False |
1368 | 1371 |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1413 bot_name, build_timeout = GetBuilderNameAndBuildTime( | 1416 bot_name, build_timeout = GetBuilderNameAndBuildTime( |
1414 self.opts.target_platform, self.opts.target_arch) | 1417 self.opts.target_platform, self.opts.target_arch) |
1415 target_file = None | 1418 target_file = None |
1416 try: | 1419 try: |
1417 # Execute try job request to build revision with patch. | 1420 # Execute try job request to build revision with patch. |
1418 _BuilderTryjob(git_revision, bot_name, build_request_id, patch) | 1421 _BuilderTryjob(git_revision, bot_name, build_request_id, patch) |
1419 target_file, error_msg = _WaitUntilBuildIsReady( | 1422 target_file, error_msg = _WaitUntilBuildIsReady( |
1420 fetch_build, bot_name, self.opts.builder_host, | 1423 fetch_build, bot_name, self.opts.builder_host, |
1421 self.opts.builder_port, build_request_id, build_timeout) | 1424 self.opts.builder_port, build_request_id, build_timeout) |
1422 if not target_file: | 1425 if not target_file: |
1423 print '%s [revision: %s]' % (error_msg, git_revision) | 1426 logging.warn('%s [revision: %s]', error_msg, git_revision) |
1424 except RunGitError as e: | 1427 except RunGitError as e: |
1425 print ('Failed to post builder try job for revision: [%s].\n' | 1428 logging.warn('Failed to post builder try job for revision: [%s].\n' |
1426 'Error: %s' % (git_revision, e)) | 1429 'Error: %s', git_revision, e) |
1427 | 1430 |
1428 return target_file | 1431 return target_file |
1429 | 1432 |
1430 def IsDownloadable(self, depot): | 1433 def IsDownloadable(self, depot): |
1431 """Checks if build can be downloaded based on target platform and depot.""" | 1434 """Checks if build can be downloaded based on target platform and depot.""" |
1432 if (self.opts.target_platform in ['chromium', 'android'] and | 1435 if (self.opts.target_platform in ['chromium', 'android'] and |
1433 self.opts.gs_bucket): | 1436 self.opts.gs_bucket): |
1434 return (depot == 'chromium' or | 1437 return (depot == 'chromium' or |
1435 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or | 1438 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or |
1436 'v8' in DEPOT_DEPS_NAME[depot]['from']) | 1439 'v8' in DEPOT_DEPS_NAME[depot]['from']) |
(...skipping 13 matching lines...) Expand all Loading... | |
1450 """ | 1453 """ |
1451 # Check whether the depot and revision pattern in DEPS file vars | 1454 # Check whether the depot and revision pattern in DEPS file vars |
1452 # e.g. for webkit the format is "webkit_revision": "12345". | 1455 # e.g. for webkit the format is "webkit_revision": "12345". |
1453 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key, | 1456 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key, |
1454 re.MULTILINE) | 1457 re.MULTILINE) |
1455 new_data = None | 1458 new_data = None |
1456 if re.search(deps_revision, deps_contents): | 1459 if re.search(deps_revision, deps_contents): |
1457 commit_position = source_control.GetCommitPosition( | 1460 commit_position = source_control.GetCommitPosition( |
1458 git_revision, self.depot_registry.GetDepotDir(depot)) | 1461 git_revision, self.depot_registry.GetDepotDir(depot)) |
1459 if not commit_position: | 1462 if not commit_position: |
1460 print 'Could not determine commit position for %s' % git_revision | 1463 logging.warn('Could not determine commit position for %s', git_revision) |
1461 return None | 1464 return None |
1462 # Update the revision information for the given depot | 1465 # Update the revision information for the given depot |
1463 new_data = re.sub(deps_revision, str(commit_position), deps_contents) | 1466 new_data = re.sub(deps_revision, str(commit_position), deps_contents) |
1464 else: | 1467 else: |
1465 # Check whether the depot and revision pattern in DEPS file vars | 1468 # Check whether the depot and revision pattern in DEPS file vars |
1466 # e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..". | 1469 # e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..". |
1467 deps_revision = re.compile( | 1470 deps_revision = re.compile( |
1468 r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key, | 1471 r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key, |
1469 re.MULTILINE) | 1472 re.MULTILINE) |
1470 if re.search(deps_revision, deps_contents): | 1473 if re.search(deps_revision, deps_contents): |
(...skipping 21 matching lines...) Expand all Loading... | |
1492 | 1495 |
1493 Returns: | 1496 Returns: |
1494 True if DEPS file is modified successfully, otherwise False. | 1497 True if DEPS file is modified successfully, otherwise False. |
1495 """ | 1498 """ |
1496 if not os.path.exists(deps_file): | 1499 if not os.path.exists(deps_file): |
1497 return False | 1500 return False |
1498 | 1501 |
1499 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] | 1502 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] |
1500 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME. | 1503 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME. |
1501 if not deps_var: | 1504 if not deps_var: |
1502 print 'DEPS update not supported for Depot: %s', depot | 1505 logging.warn('DEPS update not supported for Depot: %s', depot) |
1503 return False | 1506 return False |
1504 | 1507 |
1505 # Hack for Angle repository. In the DEPS file, "vars" dictionary variable | 1508 # Hack for Angle repository. In the DEPS file, "vars" dictionary variable |
1506 # contains "angle_revision" key that holds git hash instead of SVN revision. | 1509 # contains "angle_revision" key that holds git hash instead of SVN revision. |
1507 # And sometime "angle_revision" key is not specified in "vars" variable. | 1510 # And sometime "angle_revision" key is not specified in "vars" variable. |
1508 # In such cases check, "deps" dictionary variable that matches | 1511 # In such cases check, "deps" dictionary variable that matches |
1509 # angle.git@[a-fA-F0-9]{40}$ and replace git hash. | 1512 # angle.git@[a-fA-F0-9]{40}$ and replace git hash. |
1510 if depot == 'angle': | 1513 if depot == 'angle': |
1511 return _UpdateDEPSForAngle(revision, depot, deps_file) | 1514 return _UpdateDEPSForAngle(revision, depot, deps_file) |
1512 | 1515 |
1513 try: | 1516 try: |
1514 deps_contents = ReadStringFromFile(deps_file) | 1517 deps_contents = ReadStringFromFile(deps_file) |
1515 updated_deps_content = self.UpdateDepsContents( | 1518 updated_deps_content = self.UpdateDepsContents( |
1516 deps_contents, depot, revision, deps_var) | 1519 deps_contents, depot, revision, deps_var) |
1517 # Write changes to DEPS file | 1520 # Write changes to DEPS file |
1518 if updated_deps_content: | 1521 if updated_deps_content: |
1519 WriteStringToFile(updated_deps_content, deps_file) | 1522 WriteStringToFile(updated_deps_content, deps_file) |
1520 return True | 1523 return True |
1521 except IOError, e: | 1524 except IOError, e: |
1522 print 'Something went wrong while updating DEPS file. [%s]' % e | 1525 logging.warn('Something went wrong while updating DEPS file. [%s]', e) |
1523 return False | 1526 return False |
1524 | 1527 |
1525 def CreateDEPSPatch(self, depot, revision): | 1528 def CreateDEPSPatch(self, depot, revision): |
1526 """Modifies DEPS and returns diff as text. | 1529 """Modifies DEPS and returns diff as text. |
1527 | 1530 |
1528 Args: | 1531 Args: |
1529 depot: Current depot being bisected. | 1532 depot: Current depot being bisected. |
1530 revision: A git hash revision of the dependency repository. | 1533 revision: A git hash revision of the dependency repository. |
1531 | 1534 |
1532 Returns: | 1535 Returns: |
(...skipping 933 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2466 if metric_increased: | 2469 if metric_increased: |
2467 message += "and the metric appears to have increased. " | 2470 message += "and the metric appears to have increased. " |
2468 else: | 2471 else: |
2469 message += "and the metric appears to have decreased. " | 2472 message += "and the metric appears to have decreased. " |
2470 if ((higher_is_better and metric_increased) or | 2473 if ((higher_is_better and metric_increased) or |
2471 (not higher_is_better and not metric_increased)): | 2474 (not higher_is_better and not metric_increased)): |
2472 results.error = (message + 'Then, the test results for the ends of ' | 2475 results.error = (message + 'Then, the test results for the ends of ' |
2473 'the given \'good\' - \'bad\' range of revisions ' | 2476 'the given \'good\' - \'bad\' range of revisions ' |
2474 'represent an improvement (and not a regression).') | 2477 'represent an improvement (and not a regression).') |
2475 return results | 2478 return results |
2476 print message, "Therefore we continue to bisect." | 2479 logging.info(message + "Therefore we continue to bisect.") |
2477 | 2480 |
2478 # Check how likely it is that the good and bad results are different | 2481 # Check how likely it is that the good and bad results are different |
2479 # beyond chance-induced variation. | 2482 # beyond chance-induced variation. |
2480 if not self.opts.debug_ignore_regression_confidence: | 2483 if not self.opts.debug_ignore_regression_confidence: |
2481 regression_confidence = ConfidenceScore(known_bad_value['values'], | 2484 regression_confidence = ConfidenceScore(known_bad_value['values'], |
2482 known_good_value['values']) | 2485 known_good_value['values']) |
2483 if regression_confidence < REGRESSION_CONFIDENCE: | 2486 if regression_confidence < REGRESSION_CONFIDENCE: |
2484 results.error = ('We could not reproduce the regression with this ' | 2487 results.error = ('We could not reproduce the regression with this ' |
2485 'test/metric/platform combination with enough ' | 2488 'test/metric/platform combination with enough ' |
2486 'confidence. There\'s still a chance that this is ' | 2489 'confidence. There\'s still a chance that this is ' |
(...skipping 701 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3188 if not cloud_storage.List(opts.gs_bucket): | 3191 if not cloud_storage.List(opts.gs_bucket): |
3189 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) | 3192 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) |
3190 if not opts.builder_host: | 3193 if not opts.builder_host: |
3191 raise RuntimeError('Must specify try server host name using ' | 3194 raise RuntimeError('Must specify try server host name using ' |
3192 '--builder_host when gs_bucket is used.') | 3195 '--builder_host when gs_bucket is used.') |
3193 if not opts.builder_port: | 3196 if not opts.builder_port: |
3194 raise RuntimeError('Must specify try server port number using ' | 3197 raise RuntimeError('Must specify try server port number using ' |
3195 '--builder_port when gs_bucket is used.') | 3198 '--builder_port when gs_bucket is used.') |
3196 if opts.target_platform == 'cros': | 3199 if opts.target_platform == 'cros': |
3197 # Run sudo up front to make sure credentials are cached for later. | 3200 # Run sudo up front to make sure credentials are cached for later. |
3198 print 'Sudo is required to build cros:' | 3201 logging.info('Sudo is required to build cros:') |
3199 print | 3202 logging.info('') |
qyearsley
2014/10/20 18:26:00
This one I'm not really sure about, maybe it shoul
RobertoCN
2014/10/20 21:01:03
Done.
| |
3200 bisect_utils.RunProcess(['sudo', 'true']) | 3203 bisect_utils.RunProcess(['sudo', 'true']) |
3201 | 3204 |
3202 if not opts.cros_board: | 3205 if not opts.cros_board: |
3203 raise RuntimeError('missing required parameter: --cros_board') | 3206 raise RuntimeError('missing required parameter: --cros_board') |
3204 | 3207 |
3205 if not opts.cros_remote_ip: | 3208 if not opts.cros_remote_ip: |
3206 raise RuntimeError('missing required parameter: --cros_remote_ip') | 3209 raise RuntimeError('missing required parameter: --cros_remote_ip') |
3207 | 3210 |
3208 if not opts.working_directory: | 3211 if not opts.working_directory: |
3209 raise RuntimeError('missing required parameter: --working_directory') | 3212 raise RuntimeError('missing required parameter: --working_directory') |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3251 opts.metric = metric_values | 3254 opts.metric = metric_values |
3252 | 3255 |
3253 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) | 3256 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) |
3254 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) | 3257 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) |
3255 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) | 3258 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) |
3256 opts.truncate_percent = opts.truncate_percent / 100.0 | 3259 opts.truncate_percent = opts.truncate_percent / 100.0 |
3257 | 3260 |
3258 return opts | 3261 return opts |
3259 | 3262 |
3260 | 3263 |
3264 def _ConfigureLogging(): | |
3265 """Trivial logging config. | |
3266 | |
3267 Configures logging to output any messages at or above INFO to standard out, | |
3268 without any additional formatting. | |
3269 """ | |
3270 loggingFormat = '%(message)s' | |
qyearsley
2014/10/20 18:26:00
"logging_format" is probably better here, even tho
RobertoCN
2014/10/20 21:01:03
Done.
| |
3271 logging.basicConfig( | |
3272 stream=logging.sys.stdout, level=logging.INFO, format=loggingFormat) | |
3273 | |
3274 | |
3261 def main(): | 3275 def main(): |
3262 | 3276 _ConfigureLogging() |
3263 try: | 3277 try: |
3264 opts = BisectOptions() | 3278 opts = BisectOptions() |
3265 opts.ParseCommandLine() | 3279 opts.ParseCommandLine() |
3266 | 3280 |
3267 if opts.extra_src: | 3281 if opts.extra_src: |
3268 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src) | 3282 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src) |
3269 if not extra_src: | 3283 if not extra_src: |
3270 raise RuntimeError('Invalid or missing --extra_src.') | 3284 raise RuntimeError('Invalid or missing --extra_src.') |
3271 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo()) | 3285 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo()) |
3272 | 3286 |
(...skipping 30 matching lines...) Expand all Loading... | |
3303 raise RuntimeError(bisect_results.error) | 3317 raise RuntimeError(bisect_results.error) |
3304 bisect_test.FormatAndPrintResults(bisect_results) | 3318 bisect_test.FormatAndPrintResults(bisect_results) |
3305 return 0 | 3319 return 0 |
3306 finally: | 3320 finally: |
3307 bisect_test.PerformCleanup() | 3321 bisect_test.PerformCleanup() |
3308 except RuntimeError, e: | 3322 except RuntimeError, e: |
3309 if opts.output_buildbot_annotations: | 3323 if opts.output_buildbot_annotations: |
3310 # The perf dashboard scrapes the "results" step in order to comment on | 3324 # The perf dashboard scrapes the "results" step in order to comment on |
3311 # bugs. If you change this, please update the perf dashboard as well. | 3325 # bugs. If you change this, please update the perf dashboard as well. |
3312 bisect_utils.OutputAnnotationStepStart('Results') | 3326 bisect_utils.OutputAnnotationStepStart('Results') |
3313 print 'Error: %s' % e.message | 3327 logging.warn('Error: %s', e.message) |
qyearsley
2014/10/20 18:26:00
We definitely want to make sure that this gets pri
RobertoCN
2014/10/20 21:01:03
How about we print it and also log it?
Also how is
qyearsley
2014/10/21 21:51:27
Printing as well as logging would probably be fine
| |
3314 if opts.output_buildbot_annotations: | 3328 if opts.output_buildbot_annotations: |
3315 bisect_utils.OutputAnnotationStepClosed() | 3329 bisect_utils.OutputAnnotationStepClosed() |
3316 return 1 | 3330 return 1 |
3317 | 3331 |
3318 | 3332 |
3319 if __name__ == '__main__': | 3333 if __name__ == '__main__': |
3320 sys.exit(main()) | 3334 sys.exit(main()) |
OLD | NEW |