Index: tools/auto_bisect/bisect_perf_regression.py
diff --git a/tools/auto_bisect/bisect_perf_regression.py b/tools/auto_bisect/bisect_perf_regression.py
index 62f9cb27d9e85c02020b9b5315f69e6c85b1232d..3d09fa485bbb85538eb971bc3824df8071613e52 100755
--- a/tools/auto_bisect/bisect_perf_regression.py
+++ b/tools/auto_bisect/bisect_perf_regression.py
@@ -1178,7 +1178,12 @@ class BisectPerformanceMetrics(object):
build_success = self._DownloadAndUnzipBuild(
revision, depot, build_type='Release', create_patch=create_patch)
else:
- # Build locally.
+ # Print the current environment set on the machine.
+ logging.info('Full Environment:')
+ for key, value in sorted(os.environ.items()):
+ logging.info('%s: %s', key, value)
+ # Print the environment before proceeding with compile.
+ sys.stdout.flush()
build_success = self.builder.Build(depot, self.opts)
os.chdir(cwd)
return build_success
@@ -2731,6 +2736,8 @@ class BisectOptions(object):
raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
opts.metric = metric_values
+ if opts.target_arch == 'x64' and opts.target_build_type == 'Release':
+ opts.target_build_type = 'Release_x64'
opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)