Chromium Code Reviews

Unified Diff: tools/bisect-perf-regression.py

Issue 27165006: First pass performance try bot. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Changes from review. Created 7 years, 2 months ago
 #!/usr/bin/env python
 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Performance Test Bisect Tool

 This script bisects a series of changelists using binary search. It starts at
 a bad revision where a performance metric has regressed, and asks for a last
 known-good revision. It will then binary search across this revision range by
(...skipping 17 matching lines...)
 An example usage (using git hashes):

 ./tools/bisect-perf-regression.py -c\
 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
 -m shutdown/simple-user-quit

 """

+import copy
 import datetime
 import errno
 import imp
 import math
 import optparse
 import os
 import re
 import shlex
 import shutil
 import subprocess
(...skipping 1179 matching lines...)
       path_to_generate = os.path.join('tools', 'perf', 'generate_profile')

       if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
         profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
         return not RunProcess(['python', path_to_generate,
             '--profile-type-to-generate', profile_type,
             '--browser', arg_dict['--browser'], '--output-dir', profile_path])
       return False
     return True

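As a small aside, here is a sketch of what the os.path.split() call above
produces (the --profile-dir value below is hypothetical, not part of this
patch):

  import os

  # Hypothetical --profile-dir value taken from the test command line.
  profile_dir = 'out/profiles/small_profile'
  profile_path, profile_type = os.path.split(profile_dir)
  print profile_path  # 'out/profiles', passed as --output-dir
  print profile_type  # 'small_profile', passed as --profile-type-to-generate
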
-  def RunPerformanceTestAndParseResults(self, command_to_run, metric):
+  def RunPerformanceTestAndParseResults(self, command_to_run, metric,
+      reset_on_first_run=False, upload_on_last_run=False, results_label=None):
     """Runs a performance test on the current revision by executing the
     'command_to_run' and parses the results.

     Args:
       command_to_run: The command to be run to execute the performance test.
       metric: The metric to parse out from the results of the performance test.
+      reset_on_first_run: If True, pass '--reset-results' on the first
+          telemetry run.
+      upload_on_last_run: If True, pass '--upload-results' on the last
+          telemetry run.
+      results_label: A value to pass to telemetry's '--results-label' option.

     Returns:
-      On success, it will return a tuple of the average value of the metric,
-      and a success code of 0.
+      On success, a tuple of the aggregated metric values, a success code of
+      0, and the concatenated output of all test runs.
     """

     if self.opts.debug_ignore_perf_test:
       return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)

     if IsWindows():
       command_to_run = command_to_run.replace('/', r'\\')

     args = shlex.split(command_to_run)

     if not self._GenerateProfileIfNecessary(args):
       return ('Failed to generate profile for performance test.', -1)

     # If running a telemetry test for cros, insert the remote IP and
     # identity parameters.
-    if self.opts.target_platform == 'cros':
-      if 'tools/perf/run_' in args[0]:
-        args.append('--remote=%s' % self.opts.cros_remote_ip)
-        args.append('--identity=%s' % CROS_TEST_KEY_PATH)
+    is_telemetry = ('tools/perf/run_' in command_to_run or
+                    'tools\\perf\\run_' in command_to_run)
+    if self.opts.target_platform == 'cros' and is_telemetry:
+      args.append('--remote=%s' % self.opts.cros_remote_ip)
+      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

     cwd = os.getcwd()
     os.chdir(self.src_cwd)

     start_time = time.time()

     metric_values = []
+    output_of_all_runs = ''
     for i in xrange(self.opts.repeat_test_count):
       # Can ignore the return code since if the tests fail, it won't return 0.
       try:
-        (output, return_code) = RunProcessAndRetrieveOutput(args)
+        current_args = copy.copy(args)
+        if is_telemetry:
+          if i == 0 and reset_on_first_run:
+            current_args.append('--reset-results')
+          elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
+            current_args.append('--upload-results')
+          if results_label:
+            current_args.append('--results-label=%s' % results_label)
+        (output, return_code) = RunProcessAndRetrieveOutput(current_args)
       except OSError, e:
         if e.errno == errno.ENOENT:
           err_text = ("Something went wrong running the performance test. "
                       "Please review the command line:\n\n")
           if 'src/' in ' '.join(args):
             err_text += ("Check that you haven't accidentally specified a "
                          "path with src/ in the command.\n\n")
           err_text += ' '.join(args)
           err_text += '\n'

           return (err_text, -1)
         raise

+      output_of_all_runs += output
       if self.opts.output_buildbot_annotations:
         print output

       metric_values += self.ParseMetricValuesFromOutput(metric, output)

       elapsed_minutes = (time.time() - start_time) / 60.0

       if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
         break

     os.chdir(cwd)

     # Need to get the average value if there were multiple values.
     if metric_values:
       truncated_mean = CalculateTruncatedMean(metric_values,
                                               self.opts.truncate_percent)
       standard_err = CalculateStandardError(metric_values)
       standard_dev = CalculateStandardDeviation(metric_values)

       values = {
         'mean': truncated_mean,
         'std_err': standard_err,
         'std_dev': standard_dev,
         'values': metric_values,
       }

       print 'Results of performance test: %12f %12f' % (
           truncated_mean, standard_err)
       print
-      return (values, 0)
+      return (values, 0, output_of_all_runs)
     else:
-      return ('Invalid metric specified, or no values returned from '
-              'performance test.', -1)
+      return ('Invalid metric specified, or no values returned from '
+              'performance test.', -1, output_of_all_runs)

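For illustration, a minimal sketch of how a caller might consume the new
keyword arguments and the three-element return value (the call site, command,
and metric below are hypothetical, not part of this patch):

  # Hypothetical call site inside BisectPerformanceMetrics; the metric
  # string 'Total/Total' would arrive here already split into a list.
  results, return_code, output = self.RunPerformanceTestAndParseResults(
      'tools/perf/run_measurement --browser=release sunspider',
      ['Total', 'Total'],
      reset_on_first_run=True,     # first run appends --reset-results
      upload_on_last_run=True,     # last run appends --upload-results
      results_label='with patch')  # telemetry runs get --results-label
  if not return_code:
    print 'Mean: %f, Std. Err.: %f' % (results['mean'], results['std_err'])
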
   def FindAllRevisionsToSync(self, revision, depot):
     """Finds all dependent revisions and depots that need to be synced for a
     given revision. This is only useful in the git workflow, as an svn depot
     may be split into multiple mirrors.

     i.e. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
     skia/include. To sync skia/src properly, one has to find the proper
     revisions in skia/gyp and skia/include.

(...skipping 1403 matching lines...)

     Args:
       values: A dict containing options to set.

     Returns:
       An instance of BisectOptions.
     """
     opts = BisectOptions()

     for k, v in values.iteritems():
-      assert hasattr(opts, name_to_attr[k]), 'Invalid %s attribute in '\
-          'BisectOptions.' % name_to_attr[k]
-      setattr(opts, name_to_attr[k], v)
+      assert hasattr(opts, k), 'Invalid %s attribute in '\
+          'BisectOptions.' % k
+      setattr(opts, k, v)

     metric_values = opts.metric.split('/')
     if len(metric_values) != 2:
       raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

     opts.metric = metric_values
     opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
     opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
     opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
     opts.truncate_percent = opts.truncate_percent / 100.0
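As a worked example of the clamping above (input values hypothetical): a
truncate_percent of 30 is clamped to 25 and then stored as the fraction 0.25,
which CalculateTruncatedMean uses to discard the top and bottom portions of
the collected samples.

  print min(max(0, 1), 100)          # repeat_test_count: 0 clamps up to 1
  print min(max(120, 1), 60)         # max_time_minutes: 120 clamps down to 60
  print min(max(30, 0), 25) / 100.0  # truncate_percent: 30 -> 25 -> 0.25
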
(...skipping 55 matching lines...)
         return 0
       finally:
         bisect_test.PerformCleanup()
   except RuntimeError, e:
     print 'Error: %s' % e.message
     print
   return 1

 if __name__ == '__main__':
   sys.exit(main())