Chromium Code Reviews

Unified Diff: tools/bisect-perf-regression.py

Issue 27165006: First pass performance try bot. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 7 years, 2 months ago
 #!/usr/bin/env python
 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Performance Test Bisect Tool

 This script bisects a series of changelists using binary search. It starts at
 a bad revision where a performance metric has regressed, and asks for a last
 known-good revision. It will then binary search across this revision range by
(...skipping 17 matching lines...)
 An example usage (using git hashes):

 ./tools/bisect-perf-regression.py -c\
 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
 -m shutdown/simple-user-quit

 """

+import copy
 import datetime
 import errno
 import imp
 import math
 import optparse
 import os
 import re
 import shlex
 import shutil
 import subprocess
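
The docstring above describes the core algorithm: a binary search over the revision range between a known-good and a known-bad revision, testing the metric at the midpoint each time. A minimal sketch of just the search step, assuming an ordered revision list and an is_good() callback (the real script also syncs and builds each candidate revision):

    def BisectFirstBadRevision(revisions, is_good):
      # revisions is ordered oldest to newest; revisions[0] is known good
      # and revisions[-1] is known bad. is_good() runs the perf test at a
      # revision and reports whether the metric still looks healthy.
      good, bad = 0, len(revisions) - 1
      while bad - good > 1:
        mid = (good + bad) // 2
        if is_good(revisions[mid]):
          good = mid  # The regression landed after mid.
        else:
          bad = mid   # The regression landed at or before mid.
      return revisions[bad]  # The first bad revision.
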
(...skipping 1179 matching lines...)
       path_to_generate = os.path.join('tools', 'perf', 'generate_profile')

       if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
         profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
         return not RunProcess(['python', path_to_generate,
             '--profile-type-to-generate', profile_type,
             '--browser', arg_dict['--browser'], '--output-dir', profile_path])
       return False
     return True

-  def RunPerformanceTestAndParseResults(self, command_to_run, metric):
+  def RunPerformanceTestAndParseResults(self, command_to_run, metric,
+                                        reset_on_first_run=False, upload_on_last_run=False):
     """Runs a performance test on the current revision by executing the
     'command_to_run' and parses the results.

     Args:
       command_to_run: The command to be run to execute the performance test.
       metric: The metric to parse out from the results of the performance test.

     Returns:
       On success, it will return a tuple of the average value of the metric,
       and a success code of 0.
     """

     if self.opts.debug_ignore_perf_test:
       return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)

     if IsWindows():
       command_to_run = command_to_run.replace('/', r'\\')

     args = shlex.split(command_to_run)

     if not self._GenerateProfileIfNecessary(args):
       return ('Failed to generate profile for performance test.', -1)

     # If running a telemetry test for cros, insert the remote ip, and
     # identity parameters.
-    if self.opts.target_platform == 'cros':
-      if 'tools/perf/run_' in args[0]:
-        args.append('--remote=%s' % self.opts.cros_remote_ip)
-        args.append('--identity=%s' % CROS_TEST_KEY_PATH)
+    is_telemetry = ('tools/perf/run_' in command_to_run or
+                    r'tools\\perf\\run_' in command_to_run)

tonyg 2013/10/17 01:01:32  I believe you want the r'' OR the \\s, but not both.
shatch 2013/10/17 21:59:08  Done.
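
To illustrate the escaping point tonyg raises: a raw-string prefix and doubled backslashes each produce a literal backslash on their own, so combining them yields two literal backslashes per path separator. A quick check in plain Python (not part of this CL):

    print len(r'tools\\perf\\run_')  # 17: raw + doubled, two backslashes per separator
    print len('tools\\perf\\run_')   # 15: escaped only, one backslash per separator
    print len(r'tools\perf\run_')    # 15: raw only, one backslash per separator
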
+    if self.opts.target_platform == 'cros' and is_telemetry:
+      args.append('--remote=%s' % self.opts.cros_remote_ip)
+      args.append('--identity=%s' % CROS_TEST_KEY_PATH)
+
+    # These arguments only work with telemetry.
+    reset_on_first_run = reset_on_first_run and is_telemetry
+    upload_on_last_run = upload_on_last_run and is_telemetry

     cwd = os.getcwd()
     os.chdir(self.src_cwd)

     start_time = time.time()

     metric_values = []
+    output_of_all_runs = ''
     for i in xrange(self.opts.repeat_test_count):
       # Can ignore the return code since if the tests fail, it won't return 0.
       try:
-        (output, return_code) = RunProcessAndRetrieveOutput(args)
+        current_args = copy.copy(args)
+        if i == 0 and reset_on_first_run:
+          current_args.append('--reset-results')
+        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
+          current_args.append('--upload-results')
+        (output, return_code) = RunProcessAndRetrieveOutput(current_args)
       except OSError, e:
         if e.errno == errno.ENOENT:
           err_text = ("Something went wrong running the performance test. "
                       "Please review the command line:\n\n")
           if 'src/' in ' '.join(args):
             err_text += ("Check that you haven't accidentally specified a path "
                          "with src/ in the command.\n\n")
           err_text += ' '.join(args)
           err_text += '\n'

           return (err_text, -1)
         raise

+      output_of_all_runs += output
       if self.opts.output_buildbot_annotations:
         print output

       metric_values += self.ParseMetricValuesFromOutput(metric, output)

       elapsed_minutes = (time.time() - start_time) / 60.0

       if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
         break

     os.chdir(cwd)

     # Need to get the average value if there were multiple values.
     if metric_values:
       truncated_mean = CalculateTruncatedMean(metric_values,
                                               self.opts.truncate_percent)
       standard_err = CalculateStandardError(metric_values)
       standard_dev = CalculateStandardDeviation(metric_values)

       values = {
         'mean': truncated_mean,
         'std_err': standard_err,
         'std_dev': standard_dev,
         'values': metric_values,
       }

       print 'Results of performance test: %12f %12f' % (
           truncated_mean, standard_err)
       print
-      return (values, 0)
+      return (values, 0, output_of_all_runs)
     else:
-      return ('Invalid metric specified, or no values returned from '
-              'performance test.', -1)
+      return ('Invalid metric specified, or no values returned from '
+              'performance test.', -1, output_of_all_runs)

   def FindAllRevisionsToSync(self, revision, depot):
     """Finds all dependant revisions and depots that need to be synced for a
     given revision. This is only useful in the git workflow, as an svn depot
     may be split into multiple mirrors.

     ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
     skia/include. To sync skia/src properly, one has to find the proper
     revisions in skia/gyp and skia/include.
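
After this change, the success path returns a third element, the concatenated output of every run, and callers can opt into Telemetry's --reset-results/--upload-results handling through the two new keywords. A hypothetical call site (the command and metric values here are illustrative, not taken from this CL):

    results, return_code, output = self.RunPerformanceTestAndParseResults(
        'tools/perf/run_measurement --browser=release sunspider',
        ['Total', 'Total'],
        reset_on_first_run=True,
        upload_on_last_run=True)
    if not return_code:
      print 'Mean: %f, std err: %f' % (results['mean'], results['std_err'])
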
(...skipping 1403 matching lines...)

     Args:
       values: a dict containing options to set.

     Returns:
       An instance of BisectOptions.
     """
     opts = BisectOptions()

     for k, v in values.iteritems():
-      assert hasattr(opts, name_to_attr[k]), 'Invalid %s attribute in '\
-          'BisectOptions.' % name_to_attr[k]
-      setattr(opts, name_to_attr[k], v)
+      assert hasattr(opts, k), 'Invalid %s attribute in '\
+          'BisectOptions.' % k
+      setattr(opts, k, v)

     metric_values = opts.metric.split('/')
     if len(metric_values) != 2:
       raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

     opts.metric = metric_values
     opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
     opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
     opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
     opts.truncate_percent = opts.truncate_percent / 100.0
(...skipping 55 matching lines...)
         return 0
       finally:
         bisect_test.PerformCleanup()
   except RuntimeError, e:
     print 'Error: %s' % e.message
     print
     return 1


 if __name__ == '__main__':
   sys.exit(main())
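
For reference on the statistics the test runner prints: truncate_percent is clamped to 0-25 and converted to a fraction before reaching CalculateTruncatedMean. A minimal sketch of a truncated mean, assuming the helper trims that fraction of samples from each end of the sorted list before averaging (an assumption about the helper's behavior, not code from this file):

    def TruncatedMeanSketch(values, truncate_fraction):
      # Assumed behavior: drop truncate_fraction of the samples from each
      # end of the sorted list, then average the remainder, so single
      # outlier runs do not skew the reported mean.
      ordered = sorted(values)
      trim = int(len(ordered) * truncate_fraction)
      if trim:
        ordered = ordered[trim:-trim]
      return sum(ordered) / float(len(ordered))

    print TruncatedMeanSketch([1.9, 2.0, 2.1, 50.0, 1.8], 0.2)  # 2.0: trims 1.8 and the outlier 50.0
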
