OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Run Performance Test Bisect Tool | 6 """Run Performance Test Bisect Tool |
7 | 7 |
8 This script is used by a try bot to run the bisect script with the parameters | 8 This script is used by a try bot to run the bisect script with the parameters |
9 specified in the bisect config file. It checks out a copy of the depot in | 9 specified in the bisect config file. It checks out a copy of the depot in |
10 a subdirectory 'bisect' of the working directory provided, and runs the | 10 a subdirectory 'bisect' of the working directory provided, and runs the |
11 bisect script there. | 11 bisect script there. |
12 """ | 12 """ |
13 | 13 |
14 import optparse | 14 import optparse |
15 import os | 15 import os |
16 import platform | 16 import platform |
17 import re | 17 import re |
18 import subprocess | 18 import subprocess |
19 import sys | 19 import sys |
20 import traceback | 20 import traceback |
21 | 21 |
22 from auto_bisect import bisect_perf_regression | 22 from auto_bisect import bisect_perf_regression |
23 from auto_bisect import bisect_utils | 23 from auto_bisect import bisect_utils |
24 from auto_bisect import math_utils | 24 from auto_bisect import math_utils |
| 25 from auto_bisect import source_control |
25 | 26 |
26 CROS_BOARD_ENV = 'BISECT_CROS_BOARD' | 27 CROS_BOARD_ENV = 'BISECT_CROS_BOARD' |
27 CROS_IP_ENV = 'BISECT_CROS_IP' | 28 CROS_IP_ENV = 'BISECT_CROS_IP' |
28 | 29 |
29 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) | 30 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) |
30 SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir) | 31 SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir) |
31 BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg') | 32 BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg') |
32 RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg') | 33 RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg') |
33 WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join( | 34 WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join( |
34 SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg') | 35 SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg') |
(...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
248 re.MULTILINE) | 249 re.MULTILINE) |
249 | 250 |
250 results = { | 251 results = { |
251 'html-results': html_results_pattern.findall(output), | 252 'html-results': html_results_pattern.findall(output), |
252 'profiler': profiler_pattern.findall(output), | 253 'profiler': profiler_pattern.findall(output), |
253 } | 254 } |
254 | 255 |
255 return results | 256 return results |
256 | 257 |
257 | 258 |
258 def _RunPerformanceTest(config): | 259 def _ParseAndOutputCloudLinks(results_without_patch, results_with_patch): |
259 """Runs a performance test with and without the current patch. | |
260 | |
261 Args: | |
262 config: Contents of the config file, a dictionary. | |
263 | |
264 Attempts to build and run the current revision with and without the | |
265 current patch, with the parameters passed in. | |
266 """ | |
267 # Bisect script expects to be run from the src directory | |
268 os.chdir(SRC_DIR) | |
269 | |
270 bisect_utils.OutputAnnotationStepStart('Building With Patch') | |
271 | |
272 opts = _CreateBisectOptionsFromConfig(config) | |
273 b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd()) | |
274 | |
275 if bisect_utils.RunGClient(['runhooks']): | |
276 raise RuntimeError('Failed to run gclient runhooks') | |
277 | |
278 if not b.ObtainBuild('chromium'): | |
279 raise RuntimeError('Patched version failed to build.') | |
280 | |
281 bisect_utils.OutputAnnotationStepClosed() | |
282 bisect_utils.OutputAnnotationStepStart('Running With Patch') | |
283 | |
284 results_with_patch = b.RunPerformanceTestAndParseResults( | |
285 opts.command, | |
286 opts.metric, | |
287 reset_on_first_run=True, | |
288 upload_on_last_run=True, | |
289 results_label='Patch') | |
290 | |
291 if results_with_patch[1]: | |
292 raise RuntimeError('Patched version failed to run performance test.') | |
293 | |
294 bisect_utils.OutputAnnotationStepClosed() | |
295 | |
296 bisect_utils.OutputAnnotationStepStart('Reverting Patch') | |
297 # TODO: When this is re-written to recipes, this should use bot_update's | |
298 # revert mechanism to fully revert the client. But for now, since we know that | |
299 # the perf try bot currently only supports src/ and src/third_party/WebKit, we | |
300 # simply reset those two directories. | |
301 bisect_utils.CheckRunGit(['reset', '--hard']) | |
302 bisect_utils.CheckRunGit(['reset', '--hard'], | |
303 os.path.join('third_party', 'WebKit')) | |
304 bisect_utils.OutputAnnotationStepClosed() | |
305 | |
306 bisect_utils.OutputAnnotationStepStart('Building Without Patch') | |
307 | |
308 if bisect_utils.RunGClient(['runhooks']): | |
309 raise RuntimeError('Failed to run gclient runhooks') | |
310 | |
311 if not b.ObtainBuild('chromium'): | |
312 raise RuntimeError('Unpatched version failed to build.') | |
313 | |
314 bisect_utils.OutputAnnotationStepClosed() | |
315 bisect_utils.OutputAnnotationStepStart('Running Without Patch') | |
316 | |
317 results_without_patch = b.RunPerformanceTestAndParseResults( | |
318 opts.command, opts.metric, upload_on_last_run=True, results_label='ToT') | |
319 | |
320 if results_without_patch[1]: | |
321 raise RuntimeError('Unpatched version failed to run performance test.') | |
322 | |
323 # Find the link to the cloud stored results file. | |
324 cloud_links_without_patch = _ParseCloudLinksFromOutput( | 260 cloud_links_without_patch = _ParseCloudLinksFromOutput( |
325 results_without_patch[2]) | 261 results_without_patch[2]) |
326 cloud_links_with_patch = _ParseCloudLinksFromOutput( | 262 cloud_links_with_patch = _ParseCloudLinksFromOutput( |
327 results_with_patch[2]) | 263 results_with_patch[2]) |
328 | 264 |
329 cloud_file_link = (cloud_links_without_patch['html-results'][0] | 265 cloud_file_link = (cloud_links_without_patch['html-results'][0] |
330 if cloud_links_without_patch['html-results'] else '') | 266 if cloud_links_without_patch['html-results'] else '') |
331 | 267 |
332 profiler_file_links_with_patch = cloud_links_with_patch['profiler'] | 268 profiler_file_links_with_patch = cloud_links_with_patch['profiler'] |
333 profiler_file_links_without_patch = cloud_links_without_patch['profiler'] | 269 profiler_file_links_without_patch = cloud_links_without_patch['profiler'] |
334 | 270 |
335 # Calculate the % difference in the means of the 2 runs. | 271 # Calculate the % difference in the means of the 2 runs. |
336 percent_diff_in_means = None | 272 percent_diff_in_means = None |
337 std_err = None | 273 std_err = None |
338 if (results_with_patch[0].has_key('mean') and | 274 if (results_with_patch[0].has_key('mean') and |
339 results_with_patch[0].has_key('values')): | 275 results_with_patch[0].has_key('values')): |
340 percent_diff_in_means = (results_with_patch[0]['mean'] / | 276 percent_diff_in_means = (results_with_patch[0]['mean'] / |
341 max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0 | 277 max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0 |
342 std_err = math_utils.PooledStandardError( | 278 std_err = math_utils.PooledStandardError( |
343 [results_with_patch[0]['values'], results_without_patch[0]['values']]) | 279 [results_with_patch[0]['values'], results_without_patch[0]['values']]) |
344 | 280 |
345 bisect_utils.OutputAnnotationStepClosed() | |
346 if percent_diff_in_means is not None and std_err is not None: | 281 if percent_diff_in_means is not None and std_err is not None: |
347 bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' % | 282 bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' % |
348 (percent_diff_in_means, std_err)) | 283 (percent_diff_in_means, std_err)) |
349 print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '), | 284 print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '), |
350 'Std. Error'.center(20, ' ')) | 285 'Std. Error'.center(20, ' ')) |
351 print ' %s %s %s' % ('Patch'.center(10, ' '), | 286 print ' %s %s %s' % ('Patch'.center(10, ' '), |
352 ('%.02f' % results_with_patch[0]['mean']).center(20, ' '), | 287 ('%.02f' % results_with_patch[0]['mean']).center(20, ' '), |
353 ('%.02f' % results_with_patch[0]['std_err']).center(20, ' ')) | 288 ('%.02f' % results_with_patch[0]['std_err']).center(20, ' ')) |
354 print ' %s %s %s' % ('No Patch'.center(10, ' '), | 289 print ' %s %s %s' % ('No Patch'.center(10, ' '), |
355 ('%.02f' % results_without_patch[0]['mean']).center(20, ' '), | 290 ('%.02f' % results_without_patch[0]['mean']).center(20, ' '), |
356 ('%.02f' % results_without_patch[0]['std_err']).center(20, ' ')) | 291 ('%.02f' % results_without_patch[0]['std_err']).center(20, ' ')) |
357 if cloud_file_link: | 292 if cloud_file_link: |
358 bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link) | 293 bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link) |
359 bisect_utils.OutputAnnotationStepClosed() | 294 bisect_utils.OutputAnnotationStepClosed() |
360 elif cloud_file_link: | 295 elif cloud_file_link: |
361 bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link) | 296 bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link) |
362 | 297 |
363 if profiler_file_links_with_patch and profiler_file_links_without_patch: | 298 if profiler_file_links_with_patch and profiler_file_links_without_patch: |
364 for i in xrange(len(profiler_file_links_with_patch)): | 299 for i in xrange(len(profiler_file_links_with_patch)): |
365 bisect_utils.OutputAnnotationStepLink( | 300 bisect_utils.OutputAnnotationStepLink( |
366 'With Patch - Profiler Data[%d]' % i, | 301 'With Patch - Profiler Data[%d]' % i, |
367 profiler_file_links_with_patch[i]) | 302 profiler_file_links_with_patch[i]) |
368 for i in xrange(len(profiler_file_links_without_patch)): | 303 for i in xrange(len(profiler_file_links_without_patch)): |
369 bisect_utils.OutputAnnotationStepLink( | 304 bisect_utils.OutputAnnotationStepLink( |
370 'Without Patch - Profiler Data[%d]' % i, | 305 'Without Patch - Profiler Data[%d]' % i, |
371 profiler_file_links_without_patch[i]) | 306 profiler_file_links_without_patch[i]) |
372 | 307 |
373 | 308 |
| 309 def _ResolveRevisionsFromConfig(config): |
| 310 if not 'good_revision' in config and not 'bad_revision' in config: |
| 311 return (None, None) |
| 312 |
| 313 bad_revision = source_control.ResolveToRevision( |
| 314 config['bad_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, 100) |
| 315 if not bad_revision: |
 | 316 raise RuntimeError('Failed to resolve [%s] to git hash.' % |
| 317 config['bad_revision']) |
| 318 good_revision = source_control.ResolveToRevision( |
| 319 config['good_revision'], 'chromium', bisect_utils.DEPOT_DEPS_NAME, -100) |
| 320 if not good_revision: |
 | 321 raise RuntimeError('Failed to resolve [%s] to git hash.' % |
| 322 config['good_revision']) |
| 323 |
| 324 return (good_revision, bad_revision) |
| 325 |
| 326 |
| 327 def _GetStepAnnotationStringsDict(config): |
| 328 if 'good_revision' in config and 'bad_revision' in config: |
| 329 return { |
| 330 'build1': 'Building [%s]' % config['good_revision'], |
| 331 'build2': 'Building [%s]' % config['bad_revision'], |
| 332 'run1': 'Running [%s]' % config['good_revision'], |
| 333 'run2': 'Running [%s]' % config['bad_revision'], |
| 334 'results_label1': config['good_revision'], |
| 335 'results_label2': config['bad_revision'], |
| 336 } |
| 337 else: |
| 338 return { |
| 339 'build1': 'Building With Patch', |
| 340 'build2': 'Building Without Patch', |
| 341 'run1': 'Running With Patch', |
| 342 'run2': 'Running Without Patch', |
| 343 'results_label1': 'Patch', |
| 344 'results_label2': 'ToT', |
| 345 } |
| 346 |
| 347 |
| 348 def _RunBuildStepForPerformanceTest(bisect_instance, build_string, revision): |
| 349 if revision: |
| 350 bisect_utils.OutputAnnotationStepStart('Syncing [%s]' % revision) |
| 351 if not source_control.SyncToRevision(revision, 'gclient'): |
| 352 raise RuntimeError('Failed to sync to [%s].' % revision) |
| 353 bisect_utils.OutputAnnotationStepClosed() |
| 354 |
| 355 bisect_utils.OutputAnnotationStepStart(build_string) |
| 356 |
| 357 if bisect_utils.RunGClient(['runhooks']): |
| 358 raise RuntimeError('Failed to run gclient runhooks') |
| 359 |
| 360 if not bisect_instance.ObtainBuild('chromium'): |
 | 361 raise RuntimeError('Failed to build revision for perf test.') |
| 362 |
| 363 bisect_utils.OutputAnnotationStepClosed() |
| 364 |
| 365 |
| 366 def _RunCommandStepForPerformanceTest(bisect_instance, |
| 367 opts, |
| 368 reset_on_first_run, |
| 369 upload_on_last_run, |
| 370 results_label, |
| 371 run_string): |
| 372 bisect_utils.OutputAnnotationStepStart(run_string) |
| 373 |
| 374 results = bisect_instance.RunPerformanceTestAndParseResults( |
| 375 opts.command, |
| 376 opts.metric, |
| 377 reset_on_first_run=reset_on_first_run, |
| 378 upload_on_last_run=upload_on_last_run, |
| 379 results_label=results_label) |
| 380 |
| 381 if results[1]: |
 | 382 raise RuntimeError('Failed to run performance test.') |
| 383 |
| 384 bisect_utils.OutputAnnotationStepClosed() |
| 385 |
| 386 return results |
| 387 |
| 388 |
| 389 def _RunPerformanceTest(config): |
| 390 """Runs a performance test with and without the current patch. |
| 391 |
| 392 Args: |
| 393 config: Contents of the config file, a dictionary. |
| 394 |
| 395 Attempts to build and run the current revision with and without the |
| 396 current patch, with the parameters passed in. |
| 397 """ |
| 398 # Bisect script expects to be run from the src directory |
| 399 os.chdir(SRC_DIR) |
| 400 |
| 401 opts = _CreateBisectOptionsFromConfig(config) |
| 402 revisions = _ResolveRevisionsFromConfig(config) |
| 403 annotations_dict = _GetStepAnnotationStringsDict(config) |
| 404 b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd()) |
| 405 |
| 406 _RunBuildStepForPerformanceTest(b, annotations_dict['build1'], revisions[0]) |
| 407 |
| 408 results_with_patch = _RunCommandStepForPerformanceTest( |
| 409 b, opts, True, True, annotations_dict['results_label1'], |
| 410 annotations_dict['run1']) |
| 411 |
| 412 bisect_utils.OutputAnnotationStepStart('Reverting Patch') |
| 413 # TODO: When this is re-written to recipes, this should use bot_update's |
| 414 # revert mechanism to fully revert the client. But for now, since we know that |
| 415 # the perf try bot currently only supports src/ and src/third_party/WebKit, we |
| 416 # simply reset those two directories. |
| 417 bisect_utils.CheckRunGit(['reset', '--hard']) |
| 418 bisect_utils.CheckRunGit(['reset', '--hard'], |
| 419 os.path.join('third_party', 'WebKit')) |
| 420 bisect_utils.OutputAnnotationStepClosed() |
| 421 |
| 422 _RunBuildStepForPerformanceTest(b, annotations_dict['build2'], revisions[1]) |
| 423 |
| 424 results_without_patch = _RunCommandStepForPerformanceTest( |
| 425 b, opts, False, True, annotations_dict['results_label2'], |
 | 426 annotations_dict['run2']) |
| 427 |
| 428 # Find the link to the cloud stored results file. |
| 429 _ParseAndOutputCloudLinks(results_without_patch, results_with_patch) |
| 430 |
| 431 |
374 def _SetupAndRunPerformanceTest(config, path_to_goma): | 432 def _SetupAndRunPerformanceTest(config, path_to_goma): |
375 """Attempts to build and run the current revision with and without the | 433 """Attempts to build and run the current revision with and without the |
376 current patch, with the parameters passed in. | 434 current patch, with the parameters passed in. |
377 | 435 |
378 Args: | 436 Args: |
379 config: The config read from run-perf-test.cfg. | 437 config: The config read from run-perf-test.cfg. |
380 path_to_goma: Path to goma directory. | 438 path_to_goma: Path to goma directory. |
381 | 439 |
382 Returns: | 440 Returns: |
383 An exit code: 0 on success, otherwise 1. | 441 An exit code: 0 on success, otherwise 1. |
(...skipping 193 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
577 if config and config_is_valid: | 635 if config and config_is_valid: |
578 return _SetupAndRunPerformanceTest(config, opts.path_to_goma) | 636 return _SetupAndRunPerformanceTest(config, opts.path_to_goma) |
579 | 637 |
580 print ('Error: Could not load config file. Double check your changes to ' | 638 print ('Error: Could not load config file. Double check your changes to ' |
581 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n') | 639 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n') |
582 return 1 | 640 return 1 |
583 | 641 |
584 | 642 |
585 if __name__ == '__main__': | 643 if __name__ == '__main__': |
586 sys.exit(main()) | 644 sys.exit(main()) |
OLD | NEW |