Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(540)

Side by Side Diff: tools/perf/generate_perf_json.py

Issue 2621383003: Add custom swarming timeouts for chromium.perf.* tests. (Closed)
Patch Set: "fix" — created 3 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « testing/buildbot/chromium.perf.fyi.json ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
7 the src/testing/buildbot directory. Maintaining these files by hand is 7 the src/testing/buildbot directory. Maintaining these files by hand is
8 too unwieldy. 8 too unwieldy.
9 """ 9 """
10 10
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
431 ('cc_perftests', 2), 431 ('cc_perftests', 2),
432 ('load_library_perf_tests', 2), 432 ('load_library_perf_tests', 2),
433 ('tracing_perftests', 2)] 433 ('tracing_perftests', 2)]
434 } 434 }
435 ]) 435 ])
436 436
437 return waterfall 437 return waterfall
438 438
439 439
def generate_isolate_script_entry(swarming_dimensions, test_args,
    isolate_name, step_name, override_compile_targets=None,
    swarming_timeout=None):
  """Builds one isolated-script test entry for the buildbot JSON config.

  Args:
    swarming_dimensions: list of dimension-set dicts; if falsy, no
        'swarming' section is emitted at all.
    test_args: list of command-line args passed to the test.
    isolate_name: name of the isolate target to run.
    step_name: buildbot step name for this test.
    override_compile_targets: optional list of compile targets that
        replace the default ones for this test.
    swarming_timeout: optional hard timeout in seconds; falls back to
        the 2-hour default when None (or 0).

  Returns:
    A dict in the testing/buildbot JSON test-spec format.
  """
  result = {
    'args': test_args,
    'isolate_name': isolate_name,
    'name': step_name,
  }
  if override_compile_targets:
    result['override_compile_targets'] = override_compile_targets
  if swarming_dimensions:
    result['swarming'] = {
      # Always say this is true regardless of whether the tester
      # supports swarming. It doesn't hurt.
      'can_use_on_swarming_builders': True,
      'expiration': 21600,
      # Idiomatic 'or' instead of conditional expression; any falsy
      # timeout (None/0) means "use the 2-hour default".
      'hard_timeout': swarming_timeout or 7200,
      'io_timeout': 3600,
      'dimension_sets': swarming_dimensions,
    }
  return result
460 461
461 462
def generate_telemetry_test(swarming_dimensions, benchmark_name, browser):
  """Builds the isolated-script entry for one telemetry benchmark/browser."""
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.
  is_reference = (browser == 'reference')

  test_args = [
      benchmark_name,
      '-v',
      '--upload-results',
      '--output-format=chartjson',
      '--browser=%s' % browser
  ]
  # When this is enabled on more than just windows machines we will need
  # --device=android

  if is_reference:
    test_args.append('--output-trace-tag=_ref')
  step_name = benchmark_name + '.reference' if is_reference else benchmark_name

  return generate_isolate_script_entry(
      swarming_dimensions, test_args, 'telemetry_perf_tests',
      step_name, ['telemetry_perf_tests'],
      swarming_timeout=BENCHMARK_SWARMING_TIMEOUTS.get(benchmark_name))
486 488
487 489
def script_test_enabled_on_tester(master, test, tester_name, shard):
  """Returns True iff `test` runs on (master, tester_name, shard)."""
  # A test is enabled when the tester appears in the test's per-master
  # list and the given shard is one of its configured shards.
  return any(
      enabled['name'] == tester_name and shard in enabled['shards']
      for enabled in test['testers'].get(master, []))
494 496
495 497
(...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after
595 'multipage_skpicture_printer', 597 'multipage_skpicture_printer',
596 'multipage_skpicture_printer_ct', 598 'multipage_skpicture_printer_ct',
597 'rasterize_and_record_micro_ct', 599 'rasterize_and_record_micro_ct',
598 'repaint_ct', 600 'repaint_ct',
599 'multipage_skpicture_printer', 601 'multipage_skpicture_printer',
600 'multipage_skpicture_printer_ct', 602 'multipage_skpicture_printer_ct',
601 'skpicture_printer', 603 'skpicture_printer',
602 'skpicture_printer_ct', 604 'skpicture_printer_ct',
603 ] 605 ]
604 606
# Per-benchmark overrides for the default 2-hour swarming hard timeout,
# keyed by benchmark name, value in seconds.
BENCHMARK_SWARMING_TIMEOUTS = {
    'loading.mobile': 14400,  # 4 hours
}
611
# Certain swarming bots are not sharding correctly with the new device
# affinity algorithm. Reverting to legacy algorithm to try and get them
# to complete. See crbug.com/670284
# NOTE(review): the identifier misspells "AFFINITY" as "AFFIINITY";
# renaming would break external references — confirm before fixing.
LEGACY_DEVICE_AFFIINITY_ALGORITHM = [
    'Win Zenbook Perf',
    'Win 10 High-DPI Perf',
]
612 619
613 def current_benchmarks(use_whitelist): 620 def current_benchmarks(use_whitelist):
614 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') 621 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
(...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after
750 waterfall['name'] = 'chromium.perf' 757 waterfall['name'] = 'chromium.perf'
751 fyi_waterfall = get_fyi_waterfall_config() 758 fyi_waterfall = get_fyi_waterfall_config()
752 fyi_waterfall['name'] = 'chromium.perf.fyi' 759 fyi_waterfall['name'] = 'chromium.perf.fyi'
753 760
754 generate_all_tests(fyi_waterfall) 761 generate_all_tests(fyi_waterfall)
755 generate_all_tests(waterfall) 762 generate_all_tests(waterfall)
756 return 0 763 return 0
757 764
if __name__ == '__main__':
  # Propagate main()'s return code as the process exit status.
  sys.exit(main())
OLDNEW
« no previous file with comments | « testing/buildbot/chromium.perf.fyi.json ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698