Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(91)

Side by Side Diff: tools/perf/core/perf_data_generator.py

Issue 2822723002: [Telemetry] Do not run power.idle_platform for reference build runs (Closed)
Patch Set: [Telemetry] Do not run power.idle_platform for reference build runs Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
8 directory. Maintaining these files by hand is too unwieldy. 8 directory. Maintaining these files by hand is too unwieldy.
9 """ 9 """
10 import argparse 10 import argparse
(...skipping 563 matching lines...) Expand 10 before | Expand all | Expand 10 after
574 def generate_cplusplus_isolate_script_test(dimension): 574 def generate_cplusplus_isolate_script_test(dimension):
575 return [ 575 return [
576 generate_isolate_script_entry( 576 generate_isolate_script_entry(
577 [get_swarming_dimension(dimension, shard)], [], name, name, 577 [get_swarming_dimension(dimension, shard)], [], name, name,
578 ignore_task_failure=False) 578 ignore_task_failure=False)
579 for name, shard in dimension['perf_tests'] 579 for name, shard in dimension['perf_tests']
580 ] 580 ]
581 581
582 582
583 def generate_telemetry_tests( 583 def generate_telemetry_tests(
584 tester_config, benchmarks, benchmark_sharding_map, use_whitelist): 584 tester_config, benchmarks, benchmark_sharding_map, use_whitelist,
585 benchmark_ref_build_blacklist=None):
nednguyen 2017/04/14 20:45:59 remove the default param here
rnephew (Reviews Here) 2017/04/14 21:40:17 Done.
585 isolated_scripts = [] 586 isolated_scripts = []
586 # First determine the browser that you need based on the tester 587 # First determine the browser that you need based on the tester
587 browser_name = '' 588 browser_name = ''
588 if tester_config['platform'] == 'android': 589 if tester_config['platform'] == 'android':
589 browser_name = 'android-chromium' 590 browser_name = 'android-chromium'
590 elif (tester_config['platform'] == 'win' 591 elif (tester_config['platform'] == 'win'
591 and tester_config['target_bits'] == 64): 592 and tester_config['target_bits'] == 64):
592 browser_name = 'release_x64' 593 browser_name = 'release_x64'
593 else: 594 else:
594 browser_name ='release' 595 browser_name ='release'
(...skipping 22 matching lines...) Expand all
617 if device_affinity is None: 618 if device_affinity is None:
618 raise Exception('Device affinity for benchmark %s not found' 619 raise Exception('Device affinity for benchmark %s not found'
619 % benchmark.Name()) 620 % benchmark.Name())
620 swarming_dimensions.append( 621 swarming_dimensions.append(
621 get_swarming_dimension(dimension, device_affinity)) 622 get_swarming_dimension(dimension, device_affinity))
622 623
623 test = generate_telemetry_test( 624 test = generate_telemetry_test(
624 swarming_dimensions, benchmark.Name(), browser_name) 625 swarming_dimensions, benchmark.Name(), browser_name)
625 isolated_scripts.append(test) 626 isolated_scripts.append(test)
626 # Now create another executable for this benchmark on the reference browser 627 # Now create another executable for this benchmark on the reference browser
627 reference_test = generate_telemetry_test( 628 # if it is not blacklisted from running on the reference browser.
628 swarming_dimensions, benchmark.Name(),'reference') 629 if benchmark.Name() not in benchmark_ref_build_blacklist:
629 isolated_scripts.append(reference_test) 630 reference_test = generate_telemetry_test(
630 if current_shard == (num_shards - 1): 631 swarming_dimensions, benchmark.Name(),'reference')
631 current_shard = 0 632 isolated_scripts.append(reference_test)
632 else: 633 if current_shard == (num_shards - 1):
633 current_shard += 1 634 current_shard = 0
635 else:
636 current_shard += 1
634 637
635 return isolated_scripts 638 return isolated_scripts
636 639
637 640
638 BENCHMARK_NAME_WHITELIST = set([ 641 BENCHMARK_NAME_WHITELIST = set([
639 u'smoothness.top_25_smooth', 642 u'smoothness.top_25_smooth',
640 u'sunspider', 643 u'sunspider',
641 u'system_health.webview_startup', 644 u'system_health.webview_startup',
642 u'page_cycler_v2.intl_hi_ru', 645 u'page_cycler_v2.intl_hi_ru',
643 u'dromaeo.cssqueryjquery', 646 u'dromaeo.cssqueryjquery',
(...skipping 17 matching lines...) Expand all
661 } 664 }
662 665
663 # Certain swarming bots are not sharding correctly with the new device affinity 666 # Certain swarming bots are not sharding correctly with the new device affinity
664 # algorithm. Reverting to legacy algorithm to try and get them to complete. 667 # algorithm. Reverting to legacy algorithm to try and get them to complete.
665 # See crbug.com/670284 668 # See crbug.com/670284
666 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ 669 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [
667 'Win Zenbook Perf', 670 'Win Zenbook Perf',
668 'Win 10 High-DPI Perf', 671 'Win 10 High-DPI Perf',
669 ] 672 ]
670 673
674 # List of benchmarks that are to never be run with reference builds.
675 BENCHMARK_REF_BUILD_BLACKLIST = [
676 'power.idle_platform',
677 ]
678
679
671 def current_benchmarks(use_whitelist): 680 def current_benchmarks(use_whitelist):
672 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') 681 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
673 top_level_dir = os.path.dirname(benchmarks_dir) 682 top_level_dir = os.path.dirname(benchmarks_dir)
674 683
675 all_benchmarks = discover.DiscoverClasses( 684 all_benchmarks = discover.DiscoverClasses(
676 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, 685 benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
677 index_by_class_name=True).values() 686 index_by_class_name=True).values()
678 # Remove all blacklisted benchmarks 687 # Remove all blacklisted benchmarks
679 for blacklisted in BENCHMARK_NAME_BLACKLIST: 688 for blacklisted in BENCHMARK_NAME_BLACKLIST:
680 for benchmark in all_benchmarks: 689 for benchmark in all_benchmarks:
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
761 if config.get('swarming', False): 770 if config.get('swarming', False):
762 # Our current configuration only ever has one set of swarming dimensions 771 # Our current configuration only ever has one set of swarming dimensions
763 # Make sure this still holds true 772 # Make sure this still holds true
764 if len(config['swarming_dimensions']) > 1: 773 if len(config['swarming_dimensions']) > 1:
765 raise Exception('Invalid assumption on number of swarming dimensions') 774 raise Exception('Invalid assumption on number of swarming dimensions')
766 # Generate benchmarks 775 # Generate benchmarks
767 sharding_map = benchmark_sharding_map 776 sharding_map = benchmark_sharding_map
768 if name in LEGACY_DEVICE_AFFIINITY_ALGORITHM: 777 if name in LEGACY_DEVICE_AFFIINITY_ALGORITHM:
769 sharding_map = None 778 sharding_map = None
770 isolated_scripts = generate_telemetry_tests( 779 isolated_scripts = generate_telemetry_tests(
771 config, benchmark_list, sharding_map, use_whitelist) 780 config, benchmark_list, sharding_map, use_whitelist,
781 benchmark_ref_build_blacklist=BENCHMARK_REF_BUILD_BLACKLIST)
772 # Generate swarmed non-telemetry tests if present 782 # Generate swarmed non-telemetry tests if present
773 if config['swarming_dimensions'][0].get('perf_tests', False): 783 if config['swarming_dimensions'][0].get('perf_tests', False):
774 isolated_scripts += generate_cplusplus_isolate_script_test( 784 isolated_scripts += generate_cplusplus_isolate_script_test(
775 config['swarming_dimensions'][0]) 785 config['swarming_dimensions'][0])
776 tests[name] = { 786 tests[name] = {
777 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name']) 787 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
778 } 788 }
779 else: 789 else:
780 # scripts are only currently run in addition to the main waterfall. They 790 # scripts are only currently run in addition to the main waterfall. They
781 # are currently the only thing generated in the perf json file. 791 # are currently the only thing generated in the perf json file.
(...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after
966 return 0 976 return 0
967 else: 977 else:
968 print ('The perf JSON config files are not up-to-date. Please run %s ' 978 print ('The perf JSON config files are not up-to-date. Please run %s '
969 'without --validate-only flag to update the perf JSON ' 979 'without --validate-only flag to update the perf JSON '
970 'configs and benchmark.csv.') % sys.argv[0] 980 'configs and benchmark.csv.') % sys.argv[0]
971 return 1 981 return 1
972 else: 982 else:
973 update_all_tests([fyi_waterfall, waterfall]) 983 update_all_tests([fyi_waterfall, waterfall])
974 update_benchmark_csv() 984 update_benchmark_csv()
975 return 0 985 return 0
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698