Chromium Code Reviews

Unified Diff: tools/perf/core/perf_data_generator.py

Issue 2842623002: Remove whitelist from generate_perf_data (Closed)
Patch Set: Created 3 years, 7 months ago
 #!/usr/bin/env python
 # Copyright 2016 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
 directory. Maintaining these files by hand is too unwieldy.
 """
 import argparse
(...skipping 104 matching lines...)


 def add_builder(waterfall, name, additional_compile_targets=None):
   waterfall['builders'][name] = added = {}
   if additional_compile_targets:
     added['additional_compile_targets'] = additional_compile_targets

   return waterfall

 def add_tester(waterfall, name, perf_id, platform, target_bits=64,
-               num_host_shards=1, num_device_shards=1, swarming=None,
-               use_whitelist=False):
+               num_host_shards=1, num_device_shards=1, swarming=None):
   del perf_id # this will be needed
   waterfall['testers'][name] = {
       'platform': platform,
       'num_device_shards': num_device_shards,
       'num_host_shards': num_host_shards,
       'target_bits': target_bits,
-      'use_whitelist': use_whitelist
   }

   if swarming:
     waterfall['testers'][name]['swarming_dimensions'] = swarming
     waterfall['testers'][name]['swarming'] = True

   return waterfall


 def get_fyi_waterfall_config():
(...skipping 449 matching lines...)
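For reference, after the add_tester change above a tester entry carries no whitelist flag. A minimal sketch of the resulting dict (the bot name, perf_id, and shard counts here are hypothetical, not taken from the patch):

  waterfall = {'builders': {}, 'testers': {}}
  add_tester(waterfall, 'Example Android Perf', 'example-perf-id', 'android',
             target_bits=32, num_device_shards=7)
  # waterfall['testers']['Example Android Perf'] now maps to:
  #   {'platform': 'android', 'num_device_shards': 7,
  #    'num_host_shards': 1, 'target_bits': 32}
  # No 'use_whitelist' key is emitted anymore.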
 def generate_cplusplus_isolate_script_test(dimension):
   return [
       generate_isolate_script_entry(
           [get_swarming_dimension(dimension, shard)], [], name, name,
           ignore_task_failure=False)
       for name, shard in dimension['perf_tests']
   ]


 def generate_telemetry_tests(tester_config, benchmarks, benchmark_sharding_map,
-                             use_whitelist, benchmark_ref_build_blacklist):
+                             benchmark_ref_build_blacklist):
   isolated_scripts = []
   # First determine the browser that you need based on the tester
   browser_name = ''
   if tester_config['platform'] == 'android':
     browser_name = 'android-chromium'
   elif (tester_config['platform'] == 'win'
         and tester_config['target_bits'] == 64):
     browser_name = 'release_x64'
   else:
     browser_name ='release'

   num_shards = len(tester_config['swarming_dimensions'][0]['device_ids'])
   current_shard = 0
   for benchmark in benchmarks:
     # First figure out swarming dimensions this test needs to be triggered on.
     # For each set of dimensions it is only triggered on one of the devices
     swarming_dimensions = []
     for dimension in tester_config['swarming_dimensions']:
       device_affinity = None
       if benchmark_sharding_map:
         sharding_map = benchmark_sharding_map.get(str(num_shards), None)
-        if not sharding_map and not use_whitelist:
+        if not sharding_map:
           raise Exception('Invalid number of shards, generate new sharding map')
-        if use_whitelist:
-          device_affinity = current_shard
-        else:
-          device_affinity = sharding_map.get(benchmark.Name(), None)
+        device_affinity = sharding_map.get(benchmark.Name(), None)
       else:
         # No sharding map was provided, default to legacy device
         # affinity algorithm
         device_affinity = bot_utils.GetDeviceAffinity(
             num_shards, benchmark.Name())
       if device_affinity is None:
         raise Exception('Device affinity for benchmark %s not found'
                         % benchmark.Name())
       swarming_dimensions.append(
           get_swarming_dimension(dimension, device_affinity))

     test = generate_telemetry_test(
         swarming_dimensions, benchmark.Name(), browser_name)
     isolated_scripts.append(test)
     # Now create another executable for this benchmark on the reference browser
     # if it is not blacklisted from running on the reference browser.
     if benchmark.Name() not in benchmark_ref_build_blacklist:
       reference_test = generate_telemetry_test(
           swarming_dimensions, benchmark.Name(),'reference')
       isolated_scripts.append(reference_test)
     if current_shard == (num_shards - 1):
       current_shard = 0
     else:
       current_shard += 1

   return isolated_scripts


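A note on the fallback path above: bot_utils.GetDeviceAffinity is not part of this diff, so its body is not shown. Device-affinity schemes of this kind are typically a stable hash of the benchmark name modulo the shard count, so a benchmark always lands on the same device across runs. A sketch under that assumption (a hypothetical helper, not the actual bot_utils code):

  import hashlib

  def get_device_affinity_sketch(num_shards, benchmark_name):
    # Stable hash: identical input always yields the same shard index,
    # keeping per-device results comparable between builds.
    digest = hashlib.sha1(benchmark_name.encode('utf-8')).hexdigest()
    return int(digest, 16) % num_shards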
-BENCHMARK_NAME_WHITELIST = set([
-    u'smoothness.top_25_smooth',
-    u'sunspider',
-    u'system_health.webview_startup',
-    u'page_cycler_v2.intl_hi_ru',
-    u'dromaeo.cssqueryjquery',
-])
-
 # List of benchmarks that are to never be run on a waterfall.
 BENCHMARK_NAME_BLACKLIST = [
     'multipage_skpicture_printer',
     'multipage_skpicture_printer_ct',
     'rasterize_and_record_micro_ct',
     'repaint_ct',
     'multipage_skpicture_printer',
     'multipage_skpicture_printer_ct',
     'skpicture_printer',
     'skpicture_printer_ct',
 ]

+
 # Overrides the default 2 hour timeout for swarming tasks.
 BENCHMARK_SWARMING_TIMEOUTS = {
     'loading.mobile': 14400, # 4 hours
     'system_health.memory_mobile': 10800, # 3 hours
 }

+
 # List of benchmarks that are to never be run with reference builds.
 BENCHMARK_REF_BUILD_BLACKLIST = [
     'power.idle_platform',
 ]


-def current_benchmarks(use_whitelist):
+
+def current_benchmarks():
   benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
   top_level_dir = os.path.dirname(benchmarks_dir)

   all_benchmarks = discover.DiscoverClasses(
       benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
       index_by_class_name=True).values()
   # Remove all blacklisted benchmarks
   for blacklisted in BENCHMARK_NAME_BLACKLIST:
     for benchmark in all_benchmarks:
       if benchmark.Name() == blacklisted:
         all_benchmarks.remove(benchmark)
         break

-  if use_whitelist:
-    all_benchmarks = (
-        bench for bench in all_benchmarks
-        if bench.Name() in BENCHMARK_NAME_WHITELIST)
   return sorted(all_benchmarks, key=lambda b: b.Name())

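The nested removal loop above mutates all_benchmarks while scanning it, relying on the break to stay safe. An equivalent single-pass filter (an alternative sketch, not part of this change, and assuming benchmark names are unique) would be:

  blacklisted_names = set(BENCHMARK_NAME_BLACKLIST)
  all_benchmarks = [b for b in all_benchmarks
                    if b.Name() not in blacklisted_names]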

 # Returns a sorted list of (benchmark, avg_runtime) pairs for every
 # benchmark in the all_benchmarks list where avg_runtime is in seconds. Also
 # returns a list of benchmarks whose run time have not been seen before
 def get_sorted_benchmark_list_by_time(all_benchmarks):
   runtime_list = []
   benchmark_avgs = {}
   new_benchmarks = []
(...skipping 37 matching lines...)
   # device affinity algorithm
   for benchmark in new_benchmarks:
     device_affinity = bot_utils.GetDeviceAffinity(num_shards, benchmark.Name())
     benchmark_to_shard_dict[benchmark.Name()] = device_affinity
   return benchmark_to_shard_dict


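Most of shard_benchmarks is elided above ("skipping 37 matching lines"). Given that get_sorted_benchmark_list_by_time returns benchmarks with known average runtimes plus a list of new ones, a natural reading is a greedy balancing pass: assign each timed benchmark, longest first, to the currently least-loaded shard, with new benchmarks falling back to hash-based affinity as shown. A sketch of the greedy step under that assumption (not the elided implementation itself):

  import heapq

  def shard_by_runtime_sketch(num_shards, benchmarks_by_time):
    # benchmarks_by_time: (benchmark_name, avg_runtime_seconds) pairs,
    # longest runtime first. Each benchmark goes onto the shard with
    # the smallest accumulated runtime so far.
    heap = [(0.0, shard) for shard in range(num_shards)]
    heapq.heapify(heap)
    benchmark_to_shard = {}
    for name, runtime in benchmarks_by_time:
      load, shard = heapq.heappop(heap)
      benchmark_to_shard[name] = shard
      heapq.heappush(heap, (load + runtime, shard))
    return benchmark_to_shard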
 def generate_all_tests(waterfall):
   tests = {}

-  all_benchmarks = current_benchmarks(False)
-  whitelist_benchmarks = current_benchmarks(True)
+  all_benchmarks = current_benchmarks()
   # Get benchmark sharding according to common sharding configurations
   # Currently we only have bots sharded 5 directions and 1 direction
   benchmark_sharding_map = {}
   benchmark_sharding_map['22'] = shard_benchmarks(22, all_benchmarks)
   benchmark_sharding_map['5'] = shard_benchmarks(5, all_benchmarks)
   benchmark_sharding_map['1'] = shard_benchmarks(1, all_benchmarks)
   benchmark_sharding_map['21'] = shard_benchmarks(21, all_benchmarks)

   for name, config in waterfall['testers'].iteritems():
-    use_whitelist = config['use_whitelist']
     benchmark_list = all_benchmarks
-    if use_whitelist:
-      benchmark_list = whitelist_benchmarks
     if config.get('swarming', False):
       # Our current configuration only ever has one set of swarming dimensions
       # Make sure this still holds true
       if len(config['swarming_dimensions']) > 1:
         raise Exception('Invalid assumption on number of swarming dimensions')
       # Generate benchmarks
       sharding_map = benchmark_sharding_map
       isolated_scripts = generate_telemetry_tests(
-          config, benchmark_list, sharding_map, use_whitelist,
+          config, benchmark_list, sharding_map,
           BENCHMARK_REF_BUILD_BLACKLIST)
       # Generate swarmed non-telemetry tests if present
       if config['swarming_dimensions'][0].get('perf_tests', False):
         isolated_scripts += generate_cplusplus_isolate_script_test(
             config['swarming_dimensions'][0])
       tests[name] = {
           'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
       }
     else:
       # scripts are only currently run in addition to the main waterfall. They
(...skipping 84 matching lines...)
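The sharding map built above is keyed by the stringified device count, which is what generate_telemetry_tests looks up via str(num_shards); a tester whose device_ids length is not one of the precomputed counts (1, 5, 21, 22) hits the "Invalid number of shards" exception. A hypothetical illustration (the device ids are made up):

  config = {'swarming_dimensions': [{'device_ids': [
      'build1-a', 'build1-b', 'build1-c', 'build1-d', 'build1-e']}]}
  num_shards = len(config['swarming_dimensions'][0]['device_ids'])  # 5
  sharding_map = benchmark_sharding_map.get(str(num_shards), None)
  # -> the precomputed shard_benchmarks(5, all_benchmarks) map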
 }


 # Returns a dictionary mapping waterfall benchmark name to benchmark owner
 # metadata
 def get_all_waterfall_benchmarks_metadata():
   return get_all_benchmarks_metadata(NON_TELEMETRY_BENCHMARKS)


 def get_all_benchmarks_metadata(metadata):
-  benchmark_list = current_benchmarks(False)
+  benchmark_list = current_benchmarks()

   for benchmark in benchmark_list:
     emails = decorators.GetEmails(benchmark)
     if emails:
       emails = ', '.join(emails)
     metadata[benchmark.Name()] = BenchmarkMetadata(
         emails, decorators.GetComponent(benchmark))
   return metadata


(...skipping 80 matching lines...)
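get_all_benchmarks_metadata fills in and returns the dict passed to it, which is why get_all_waterfall_benchmarks_metadata seeds it with the hand-maintained NON_TELEMETRY_BENCHMARKS entries. Assuming BenchmarkMetadata's fields match the positional constructor call above (emails, component), usage looks like this (the benchmark name in the lookup is hypothetical):

  metadata = get_all_waterfall_benchmarks_metadata()
  owner = metadata['some_benchmark'].emails  # assumed field name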
       return 0
     else:
       print ('The perf JSON config files are not up-to-date. Please run %s '
              'without --validate-only flag to update the perf JSON '
              'configs and benchmark.csv.') % sys.argv[0]
       return 1
   else:
     update_all_tests([fyi_waterfall, waterfall])
     update_benchmark_csv()
     return 0
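As the message above indicates, running the script with --validate-only only checks that the generated chromium.perf.json, chromium.perf.fyi.json, and benchmark.csv files are up to date (exit code 0, or 1 with the warning above); running it without flags regenerates all three files.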
