Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
| 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf | 7 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf |
| 8 directory. Maintaining these files by hand is too unwieldy. | 8 directory. Maintaining these files by hand is too unwieldy. |
| 9 """ | 9 """ |
| 10 import argparse | 10 import argparse |
| (...skipping 563 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 574 def generate_cplusplus_isolate_script_test(dimension): | 574 def generate_cplusplus_isolate_script_test(dimension): |
| 575 return [ | 575 return [ |
| 576 generate_isolate_script_entry( | 576 generate_isolate_script_entry( |
| 577 [get_swarming_dimension(dimension, shard)], [], name, name, | 577 [get_swarming_dimension(dimension, shard)], [], name, name, |
| 578 ignore_task_failure=False) | 578 ignore_task_failure=False) |
| 579 for name, shard in dimension['perf_tests'] | 579 for name, shard in dimension['perf_tests'] |
| 580 ] | 580 ] |
| 581 | 581 |
| 582 | 582 |
| 583 def generate_telemetry_tests( | 583 def generate_telemetry_tests( |
| 584 tester_config, benchmarks, benchmark_sharding_map, use_whitelist): | 584 tester_config, benchmarks, benchmark_sharding_map, use_whitelist): |
|
nednguyen
2017/04/14 16:32:45
can you add a benchmark_ref_build_blacklist parameter
rnephew (Reviews Here)
2017/04/14 17:22:21
I get "W:590, 0: Dangerous default value BENCHMARK_REF_BUILD_BLACKLIST (builtins.list) as argument" from pylint
| |
| 585 isolated_scripts = [] | 585 isolated_scripts = [] |
| 586 # First determine the browser that you need based on the tester | 586 # First determine the browser that you need based on the tester |
| 587 browser_name = '' | 587 browser_name = '' |
| 588 if tester_config['platform'] == 'android': | 588 if tester_config['platform'] == 'android': |
| 589 browser_name = 'android-chromium' | 589 browser_name = 'android-chromium' |
| 590 elif (tester_config['platform'] == 'win' | 590 elif (tester_config['platform'] == 'win' |
| 591 and tester_config['target_bits'] == 64): | 591 and tester_config['target_bits'] == 64): |
| 592 browser_name = 'release_x64' | 592 browser_name = 'release_x64' |
| 593 else: | 593 else: |
| 594 browser_name ='release' | 594 browser_name ='release' |
| (...skipping 22 matching lines...) Expand all Loading... | |
| 617 if device_affinity is None: | 617 if device_affinity is None: |
| 618 raise Exception('Device affinity for benchmark %s not found' | 618 raise Exception('Device affinity for benchmark %s not found' |
| 619 % benchmark.Name()) | 619 % benchmark.Name()) |
| 620 swarming_dimensions.append( | 620 swarming_dimensions.append( |
| 621 get_swarming_dimension(dimension, device_affinity)) | 621 get_swarming_dimension(dimension, device_affinity)) |
| 622 | 622 |
| 623 test = generate_telemetry_test( | 623 test = generate_telemetry_test( |
| 624 swarming_dimensions, benchmark.Name(), browser_name) | 624 swarming_dimensions, benchmark.Name(), browser_name) |
| 625 isolated_scripts.append(test) | 625 isolated_scripts.append(test) |
| 626 # Now create another executable for this benchmark on the reference browser | 626 # Now create another executable for this benchmark on the reference browser |
| 627 reference_test = generate_telemetry_test( | 627 # if it is not blacklisted from running on the reference browser. |
| 628 swarming_dimensions, benchmark.Name(),'reference') | 628 if benchmark.Name() not in BENCHMARK_REF_BUILD_BLACKLIST: |
| 629 isolated_scripts.append(reference_test) | 629 reference_test = generate_telemetry_test( |
| 630 if current_shard == (num_shards - 1): | 630 swarming_dimensions, benchmark.Name(),'reference') |
| 631 current_shard = 0 | 631 isolated_scripts.append(reference_test) |
| 632 else: | 632 if current_shard == (num_shards - 1): |
| 633 current_shard += 1 | 633 current_shard = 0 |
| 634 else: | |
| 635 current_shard += 1 | |
| 634 | 636 |
| 635 return isolated_scripts | 637 return isolated_scripts |
| 636 | 638 |
| 637 | 639 |
| 638 BENCHMARK_NAME_WHITELIST = set([ | 640 BENCHMARK_NAME_WHITELIST = set([ |
| 639 u'smoothness.top_25_smooth', | 641 u'smoothness.top_25_smooth', |
| 640 u'sunspider', | 642 u'sunspider', |
| 641 u'system_health.webview_startup', | 643 u'system_health.webview_startup', |
| 642 u'page_cycler_v2.intl_hi_ru', | 644 u'page_cycler_v2.intl_hi_ru', |
| 643 u'dromaeo.cssqueryjquery', | 645 u'dromaeo.cssqueryjquery', |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 661 } | 663 } |
| 662 | 664 |
| 663 # Certain swarming bots are not sharding correctly with the new device affinity | 665 # Certain swarming bots are not sharding correctly with the new device affinity |
| 664 # algorithm. Reverting to legacy algorithm to try and get them to complete. | 666 # algorithm. Reverting to legacy algorithm to try and get them to complete. |
| 665 # See crbug.com/670284 | 667 # See crbug.com/670284 |
| 666 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ | 668 LEGACY_DEVICE_AFFIINITY_ALGORITHM = [ |
| 667 'Win Zenbook Perf', | 669 'Win Zenbook Perf', |
| 668 'Win 10 High-DPI Perf', | 670 'Win 10 High-DPI Perf', |
| 669 ] | 671 ] |
| 670 | 672 |
| 673 # List of benchmarks that are to never be run with reference builds. | |
| 674 BENCHMARK_REF_BUILD_BLACKLIST = [ | |
| 675 'power.idle_platform', | |
| 676 ] | |
| 677 | |
| 678 | |
| 671 def current_benchmarks(use_whitelist): | 679 def current_benchmarks(use_whitelist): |
| 672 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') | 680 benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks') |
| 673 top_level_dir = os.path.dirname(benchmarks_dir) | 681 top_level_dir = os.path.dirname(benchmarks_dir) |
| 674 | 682 |
| 675 all_benchmarks = discover.DiscoverClasses( | 683 all_benchmarks = discover.DiscoverClasses( |
| 676 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, | 684 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, |
| 677 index_by_class_name=True).values() | 685 index_by_class_name=True).values() |
| 678 # Remove all blacklisted benchmarks | 686 # Remove all blacklisted benchmarks |
| 679 for blacklisted in BENCHMARK_NAME_BLACKLIST: | 687 for blacklisted in BENCHMARK_NAME_BLACKLIST: |
| 680 for benchmark in all_benchmarks: | 688 for benchmark in all_benchmarks: |
| (...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 966 return 0 | 974 return 0 |
| 967 else: | 975 else: |
| 968 print ('The perf JSON config files are not up-to-date. Please run %s ' | 976 print ('The perf JSON config files are not up-to-date. Please run %s ' |
| 969 'without --validate-only flag to update the perf JSON ' | 977 'without --validate-only flag to update the perf JSON ' |
| 970 'configs and benchmark.csv.') % sys.argv[0] | 978 'configs and benchmark.csv.') % sys.argv[0] |
| 971 return 1 | 979 return 1 |
| 972 else: | 980 else: |
| 973 update_all_tests([fyi_waterfall, waterfall]) | 981 update_all_tests([fyi_waterfall, waterfall]) |
| 974 update_benchmark_csv() | 982 update_benchmark_csv() |
| 975 return 0 | 983 return 0 |
| OLD | NEW |