Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(237)

Side by Side Diff: tools/perf/core/perf_data_generator.py

Issue 2876073003: Migrate cluster telemetry benchmarks to tools/perf/contrib/cluster_telemetry/ (Closed)
Patch Set: Remove BENCHMARK_NAME_BLACKLIST Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 # pylint: disable=too-many-lines 6 # pylint: disable=too-many-lines
7 7
8 """Script to generate chromium.perf.json and chromium.perf.fyi.json in 8 """Script to generate chromium.perf.json and chromium.perf.fyi.json in
9 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf 9 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
10 directory. Maintaining these files by hand is too unwieldy. 10 directory. Maintaining these files by hand is too unwieldy.
(...skipping 680 matching lines...) Expand 10 before | Expand all | Expand 10 after
691 swarming_dimensions, benchmark.Name(),'reference') 691 swarming_dimensions, benchmark.Name(),'reference')
692 isolated_scripts.append(reference_test) 692 isolated_scripts.append(reference_test)
693 if current_shard == (num_shards - 1): 693 if current_shard == (num_shards - 1):
694 current_shard = 0 694 current_shard = 0
695 else: 695 else:
696 current_shard += 1 696 current_shard += 1
697 697
698 return isolated_scripts 698 return isolated_scripts
699 699
700 700
# List of benchmarks that are to never be run on a waterfall.
# NOTE: keep this list sorted and free of duplicates; the original had
# 'multipage_skpicture_printer' and 'multipage_skpicture_printer_ct' twice.
BENCHMARK_NAME_BLACKLIST = [
    'multipage_skpicture_printer',
    'multipage_skpicture_printer_ct',
    'rasterize_and_record_micro_ct',
    'repaint_ct',
    'skpicture_printer',
    'skpicture_printer_ct',
]
712
713
# Overrides the default 2 hour timeout for swarming tasks, in seconds.
BENCHMARK_SWARMING_TIMEOUTS = {
    'loading.mobile': 14400,  # 4 hours
    'system_health.memory_mobile': 10800,  # 3 hours (comment previously said 4)
}
719 706
720 707
# List of benchmarks that are to never be run with reference builds.
BENCHMARK_REF_BUILD_BLACKLIST = [
    'power.idle_platform',
]
725 712
726 713
727 714
def current_benchmarks():
  """Returns all discovered benchmark classes, sorted by benchmark name.

  Discovers every benchmark_module.Benchmark subclass under
  tools/perf/benchmarks, then drops any whose Name() appears in
  BENCHMARK_NAME_BLACKLIST.

  Returns:
    A list of benchmark classes sorted by their Name().
  """
  benchmarks_dir = os.path.join(
      path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
  # Filter via a set for O(1) membership tests instead of the original
  # pattern, which called list.remove() on all_benchmarks while iterating
  # it inside a nested loop over the blacklist.
  blacklisted = set(BENCHMARK_NAME_BLACKLIST)
  all_benchmarks = [b for b in all_benchmarks if b.Name() not in blacklisted]

  return sorted(all_benchmarks, key=lambda b: b.Name())
744 725
745 726
746 def generate_all_tests(waterfall): 727 def generate_all_tests(waterfall):
747 tests = {} 728 tests = {}
748 729
749 all_benchmarks = current_benchmarks() 730 all_benchmarks = current_benchmarks()
750 benchmark_sharding_map = load_benchmark_sharding_map() 731 benchmark_sharding_map = load_benchmark_sharding_map()
751 732
(...skipping 241 matching lines...) Expand 10 before | Expand all | Expand 10 after
993 return 0 974 return 0
994 else: 975 else:
995 print ('The perf JSON config files are not up-to-date. Please run %s ' 976 print ('The perf JSON config files are not up-to-date. Please run %s '
996 'without --validate-only flag to update the perf JSON ' 977 'without --validate-only flag to update the perf JSON '
997 'configs and benchmark.csv.') % sys.argv[0] 978 'configs and benchmark.csv.') % sys.argv[0]
998 return 1 979 return 1
999 else: 980 else:
1000 update_all_tests([fyi_waterfall, waterfall]) 981 update_all_tests([fyi_waterfall, waterfall])
1001 update_benchmark_csv() 982 update_benchmark_csv()
1002 return 0 983 return 0
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698