Chromium Code Reviews

Unified Diff: tools/perf/core/perf_json_generator.py

Issue 2754883002: Generating benchmark.csv file (Closed)
Patch Set: Generating benchmark.csv file Created 3 years, 9 months ago
Index: tools/perf/core/perf_json_generator.py
diff --git a/tools/perf/core/perf_json_generator.py b/tools/perf/core/perf_json_generator.py
index 5e908f6f53d8be4f045c53ba7daf1b58e89ebc84..2540089efa42f1145ecc295afe80d49a4dcf9716 100755
--- a/tools/perf/core/perf_json_generator.py
+++ b/tools/perf/core/perf_json_generator.py
@@ -4,10 +4,12 @@
# found in the LICENSE file.
"""Script to generate chromium.perf.json and chromium.perf.fyi.json in
-the src/testing/buildbot directory. Maintaining these files by hand is
-too unwieldy.
+the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
+directory. Maintaining these files by hand is too unwieldy.
"""
import argparse
+import collections
+import csv
import json
import os
import sys
@@ -16,6 +18,7 @@ from chrome_telemetry_build import chromium_config
sys.path.append(chromium_config.GetTelemetryDir())
from telemetry import benchmark as benchmark_module
+from telemetry import decorators
from telemetry.core import discover
from telemetry.util import bot_utils
@@ -654,19 +657,21 @@ LEGACY_DEVICE_AFFIINITY_ALGORITHM = [
    'Win 10 High-DPI Perf',
]
-def current_benchmarks(use_whitelist):
+def current_benchmarks(use_whitelist, use_blacklist=True):
  benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)
  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
-  # Remove all blacklisted benchmarks
-  for blacklisted in BENCHMARK_NAME_BLACKLIST:
-    for benchmark in all_benchmarks:
-      if benchmark.Name() == blacklisted:
-        all_benchmarks.remove(benchmark)
-        break
+
+  if use_blacklist:
+    # Remove all blacklisted benchmarks
+    for blacklisted in BENCHMARK_NAME_BLACKLIST:
+      for benchmark in all_benchmarks:
+        if benchmark.Name() == blacklisted:
+          all_benchmarks.remove(benchmark)
+          break
  if use_whitelist:
    all_benchmarks = (
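
Aside: the remove-during-iteration in the blacklist loop above is only safe because of the break after each removal. Assuming benchmark names are unique, an equivalent filter that avoids mutating the list mid-iteration would be (a sketch, not part of this patch):

    if use_blacklist:
      # Drop every benchmark whose name appears in the blacklist.
      all_benchmarks = [b for b in all_benchmarks
                        if b.Name() not in BENCHMARK_NAME_BLACKLIST]
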
@@ -813,11 +818,66 @@ def src_dir():
      os.path.dirname(os.path.dirname(file_path))))
+BenchmarkMetadata = collections.namedtuple(
+    'BenchmarkMetadata', 'emails component')
+NON_TELEMETRY_BENCHMARKS = {
nednguyen 2017/03/16 21:08:17 My concern here is that there is no way to make su…
ashleymarie1 2017/03/16 21:18:02 I, too, am concerned about that. I'm having troubl…
nednguyen 2017/03/16 21:23:43 One idea I have is make a method that return a map…
ashleymarie1 2017/03/17 15:30:19 So this solves the problem of a user adding a test…
+ "angle_perftests": BenchmarkMetadata("jmadill@chromium.org", None),
+ "cc_perftest": BenchmarkMetadata("enne@chromium.org", None),
+ "gpu_perftests": BenchmarkMetadata("reveman@chromium.org", None),
+ "tracing_perftests": BenchmarkMetadata(
+ "kkraynov@chromium.org, primiano@chromium.org", None),
+ "indexeddb_perf": BenchmarkMetadata("cmumford@chromium.org", None),
+ "load_library_perf_tests": BenchmarkMetadata(None, None),
+ "media_perftests": BenchmarkMetadata("crouleau@chromium.org", None),
+ "performance_browser_tests": BenchmarkMetadata(
+ "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None)
+}
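
A sketch of the idea nednguyen floats in the thread above: expose the metadata through a method that returns the map, so a later check can verify that every non-telemetry test scheduled on the waterfalls has an entry here. The helper names and the source of scheduled_test_names are assumptions, not part of this patch:

    # Hypothetical sketch, not in this patch.
    def get_non_telemetry_benchmarks():
      return NON_TELEMETRY_BENCHMARKS

    def validate_non_telemetry_benchmarks(scheduled_test_names):
      # scheduled_test_names would be collected from the generated
      # waterfall configs; how to gather it is an assumption here.
      missing = set(scheduled_test_names) - set(NON_TELEMETRY_BENCHMARKS)
      if missing:
        raise ValueError('Benchmarks without owner metadata: %s' %
                         ', '.join(sorted(missing)))
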
+
+
+def update_benchmark_csv():
+  """Updates go/chrome-benchmarks.
+
+  Updates tools/perf/benchmark.csv containing the current benchmark names,
+  owners, and components.
+  """
+  benchmark_list = current_benchmarks(
+      use_whitelist=False, use_blacklist=False)
+
+  header_data = [['AUTOGENERATED FILE DO NOT EDIT'],
+                 ['See //tools/perf/generate_perf_data.py to make changes'],
+                 ['Benchmark name', 'Individual owners', 'Component']]
+  data = []
+  for benchmark_name in NON_TELEMETRY_BENCHMARKS:
+    data.append([
+        benchmark_name,
+        NON_TELEMETRY_BENCHMARKS[benchmark_name].emails,
+        NON_TELEMETRY_BENCHMARKS[benchmark_name].component
+    ])
+  for benchmark in benchmark_list:
+    emails = decorators.GetEmails(benchmark)
+    if emails:
+      emails = ", ".join(emails)
+    data.append([
+        benchmark.Name(),
+        emails,
+        decorators.GetComponent(benchmark)
+    ])
+
+  data = sorted(data, key=lambda b: b[0])
+  data = header_data + data
+  perf_dir = os.path.join(src_dir(), 'tools', 'perf')
+  benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
+  with open(benchmark_file, 'wb') as f:
+    writer = csv.writer(f)
+    writer.writerows(data)
+
+
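
For orientation, the rows written above produce a file that begins like this; the data row is an illustrative example derived from the angle_perftests entry (csv.writer emits None as an empty field), and its exact position depends on how the benchmark names sort:

    AUTOGENERATED FILE DO NOT EDIT
    See //tools/perf/generate_perf_data.py to make changes
    Benchmark name,Individual owners,Component
    angle_perftests,jmadill@chromium.org,
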
def main(args):
  parser = argparse.ArgumentParser(
-      description=('Generate perf test\' json config. This need to be done '
-                   'anytime you add/remove any existing benchmarks in '
-                   'tools/perf/benchmarks.'))
+      description=('Generate perf tests\' json config and benchmark.csv. '
+                   'This needs to be done anytime you add/remove any '
+                   'existing benchmarks in tools/perf/benchmarks.'))
  parser.add_argument(
      '--validate-only', action='store_true', default=False,
      help=('Validate whether the perf json generated will be the same as the '
@@ -842,4 +902,5 @@ def main(args):
  else:
    update_all_tests(fyi_waterfall)
    update_all_tests(waterfall)
+  update_benchmark_csv()
  return 0
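
To regenerate the configs and benchmark.csv after changing benchmarks, run the generator; the direct invocation below is an assumption (the generated header above points at //tools/perf/generate_perf_data as the entry point), and --validate-only checks staleness per the flag's help text:

    $ python tools/perf/core/perf_json_generator.py
    $ python tools/perf/core/perf_json_generator.py --validate-only
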