Chromium Code Reviews

Unified Diff: tools/perf/core/perf_json_generator.py

Issue 2754883002: Generating benchmark.csv file (Closed)
Patch Set: Fixing silly Python mistake Created 3 years, 9 months ago
Index: tools/perf/core/perf_json_generator.py
diff --git a/tools/perf/core/perf_json_generator.py b/tools/perf/core/perf_json_generator.py
index 5e908f6f53d8be4f045c53ba7daf1b58e89ebc84..459b584336c9887c7afff93617e826b022f9f402 100755
--- a/tools/perf/core/perf_json_generator.py
+++ b/tools/perf/core/perf_json_generator.py
@@ -4,10 +4,11 @@
# found in the LICENSE file.
"""Script to generate chromium.perf.json and chromium.perf.fyi.json in
-the src/testing/buildbot directory. Maintaining these files by hand is
-too unwieldy.
+the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
+directory. Maintaining these files by hand is too unwieldy.
"""
import argparse
+import csv
import json
import os
import sys
@@ -16,6 +17,7 @@ from chrome_telemetry_build import chromium_config
sys.path.append(chromium_config.GetTelemetryDir())
from telemetry import benchmark as benchmark_module
+from telemetry import decorators
from telemetry.core import discover
from telemetry.util import bot_utils
@@ -654,19 +656,21 @@ LEGACY_DEVICE_AFFIINITY_ALGORITHM = [
'Win 10 High-DPI Perf',
]
-def current_benchmarks(use_whitelist):
+def current_benchmarks(use_whitelist, use_blacklist=True):
benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
top_level_dir = os.path.dirname(benchmarks_dir)
all_benchmarks = discover.DiscoverClasses(
benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
index_by_class_name=True).values()
- # Remove all blacklisted benchmarks
- for blacklisted in BENCHMARK_NAME_BLACKLIST:
- for benchmark in all_benchmarks:
- if benchmark.Name() == blacklisted:
- all_benchmarks.remove(benchmark)
- break
+
+ if use_blacklist:
+ # Remove all blacklisted benchmarks
+ for blacklisted in BENCHMARK_NAME_BLACKLIST:
+ for benchmark in all_benchmarks:
+ if benchmark.Name() == blacklisted:
+ all_benchmarks.remove(benchmark)
+ break
if use_whitelist:
all_benchmarks = (
@@ -813,6 +817,48 @@ def src_dir():
os.path.dirname(os.path.dirname(file_path))))
+MANUAL_BENCHMARKS = [
nednguyen 2017/03/16 19:11:18 Let's name this NON_TELEMETRY_BENCHMARKS. Also, can you use a namedtuple for these entries? (See the sketch after this list.)
ashleymarie1 2017/03/16 19:40:51 Done. I had never heard of namedtuple before today.
+ ["angle_perftests", "jmadill@chromium.org", None],
+ ["cc_perftest", "enne@chromium.org", None],
+ ["gpu_perftests", "reveman@chromium.org", None],
+ ["tracing_perftests", "kkraynov@chromium.org, primiano@chromium.org",
+ None],
+ ["indexeddb_perf", "cmumford@chromium.org", None],
+ ["load_library_perf_tests", None, None],
+ ["media_perftests", "crouleau@chromium.org", None],
+ ["performance_browser_tests",
+ "hubbe@chromium.org, justinlin@chromium.org, miu@chromium.org", None]
+]
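
A minimal sketch of the rename and namedtuple suggestion from the comment above, assuming the list becomes NON_TELEMETRY_BENCHMARKS and each entry becomes a namedtuple; the type name BenchmarkMetadata and the field names (name, emails, component) are illustrative assumptions, not part of this patch set, which still uses plain lists named MANUAL_BENCHMARKS.

    import collections

    # One record per non-Telemetry benchmark: its name, the comma-separated
    # owner emails (or None), and a bug component (or None).
    BenchmarkMetadata = collections.namedtuple(
        'BenchmarkMetadata', 'name emails component')

    NON_TELEMETRY_BENCHMARKS = [
        BenchmarkMetadata('angle_perftests', 'jmadill@chromium.org', None),
        BenchmarkMetadata('cc_perftest', 'enne@chromium.org', None),
        # ... remaining entries as in MANUAL_BENCHMARKS above ...
    ]

    # Fields are then accessed by name rather than index, e.g.
    # NON_TELEMETRY_BENCHMARKS[0].emails == 'jmadill@chromium.org'.
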
+
+
+def update_benchmark_csv():
+ """Updates go/chrome-benchmarks.
+
+ Updates telemetry/perf/benchmark.csv containing the current benchmark names,
+ owners, and components.
+ """
+ benchmark_list = current_benchmarks(
+ use_whitelist=False, use_blacklist=False)
+ data = MANUAL_BENCHMARKS
+ for benchmark in benchmark_list:
+ emails = decorators.GetEmails(benchmark)
+ if emails:
+ emails = ", ".join(emails)
+ data.append([
+ benchmark.Name(),
+ emails,
+ decorators.GetComponent(benchmark)
+ ])
+
+ data = sorted(data, key=lambda b: b[0])
+
+ perf_dir = os.path.join(src_dir(), 'tools', 'perf')
+ benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
+ with open(benchmark_file, 'wb') as f:
+ writer = csv.writer(f)
+ writer.writerows(data)
+
+
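
For reference, each row that update_benchmark_csv() writes to tools/perf/benchmark.csv has three columns: benchmark name, owner emails, and component (empty when None). A minimal Python 2 sketch of reading the generated file back; the helper name read_benchmark_csv is hypothetical and only illustrates the column layout.

    import csv
    import os

    def read_benchmark_csv(perf_dir):
        # Print the three columns of each row: name, owner emails, component.
        benchmark_file = os.path.join(perf_dir, 'benchmark.csv')
        with open(benchmark_file, 'rb') as f:
            for name, emails, component in csv.reader(f):
                print name, '|', emails, '|', component
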
def main(args):
parser = argparse.ArgumentParser(
description=('Generate perf test\' json config. This need to be done '
@@ -825,6 +871,8 @@ def main(args):
'configs'))
options = parser.parse_args(args)
+ update_benchmark_csv()
nednguyen 2017/03/16 19:11:18 If the --validate-only flag is enabled, we should not update the csv. (See the sketch at the end of this diff.)
ashleymarie1 2017/03/16 19:40:51 Done.
+
waterfall = get_waterfall_config()
waterfall['name'] = 'chromium.perf'
fyi_waterfall = get_fyi_waterfall_config()
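
Per the review exchange above, the follow-up patch presumably gates the CSV update on the --validate-only flag, roughly as in the sketch below; the attribute name options.validate_only is an assumption derived from the flag name in the comment, not code from this patch set.

    # Only regenerate tools/perf/benchmark.csv when not merely validating.
    if not options.validate_only:
        update_benchmark_csv()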