| Index: tools/run-benchmarks.py |
| diff --git a/tools/run-benchmarks.py b/tools/run-benchmarks.py |
| new file mode 100755 |
| index 0000000000000000000000000000000000000000..522f42284d77131b3a8a996ec5b92293de75bf83 |
| --- /dev/null |
| +++ b/tools/run-benchmarks.py |
| @@ -0,0 +1,247 @@ |
| +#!/usr/bin/env python |
| +# Copyright 2014 the V8 project authors. All rights reserved. |
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| +""" |
| +Performance runner for d8. |
| + |
| +Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json |
| + |
| +TODO: units - score vs ms. |
| + |
| +The suite json format is expected to be: |
| +{ |
| +  "path": <relative path to benchmark resources and main file>, |
| +  "name": <optional suite name, file name is default>, |
| +  "archs": [<architecture name for which this suite is run>, ...], |
| +  "flags": [<flag to d8>, ...], |
| +  "run_count": <how often will this suite run (optional)>, |
| +  "run_count_XXX": <how often will this suite run for arch XXX (optional)>, |
| +  "resources": [<js file to be loaded before main>, ...], |
| +  "main": <main js benchmark runner file>, |
| +  "results_regexp": <optional regexp>, |
| +  "benchmarks": [ |
| +    { |
| +      "name": <name of the benchmark>, |
| +      "results_regexp": <optional more specific regexp>, |
| +    }, ... |
| +  ] |
| +} |
| + |
| +The benchmarks field can also nest other suites to arbitrary depth. A suite |
| +with a "main" file is a leaf suite that can contain one more level of |
| +benchmarks. |
| + |
| +A suite's results_regexp is expected to have one string placeholder |
| +"%s" for the benchmark name. A benchmark's results_regexp overrides the |
| +suite default. |
| + |
| +A suite without "benchmarks" is considered a benchmark itself. |
| + |
| +Full example (suite with one runner): |
| +{ |
| + "path": ".", |
| + "flags": ["--expose-gc"], |
| + "archs": ["ia32", "x64"], |
| + "run_count": 5, |
| + "run_count_ia32": 3, |
| + "main": "run.js", |
| + "results_regexp": "^%s: (\\d+)$", |
ulan
2014/05/23 10:02:35
Note that some benchmarks have custom result parsers.
Michael Achenbach
2014/05/23 13:57:39
I added the ability for custom results processors.
| + "benchmarks": [ |
| + {"name": "Richards"}, |
| + {"name": "DeltaBlue"}, |
| + {"name": "NavierStokes", |
| + "results_regexp": "^NavierStokes: (\\d+)$"} |
| + ] |
| +} |
| + |
| +Full example (suite with several runners): |
| +{ |
| + "path": ".", |
| + "flags": ["--expose-gc"], |
| + "archs": ["ia32", "x64"], |
| + "run_count": 5, |
| + "benchmarks": [ |
| + {"name": "Richards", |
| + "path": "richards", |
| + "main": "run.js", |
| + "run_count": 3, |
| + "results_regexp": "^Richards: (\\d+)$"}, |
| + {"name": "NavierStokes", |
| + "path": "navier_stokes", |
| + "main": "run.js", |
| + "results_regexp": "^NavierStokes: (\\d+)$"} |
| + ] |
| +} |
| + |
| +Path pieces are concatenated. D8 is always run with the suite's path as cwd. |
ulan
2014/05/23 10:02:35
Would be nice to pass the binary name (d8) as a parameter.
Michael Achenbach
2014/05/23 13:57:39
Done. Can be configured arbitrarily now with d8 as the default.
| +""" |
| + |
| +import json |
| +import optparse |
| +import os |
| +import re |
| +import sys |
| + |
| +from testrunner.local import commands |
| +from testrunner.local import utils |
| + |
| +ARCH_GUESS = utils.DefaultArch() |
| +SUPPORTED_ARCHS = ["android_arm", |
| + "android_arm64", |
| + "android_ia32", |
| + "arm", |
| + "ia32", |
| + "mips", |
| + "mipsel", |
| + "nacl_ia32", |
| + "nacl_x64", |
| + "x64", |
| + "arm64"] |
| + |
| +RUN_COUNT_DEFAULT = 10 |
| + |
| + |
| +def GetBenchmark(benchmark, graphs): |
| + result = {"graphs": list(graphs) + [benchmark["name"]]} |
| + if benchmark.get("results_regexp"): |
| + result["results_regexp"] = benchmark["results_regexp"] |
| + return result |
| + |
| + |
| +def GetSuites(suite, |
| +              arch, |
| +              run_count_default=RUN_COUNT_DEFAULT, |
| +              path=None, |
| +              graphs=None, |
| +              flags=None, |
| +              resources=None, |
| +              results_regexp=None): |
| +  # Local copies for changing the state. |
| +  path = list(path or []) |
| +  graphs = list(graphs or []) |
| +  flags = list(flags or []) |
| +  resources = list(resources or []) |
| + |
| +  # TODO(machenbach): Implement notion of cpu type? |
| +  if arch not in suite.get("archs", ["ia32", "x64"]): |
| +    return [] |
| + |
| +  # Default values for nesting level "suite". |
| +  path.append(suite.get("path", "")) |
| +  graphs.append(suite["name"]) |
| +  run_count = suite.get("run_count", run_count_default) |
| +  run_count = suite.get("run_count_%s" % arch, run_count) |
| +  flags += suite.get("flags", []) |
| +  resources += suite.get("resources", []) |
| +  results_regexp = suite.get("results_regexp", results_regexp) |
| + |
| +  benchmarks = [] |
| +  if suite.get("main"): |
| +    # This suite nesting level has the runner. |
| +    benchmarks.append({ |
| +      "path": path, |
| +      "graphs": graphs, |
| +      "run_count": run_count, |
| +      "flags": flags, |
| +      "resources": resources, |
| +      "main": suite["main"], |
| +      "results_regexp": results_regexp, |
| +      # Allow one more nesting level below the runner with trace name and |
| +      # specific results regexp. |
| +      "benchmarks": [GetBenchmark(benchmark, graphs) |
| +                     for benchmark in suite.get("benchmarks", [])], |
| +    }) |
| +  else: |
| +    # This suite nesting level just contains subsuites. |
| +    for subsuite in suite.get("benchmarks", []): |
| +      benchmarks.extend(GetSuites( |
| +          subsuite, arch, run_count, path, graphs, flags, resources, |
| +          results_regexp)) |
| +  return benchmarks |
| + |
| + |
| +def Main(): |
| +  parser = optparse.OptionParser() |
| +  parser.add_option("--arch", |
| +                    help=("The architecture to run tests for, " |
| +                          "'auto' or 'native' for auto-detect"), |
| +                    default="x64") |
| +  parser.add_option("--buildbot", |
| +                    help="Adapt to path structure used on buildbots", |
| +                    default=False, action="store_true") |
| +  parser.add_option("--json-test-results", |
| +                    help="Path to a file for storing json results.") |
| +  parser.add_option("-m", "--mode", |
| +                    help="The test mode in which to run", |
| +                    default="release") |
| +  parser.add_option("--outdir", help="Base directory with compile output", |
| +                    default="out") |
| +  (options, args) = parser.parse_args() |
| + |
| +  if len(args) == 0: |
| +    parser.print_help() |
| +    return 1 |
| + |
| +  if options.mode.lower() not in ["debug", "release", "optdebug"]: |
| +    print "Unknown mode %s" % options.mode |
| +    return 1 |
| + |
| +  if options.arch in ["auto", "native"]: |
| +    options.arch = ARCH_GUESS |
| + |
| +  if options.arch not in SUPPORTED_ARCHS: |
| +    print "Unknown architecture %s" % options.arch |
| +    return 1 |
| + |
| +  workspace = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..")) |
| + |
| +  if options.buildbot: |
| +    shell_dir = os.path.join(workspace, options.outdir, options.mode) |
| +    options.mode = options.mode.lower() |
| +  else: |
| +    shell_dir = os.path.join(workspace, options.outdir, |
| +                             "%s.%s" % (options.arch, options.mode)) |
| + |
| +  suites = [] |
| +  for path in args: |
| +    path = os.path.abspath(path) |
| +    if not os.path.exists(path): |
| +      print "Benchmark file %s does not exist." % path |
| +      return 1 |
| +    with open(path) as f: |
| +      suite = json.loads(f.read()) |
| +    # Remember location of the suite configuration as the benchmarks are |
| +    # supposed to be relative to it. |
| +    suite["config_path"] = os.path.abspath(os.path.dirname(path)) |
| +    # If no name is given, default to the file name without .json. |
| +    suite.setdefault("name", os.path.basename(path)[:-5]) |
| +    suites.append(suite) |
| + |
| +  results = [] |
| +  for suite in suites: |
| +    for subsuite in GetSuites(suite, options.arch): |
| +      # TODO(machenbach): Implement rerunning according to run_count. |
| +      command = ([os.path.join(shell_dir, "d8")] + |
| +                 subsuite.get("flags", []) + |
| +                 subsuite.get("resources", []) + |
| +                 [subsuite.get("main")]) |
| +      os.chdir(os.path.join(suite["config_path"], |
| +                            *subsuite.get("path", []))) |
| +      output = commands.Execute(command, timeout=60) |
| +      regexp_default = subsuite.get("results_regexp") |
| +      for benchmark in subsuite.get("benchmarks"): |
| +        name = benchmark["graphs"][-1] |
| +        # Fall back to the suite regexp with the benchmark name filled in. |
| +        regexp = benchmark.get("results_regexp") |
| +        if not regexp and regexp_default: |
| +          regexp = regexp_default % name |
| +        assert regexp |
| +        results.append({ |
| +          "graphs": benchmark["graphs"], |
| +          "result": re.search(regexp, output.stdout, re.M).group(1), |
| +        }) |
| + |
| +      print output.stdout |
| +      print subsuite |
| +  print results |
| + |
| +if __name__ == "__main__": |
| +  sys.exit(Main()) |
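
To make the results_regexp convention from the docstring concrete, here is a minimal standalone sketch (not part of the patch): it shows how a suite-level regexp with the "%s" placeholder is expanded per benchmark name and matched against d8 output, the way the result loop in Main() does. The sample stdout lines are made up for illustration.

import re

suite_regexp = r"^%s: (\d+)$"                  # suite-level default from the JSON config
stdout = "Richards: 1234\nDeltaBlue: 5678\n"   # hypothetical d8 output

for name in ["Richards", "DeltaBlue"]:
  regexp = suite_regexp % name                 # e.g. "^Richards: (\d+)$"
  match = re.search(regexp, stdout, re.M)
  print "%s: %s" % (name, match.group(1) if match else "no result")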
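
Similarly, a small sketch (again not from the patch) of how per-suite settings cascade in GetSuites: an arch-specific "run_count_<arch>" key overrides "run_count", which in turn overrides RUN_COUNT_DEFAULT, while flags accumulate from outer to inner suites. The inherited flags below are hypothetical.

RUN_COUNT_DEFAULT = 10

suite = {"run_count": 5, "run_count_ia32": 3, "flags": ["--expose-gc"]}
arch = "ia32"
inherited_flags = ["--nocrankshaft"]  # hypothetical flags from an enclosing suite

run_count = suite.get("run_count", RUN_COUNT_DEFAULT)
run_count = suite.get("run_count_%s" % arch, run_count)
flags = inherited_flags + suite.get("flags", [])

print run_count  # 3 for ia32; 5 for the other archs listed in the suite
print flags      # ['--nocrankshaft', '--expose-gc']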