Chromium Code Reviews

Side by Side Diff: tools/run-benchmarks.py

Issue 293023006: Add new benchmark suite runner. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Added simple example json. Created 6 years, 7 months ago
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json

TODO: units - score vs ms.

The suite json format is expected to be:
{
  "path": <relative path to benchmark resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "flags": [<flag to d8>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be loaded before main>, ...],
  "main": <main js benchmark runner file>,
  "results_regexp": <optional regexp>,
  "benchmarks": [
    {
      "name": <name of the benchmark>,
      "results_regexp": <optional more specific regexp>,
    }, ...
  ]
}

The benchmarks field can also nest other suites to arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
benchmarks.

A suite's results_regexp is expected to have one string placeholder "%s"
for the benchmark name, e.g. "^%s: (\\d+)$" matches the output line
"Richards: 1234" for a benchmark named "Richards". A benchmark's
results_regexp overrides the suite default.

A suite without "benchmarks" is considered a benchmark itself.
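
Minimal example of such a suite (the file name and regexp are
illustrative):
{
  "path": ".",
  "main": "run.js",
  "results_regexp": "^Score: (\\d+)$"
}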

Full example (suite with one runner):
{
  "path": ".",
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (\\d+)$",
ulan 2014/05/23 10:02:35 Note that some benchmarks have custom result parsers.
Michael Achenbach 2014/05/23 13:57:39 I added the ability for custom results processors.
51 "benchmarks": [
52 {"name": "Richards"},
53 {"name": "DeltaBlue"},
54 {"name": "NavierStokes",
55 "results_regexp": "^NavierStokes: (\\d+)$"}
56 ]
57 }
58
59 Full example (suite with several runners):
60 {
61 "path": ".",
62 "flags": ["--expose-gc"],
63 "archs": ["ia32", "x64"],
64 "run_count": 5,
65 "benchmarks": [
66 {"name": "Richards",
67 "path": "richards",
68 "main": "run.js",
69 "run_count": 3,
70 "results_regexp": "^Richards: (\\d+)$"},
71 {"name": "NavierStokes",
72 "path": "navier_stokes",
73 "main": "run.js",
74 "results_regexp": "^NavierStokes: (\\d+)$"}
75 ]
76 }
77
78 Path pieces are concatenated. D8 is always run with the suite's path as cwd.
ulan 2014/05/23 10:02:35 Would be nice to have the binary name (d8) as a parameter.
Michael Achenbach 2014/05/23 13:57:39 Done. Can be configured arbitrarily now with d8 as the default.
79 """
80
81 import json
82 import optparse
83 import os
84 import re
85 import sys
86
87 from testrunner.local import commands
88 from testrunner.local import utils
89
90 ARCH_GUESS = utils.DefaultArch()
91 SUPPORTED_ARCHS = ["android_arm",
92 "android_arm64",
93 "android_ia32",
94 "arm",
95 "ia32",
96 "mips",
97 "mipsel",
98 "nacl_ia32",
99 "nacl_x64",
100 "x64",
101 "arm64"]
102
103 RUN_COUNT_DEFAULT = 10
104
105
def GetBenchmark(benchmark, graphs):
  # One leaf trace: the accumulated graph names plus this benchmark's name,
  # and its more specific results_regexp if given. E.g. (illustrative)
  # GetBenchmark({"name": "Richards"}, ["octane"]) returns
  # {"graphs": ["octane", "Richards"]}.
  result = {"graphs": list(graphs) + [benchmark["name"]]}
  if benchmark.get("results_regexp"):
    result["results_regexp"] = benchmark["results_regexp"]
  return result


def GetSuites(suite,
              arch,
              run_count_default=RUN_COUNT_DEFAULT,
              path=None,
              graphs=None,
              flags=None,
              resources=None,
              results_regexp=None):
  # Local copies for changing the state.
  path = list(path or [])
  graphs = list(graphs or [])
  flags = list(flags or [])
  resources = list(resources or [])

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", ["ia32", "x64"]):
    return []

  # Default values for nesting level "suite".
  path.append(suite.get("path", ""))
  graphs.append(suite["name"])
  run_count = suite.get("run_count", run_count_default)
  run_count = suite.get("run_count_%s" % arch, run_count)
  flags += suite.get("flags", [])
  resources += suite.get("resources", [])
  results_regexp = suite.get("results_regexp", results_regexp)

  benchmarks = []
  if suite.get("main"):
    # This suite nesting level has the runner.
    benchmarks.append({
      "graphs": graphs,
      # Keep the accumulated path pieces; they are joined below the suite's
      # config location when the benchmark is run.
      "path": path,
      "run_count": run_count,
      "flags": flags,
      "resources": resources,
      "main": suite["main"],
      "results_regexp": results_regexp,
      # Allow one more nesting level below the runner with trace name and
      # specific results regexp.
      "benchmarks": [GetBenchmark(benchmark, graphs)
                     for benchmark in suite.get("benchmarks", [])],
    })
  else:
    # This suite nesting level just contains subsuites.
    for subsuite in suite.get("benchmarks", []):
      benchmarks.extend(GetSuites(
          subsuite, arch, run_count, path, graphs, flags, resources,
          results_regexp))
  return benchmarks

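# Illustrative aside: GetSuites flattens nested configurations into one
# entry per leaf suite. With the docstring's "several runners" json loaded
# as `example` and given a name, e.g.
#
#   example["name"] = "my_suite"
#   leaves = GetSuites(example, "x64")
#
# `leaves` would hold two runnable entries, one per benchmark: both inherit
# the flags ["--expose-gc"], "Richards" keeps its own run_count of 3 while
# "NavierStokes" inherits the suite's 5, and each keeps its own
# results_regexp.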

def Main():
  parser = optparse.OptionParser()
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("-m", "--mode",
                    help="The test mode in which to run",
                    default="release")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  (options, args) = parser.parse_args()

  if len(args) == 0:
    parser.print_help()
    return 1

  if options.mode.lower() not in ["debug", "release", "optdebug"]:
    print "Unknown mode %s" % options.mode
    return 1

  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:
    print "Unknown architecture %s" % options.arch
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))

  if options.buildbot:
    # Buildbots use capitalized mode names in the output path, e.g.
    # out/Release; normalize the mode for all later uses.
    shell_dir = os.path.join(workspace, options.outdir, options.mode)
    options.mode = options.mode.lower()
  else:
    shell_dir = os.path.join(workspace, options.outdir,
                             "%s.%s" % (options.arch, options.mode))

  suites = []
  for path in args:
    path = os.path.abspath(path)
    if not os.path.exists(path):
      print "Benchmark file %s does not exist." % path
      return 1
    with open(path) as f:
      suite = json.loads(f.read())
    # Remember location of the suite configuration as the benchmarks are
    # supposed to be relative to it.
    suite["config_path"] = os.path.abspath(os.path.dirname(path))
    # If no name is given, default to the file name without .json.
    suite.setdefault("name", os.path.basename(path)[:-5])
    suites.append(suite)

  results = []
  for suite in suites:
    for subsuite in GetSuites(suite, options.arch):
      # TODO(machenbach): Rerun the command run_count times.
      command = ([os.path.join(shell_dir, "d8")] +
                 subsuite.get("flags", []) +
                 subsuite.get("resources", []) +
                 [subsuite.get("main")])
      # Concatenate the suite's path pieces below the config location,
      # e.g. <config dir>/richards (see docstring).
      os.chdir(os.path.join(suite["config_path"],
                            *subsuite.get("path", [])))
      output = commands.Execute(command, timeout=60)
      regexp_default = subsuite.get("results_regexp")
      for benchmark in subsuite.get("benchmarks"):
        name = benchmark["graphs"][-1]
        # The suite default regexp has a %s placeholder for the benchmark
        # name; a benchmark-specific regexp is used verbatim.
        regexp = benchmark.get("results_regexp")
        if not regexp and regexp_default:
          regexp = regexp_default % name
        assert regexp
        results.append({
          "graphs": benchmark["graphs"],
          "result": re.search(regexp, output.stdout, re.M).group(1),
        })

  # Debug output; --json-test-results is not yet implemented.
  print output.stdout
  print subsuite
  print results

if __name__ == "__main__":
  sys.exit(Main())
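
Aside: a minimal standalone sketch of the result-extraction step in Main()
above, assuming the docstring's default regexp "^%s: (\\d+)$" and two
hypothetical benchmark names with assumed d8 output:

  import re

  stdout = "Richards: 1234\nDeltaBlue: 5678\n"  # assumed d8 output
  results_regexp = "^%s: (\\d+)$"               # suite-level default
  for name in ["Richards", "DeltaBlue"]:
    # Substitute the benchmark name, then search the run's stdout.
    regexp = results_regexp % name
    match = re.search(regexp, stdout, re.M)
    print name, match.group(1)                  # Richards 1234 / DeltaBlue 5678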