OLD | NEW |
---|---|
(Empty) | |
1 #!/usr/bin/env python | |
2 # Copyright 2014 the V8 project authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 """ | |
7 Performance runner for d8. | |
8 | |
9 Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json | |
10 | |
11 The suite json format is expected to be: | |
12 { | |
13 "path": <relative path chunks to benchmark resources and main file>, | |
14 "name": <optional suite name, file name is default>, | |
15 "archs": [<architecture name for which this suite is run>, ...], | |
16 "binary": <name of binary to run, default "d8">, | |
17 "flags": [<flag to d8>, ...], | |
18 "run_count": <how often will this suite run (optional)>, | |
19 "run_count_XXX": <how often will this suite run for arch XXX (optional)>, | |
20 "resources": [<js file to be loaded before main>, ...] | |
21 "main": <main js benchmark runner file>, | |
22 "results_regexp": <optional regexp>, | |
23 "results_processor": <optional python results processor script>, | |
24 "units": <the unit specification for the performance dashboard>, | |
25 "benchmarks": [ | |
26 { | |
27 "name": <name of the benchmark>, | |
28 "results_regexp": <optional more specific regexp>, | |
29 "results_processor": <optional python results processor script>, | |
30 "units": <the unit specification for the performance dashboard>, | |
31 }, ... | |
32 ] | |
33 } | |
34 | |
35 The benchmarks field can also nest other suites to arbitrary depth. A suite | |
36 with a "main" file is a leaf suite that can contain one more level of | |
37 benchmarks. | |
38 | |
39 A suite's results_regexp is expected to have one string placeholder | |
40 "%s" for the benchmark name. A benchmark's results_regexp overrides suite | |
41 defaults. | |
42 | |
43 A suite's results_processor may point to an optional python script. If | |
44 specified, it is called after running the benchmarks like this (with a path | |
45 relative to the suite level's path; see the sketch after this docstring): | |
46 <results_processor file> <same flags as for d8> <suite level name> <output> | |
47 | |
48 The <output> is a temporary file containing d8 output. The results_regexp will | |
49 be applied to the output of this script. | |
50 | |
51 A suite without "benchmarks" is considered a benchmark itself. | |
52 | |
53 Full example (suite with one runner): | |
54 { | |
55 "path": ["."], | |
56 "flags": ["--expose-gc"], | |
57 "archs": ["ia32", "x64"], | |
58 "run_count": 5, | |
59 "run_count_ia32": 3, | |
60 "main": "run.js", | |
61 "results_regexp": "^%s: (.+)$", | |
62 "units": "score", | |
63 "benchmarks": [ | |
64 {"name": "Richards"}, | |
65 {"name": "DeltaBlue"}, | |
66 {"name": "NavierStokes", | |
67 "results_regexp": "^NavierStokes: (.+)$"} | |
68 ] | |
69 } | |
70 | |
71 Full example (suite with several runners): | |
72 { | |
73 "path": ["."], | |
74 "flags": ["--expose-gc"], | |
75 "archs": ["ia32", "x64"], | |
76 "run_count": 5, | |
77 "units": "score", | |
78 "benchmarks": [ | |
79 {"name": "Richards", | |
80 "path": ["richards"], | |
81 "main": "run.js", | |
82 "run_count": 3, | |
83 "results_regexp": "^Richards: (.+)$"}, | |
84 {"name": "NavierStokes", | |
85 "path": ["navier_stokes"], | |
86 "main": "run.js", | |
87 "results_regexp": "^NavierStokes: (.+)$"} | |
88 ] | |
89 } | |
90 | |
91 Path pieces are concatenated. D8 is always run with the suite's path as cwd. | |
92 """ | |
93 | |
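Note: the results_processor hook documented above is not implemented by this
runner yet (see the TODO before Main below). Purely to illustrate the
documented calling convention, a hypothetical processor script might look as
follows; the argument handling and the forwarding logic are assumptions, not
part of this change:

  #!/usr/bin/env python
  # Hypothetical results_processor sketch. Called as:
  #   <this script> <d8 flags...> <suite name> <file with d8 output>
  # Whatever it prints is matched against the suite's results_regexp.
  import sys

  def main():
    # The last two arguments are the suite name and the temporary file
    # holding d8 output; everything before that is the d8 flag list.
    suite_name, output_file = sys.argv[-2], sys.argv[-1]
    with open(output_file) as f:
      for line in f:
        if ":" in line:
          # Forward score lines, prefixed with the suite name.
          print "%s: %s" % (suite_name, line.split(":", 1)[1].strip())

  if __name__ == "__main__":
    main()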
94 import json | |
95 import optparse | |
96 import os | |
97 import re | |
98 import sys | |
99 | |
100 from testrunner.local import commands | |
101 from testrunner.local import utils | |
102 | |
103 ARCH_GUESS = utils.DefaultArch() | |
104 SUPPORTED_ARCHS = ["android_arm", | |
105 "android_arm64", | |
106 "android_ia32", | |
107 "arm", | |
108 "ia32", | |
109 "mips", | |
110 "mipsel", | |
111 "nacl_ia32", | |
112 "nacl_x64", | |
113 "x64", | |
114 "arm64"] | |
115 | |
116 | |
117 class Results(object): | |
118 """Place holder for result traces.""" | |
119 def __init__(self, traces=None, errors=None): | |
120 self.traces = traces or [] | |
121 self.errors = errors or [] | |
122 | |
123 def ToDict(self): | |
124 return {"traces": self.traces, "errors": self.errors} | |
125 | |
126 def WriteToFile(self, file_name): | |
127 with open(file_name, "w") as f: | |
128 f.write(json.dumps(self.ToDict())) | |
129 | |
130 def __add__(self, other): | |
131 self.traces += other.traces | |
132 self.errors += other.errors | |
133 return self | |
134 | |
135 def __str__(self): # pragma: no cover | |
136 return str(self.ToDict()) | |
137 | |
138 | |
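For orientation, a small usage sketch (illustration only, not part of the
change) of how Results objects merge and serialize; the trace dict matches
the shape produced by Trace.GetResults below, and the file path is made up:

  r = Results([{"graphs": ["v8", "Richards"], "units": "score",
                "results": ["1234"]}])
  r += Results([], ["Regexp \"^DeltaBlue: (.+)$\" didn't match."])
  r.WriteToFile("/tmp/results.json")
  # File content: {"traces": [...], "errors": [...]}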
139 class Node(object): | |
140 """Represents a node in the benchmark suite tree structure.""" | |
141 def __init__(self, *args): | |
142 self._children = [] | |
143 | |
144 def AppendChild(self, child): | |
145 self._children.append(child) | |
146 | |
147 | |
148 class DefaultSentinel(Node): | |
149 """Fake parent node with all default values.""" | |
150 def __init__(self): | |
151 super(DefaultSentinel, self).__init__() | |
152 self.binary = "d8" | |
153 self.run_count = 10 | |
154 self.path = [] | |
155 self.graphs = [] | |
156 self.flags = [] | |
157 self.resources = [] | |
158 self.results_regexp = None | |
159 self.units = "score" | |
160 | |
161 | |
162 class Graph(Node): | |
163 """Represents a benchmark suite definition. | |
164 | |
165 Can either be a leaf or an inner node that provides default values. | |
166 """ | |
167 def __init__(self, suite, parent, arch): | |
168 super(Graph, self).__init__() | |
169 self._suite = suite | |
170 | |
171 assert isinstance(suite.get("path", []), list) | |
172 assert isinstance(suite["name"], basestring) | |
173 assert isinstance(suite.get("flags", []), list) | |
174 assert isinstance(suite.get("resources", []), list) | |
175 | |
176 # Accumulated values. | |
177 self.path = parent.path[:] + suite.get("path", []) | |
178 self.graphs = parent.graphs[:] + [suite["name"]] | |
179 self.flags = parent.flags[:] + suite.get("flags", []) | |
180 self.resources = parent.resources[:] + suite.get("resources", []) | |
181 | |
182     # Discrete values (with parent defaults). | |
183 self.binary = suite.get("binary", parent.binary) | |
184 self.run_count = suite.get("run_count", parent.run_count) | |
185 self.run_count = suite.get("run_count_%s" % arch, self.run_count) | |
186 self.units = suite.get("units", parent.units) | |
187 | |
188     # A regular expression for results. If the parent graph provides a | |
189     # regexp and the current suite has none, a string placeholder for the | |
190     # suite name is expected. | |
191     # TODO(machenbach): Currently this only makes sense for the leaf level. | |
192     # Multiple placeholders for multiple levels are not supported. | |
193 if parent.results_regexp: | |
194 regexp_default = parent.results_regexp % suite["name"] | |
195 else: | |
196 regexp_default = None | |
197 self.results_regexp = suite.get("results_regexp", regexp_default) | |
198 | |
199 | |
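To make the cascading rules concrete, a sketch (illustration only) for arch
"ia32", reusing values from the first full example in the docstring:

  parent = DefaultSentinel()
  graph = Graph({"name": "v8", "path": ["."], "flags": ["--expose-gc"],
                 "run_count": 5, "run_count_ia32": 3,
                 "results_regexp": "^%s: (.+)$"}, parent, "ia32")
  assert graph.flags == ["--expose-gc"]  # accumulated onto the parent's []
  assert graph.run_count == 3            # arch-specific value beats plain 5
  # graph.results_regexp keeps its "%s"; a child named "Richards" would
  # default to "^Richards: (.+)$" via the substitution above.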
200 class Trace(Graph): | |
201 """Represents a leaf in the benchmark suite tree structure. | |
202 | |
203 Handles collection of measurements. | |
204 """ | |
205 def __init__(self, suite, parent, arch): | |
206 super(Trace, self).__init__(suite, parent, arch) | |
207 assert self.results_regexp | |
208 self.results = [] | |
209 self.errors = [] | |
210 | |
211 def ConsumeOutput(self, stdout): | |
212 try: | |
213 self.results.append( | |
214 re.search(self.results_regexp, stdout, re.M).group(1)) | |
215 except: | |
216 self.errors.append("Regexp \"%s\" didn't match for benchmark %s." | |
217 % (self.results_regexp, self.graphs[-1])) | |
218 | |
219 def GetResults(self): | |
220 return Results([{ | |
221 "graphs": self.graphs, | |
222 "units": self.units, | |
223 "results": self.results, | |
224 }], self.errors) | |
225 | |
226 | |
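A sketch of the matching behavior (illustration only): the regexp is applied
with re.M, so "^" anchors at each line of one run's stdout:

  trace = Trace({"name": "Richards", "results_regexp": "^Richards: (.+)$"},
                DefaultSentinel(), "x64")
  trace.ConsumeOutput("Richards: 1234\nDeltaBlue: 5678\n")
  assert trace.results == ["1234"]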
227 class Runnable(Graph): | |
228 """Represents a runnable benchmark suite definition (i.e. has a main file). | |
229 """ | |
230 @property | |
231 def main(self): | |
232 return self._suite["main"] | |
233 | |
234 def ChangeCWD(self, suite_path): | |
235 """Changes the cwd to to path defined in the current graph. | |
236 | |
237 The benchmarks are supposed to be relative to the suite configuration. | |
238 """ | |
239 suite_dir = os.path.abspath(os.path.dirname(suite_path)) | |
240 bench_dir = os.path.normpath(os.path.join(*self.path)) | |
241 os.chdir(os.path.join(suite_dir, bench_dir)) | |
242 | |
243 def GetCommand(self, shell_dir): | |
244 # TODO(machenbach): This requires +.exe if ran on windows. | |
Jakob Kummerow
2014/05/28 11:15:05
nit: s/ran/run/
Michael Achenbach
2014/05/28 12:54:49
Done.
| |
245 return ( | |
246 [os.path.join(shell_dir, self.binary)] + | |
247 self.flags + | |
248 self.resources + | |
249 [self.main] | |
250 ) | |
251 | |
252 def Run(self, runner): | |
253 """Iterates over several runs and handles the output for all traces.""" | |
254 for stdout in runner(): | |
255 for trace in self._children: | |
256 trace.ConsumeOutput(stdout) | |
257 return reduce(lambda r, t: r + t.GetResults(), self._children, Results()) | |
258 | |
259 | |
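The runner argument above is a zero-argument callable that yields one stdout
string per run. A sketch (illustration only, suite values made up):

  runnable = Runnable({"name": "v8", "main": "run.js",
                       "results_regexp": "^%s: (.+)$"},
                      DefaultSentinel(), "x64")
  runnable.AppendChild(Trace({"name": "Richards"}, runnable, "x64"))

  def FakeRunner():
    for _ in xrange(3):  # three fake runs
      yield "Richards: 1234\n"

  results = runnable.Run(FakeRunner)
  # results.traces[0]["results"] == ["1234", "1234", "1234"]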
260 class RunnableTrace(Trace, Runnable): | |
261 """Represents a runnable benchmark suite definition that is a leaf.""" | |
262 def __init__(self, suite, parent, arch): | |
263 super(RunnableTrace, self).__init__(suite, parent, arch) | |
264 | |
265 def Run(self, runner): | |
266 """Iterates over several runs and handles the output.""" | |
267 for stdout in runner(): | |
268 self.ConsumeOutput(stdout) | |
269 return self.GetResults() | |
270 | |
271 | |
272 def MakeGraph(suite, arch, parent): | |
273 """Factory method for making graph objects.""" | |
274 if isinstance(parent, Runnable): | |
275     # Below a runnable there can only be traces. | |
276 return Trace(suite, parent, arch) | |
277 elif suite.get("main"): | |
278 # A main file makes this graph runnable. | |
279 if suite.get("benchmarks"): | |
280 # This graph has subbenchmarks (traces). | |
281 return Runnable(suite, parent, arch) | |
282 else: | |
283 # This graph has no subbenchmarks, it's a leaf. | |
284 return RunnableTrace(suite, parent, arch) | |
285 elif suite.get("benchmarks"): | |
286 # This is neither a leaf nor a runnable. | |
287 return Graph(suite, parent, arch) | |
288 else: # pragma: no cover | |
289 raise Exception("Invalid benchmark suite configuration.") | |
290 | |
291 | |
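A compact summary of the dispatch above (restating the code, not adding
behavior):

  # {"main": ..., "benchmarks": [...]} -> Runnable (runnable inner node)
  # {"main": ...}                      -> RunnableTrace (runnable leaf)
  # {"benchmarks": [...]}              -> Graph (plain inner node)
  # any child of a Runnable            -> Trace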
292 def BuildGraphs(suite, arch, parent=None): | |
293 """Builds a tree structure of graph objects that corresponds to the suite | |
294 configuration. | |
295 """ | |
296 parent = parent or DefaultSentinel() | |
297 | |
298 # TODO(machenbach): Implement notion of cpu type? | |
299 if arch not in suite.get("archs", ["ia32", "x64"]): | |
300 return None | |
301 | |
302 graph = MakeGraph(suite, arch, parent) | |
303 for subsuite in suite.get("benchmarks", []): | |
304 BuildGraphs(subsuite, arch, graph) | |
305 parent.AppendChild(graph) | |
306 return graph | |
307 | |
308 | |
309 def FlattenRunnables(node): | |
310 """Generator that traverses the tree structure and iterates over all | |
311 runnables. | |
312 """ | |
313 if isinstance(node, Runnable): | |
314 yield node | |
315 elif isinstance(node, Node): | |
316 for child in node._children: | |
317 for result in FlattenRunnables(child): | |
318 yield result | |
319 else: # pragma: no cover | |
320 raise Exception("Invalid benchmark suite configuration.") | |
321 | |
322 | |
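Together with BuildGraphs this yields the iteration used by Main below; a
sketch (illustration only, suite values made up):

  suite = {"name": "v8", "main": "run.js", "archs": ["x64"],
           "results_regexp": "^%s: (.+)$",
           "benchmarks": [{"name": "Richards"}]}
  root = BuildGraphs(suite, "x64")  # returns None if the arch didn't match
  for runnable in FlattenRunnables(root):
    print "/".join(runnable.graphs)  # prints "v8"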
323 # TODO: Implement results_processor. | |
324 def Main(args): | |
325 parser = optparse.OptionParser() | |
326 parser.add_option("--arch", | |
327 help=("The architecture to run tests for, " | |
328 "'auto' or 'native' for auto-detect"), | |
329 default="x64") | |
330 parser.add_option("--buildbot", | |
331 help="Adapt to path structure used on buildbots", | |
332 default=False, action="store_true") | |
333 parser.add_option("--json-test-results", | |
334 help="Path to a file for storing json results.") | |
335 parser.add_option("-m", "--mode", | |
336 help="The test mode in which to run", | |
337 default="release") | |
338 parser.add_option("--outdir", help="Base directory with compile output", | |
339 default="out") | |
340 (options, args) = parser.parse_args(args) | |
341 | |
342 if len(args) == 0: # pragma: no cover | |
343 parser.print_help() | |
344 return 1 | |
345 | |
346 if not options.mode.lower() in ["debug", "release", "optdebug"]: | |
Jakob Kummerow
2014/05/28 11:15:05
arguably running benchmarks in anything other than
Michael Achenbach
2014/05/28 12:54:49
Done. Dropped the option completely.
| |
347 print "Unknown mode %s" % options.mode # pragma: no cover | |
348 return 1 # pragma: no cover | |
349 | |
350 if options.arch in ["auto", "native"]: # pragma: no cover | |
351 options.arch = ARCH_GUESS | |
352 | |
353   if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover | |
354     print "Unknown architecture %s" % options.arch | |
355     return 1 | |
356 | |
357 workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) | |
358 | |
359 if options.buildbot: | |
360 shell_dir = os.path.join(workspace, options.outdir, options.mode) | |
361 options.mode = options.mode.lower() | |
362 else: | |
363 shell_dir = os.path.join(workspace, options.outdir, | |
364 "%s.%s" % (options.arch, options.mode)) | |
365 | |
366 results = Results() | |
367 for path in args: | |
368 path = os.path.abspath(path) | |
369 | |
370 if not os.path.exists(path): # pragma: no cover | |
371 results.errors.append("Benchmark file %s does not exist." % path) | |
372 continue | |
373 | |
374 with open(path) as f: | |
375 suite = json.loads(f.read()) | |
376 | |
377 # If no name is given, default to the file name without .json. | |
378 suite.setdefault("name", os.path.splitext(os.path.basename(path))[0]) | |
379 | |
380 for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)): | |
381 print ">>> Running suite: %s" % "/".join(runnable.graphs) | |
382 runnable.ChangeCWD(path) | |
383 | |
384 def Runner(): | |
385 """Output generator that reruns several times.""" | |
386 for i in xrange(0, max(1, runnable.run_count)): | |
387 output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60) | |
Jakob Kummerow
2014/05/28 11:15:05
for slow (mobile) devices 60 seconds might be too
Michael Achenbach
2014/05/28 12:54:49
Will make it configurable in a followup. Marked as
| |
388 print ">>> Stdout (#%d):" % (i + 1) | |
389 print output.stdout | |
390 if output.stderr: # pragma: no cover | |
391 # Print stderr for debugging. | |
392 print ">>> Stderr (#%d):" % (i + 1) | |
393 print output.stderr | |
394 yield output.stdout | |
395 | |
396 # Let runnable iterate over all runs and handle output. | |
397 results += runnable.Run(Runner) | |
398 | |
399 if options.json_test_results: | |
400 results.WriteToFile(options.json_test_results) | |
401 else: # pragma: no cover | |
402 print results | |
403 | |
404 if __name__ == "__main__": # pragma: no cover | |
405 sys.exit(Main(sys.argv[1:])) | |
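For reference, an illustrative invocation and result shape (paths and
numbers made up):

  $ tools/run-benchmarks.py --arch ia32 --json-test-results /tmp/r.json \
      some_suite.json

  # /tmp/r.json then contains the merged traces:
  # {"traces": [{"graphs": ["v8", "Richards"],
  #              "units": "score",
  #              "results": ["1234", "1230", "1238"]}],
  #  "errors": []}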