1 #!/usr/bin/env python | |
2 # Copyright 2014 the V8 project authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 from collections import namedtuple | |
7 import coverage | |
8 import json | |
9 from mock import DEFAULT | |
10 from mock import MagicMock | |
11 import os | |
12 from os import path, sys | |
13 import shutil | |
14 import tempfile | |
15 import unittest | |
16 | |
17 # Requires python-coverage and python-mock. Native python coverage | |
18 # version >= 3.7.1 should be installed to get the best speed. | |
19 | |
# Scratch directory used by every test case; wiped and recreated in setUp.
TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")

# Flat suite fixture: two benchmarks sharing one suite-level results regexp
# and the "d7" binary.
V8_JSON = {
  "path": ["."],
  "binary": "d7",
  "flags": ["--flag"],
  "main": "run.js",
  "run_count": 1,
  "results_regexp": "^%s: (.+)$",
  "benchmarks": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
  ]
}

# Nested suite fixture: benchmarks override path, binary, flags, run count
# and units; "ShouldntRun" is restricted to the arm architecture.
V8_NESTED_SUITES_JSON = {
  "path": ["."],
  "flags": ["--flag"],
  "run_count": 1,
  "units": "score",
  "benchmarks": [
    {"name": "Richards",
     "path": ["richards"],
     "binary": "d7",
     "main": "run.js",
     "resources": ["file1.js", "file2.js"],
     "run_count": 2,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "Sub",
     "path": ["sub"],
     "benchmarks": [
       {"name": "Leaf",
        "path": ["leaf"],
        "run_count_x64": 3,
        "units": "ms",
        "main": "run.js",
        "results_regexp": "^Simple: (.+) ms.$"},
     ]
    },
    {"name": "DeltaBlue",
     "path": ["delta_blue"],
     "main": "run.js",
     "flags": ["--flag2"],
     "results_regexp": "^DeltaBlue: (.+)$"},
    {"name": "ShouldntRun",
     "path": ["."],
     "archs": ["arm"],
     "main": "run.js"},
  ]
}

# Generic suite fixture: output is parsed in the generic
# Trace(...)/Result(...)/StdDev(...) format, run with the "cc" binary.
V8_GENERIC_JSON = {
  "path": ["."],
  "binary": "cc",
  "flags": ["--flag"],
  "generic": True,
  "run_count": 1,
  "units": "ms",
}

# Minimal stand-in for the process-output object returned by
# commands.Execute (only stdout/stderr are consumed by the tests).
Output = namedtuple("Output", "stdout, stderr")
82 class BenchmarksTest(unittest.TestCase): | |
83 @classmethod | |
84 def setUpClass(cls): | |
85 cls.base = path.dirname(path.dirname(path.abspath(__file__))) | |
86 sys.path.append(cls.base) | |
87 cls._cov = coverage.coverage( | |
88 include=([os.path.join(cls.base, "run_benchmarks.py")])) | |
89 cls._cov.start() | |
90 import run_benchmarks | |
91 from testrunner.local import commands | |
92 global commands | |
93 global run_benchmarks | |
94 | |
95 @classmethod | |
96 def tearDownClass(cls): | |
97 cls._cov.stop() | |
98 print "" | |
99 print cls._cov.report() | |
100 | |
101 def setUp(self): | |
102 self.maxDiff = None | |
103 if path.exists(TEST_WORKSPACE): | |
104 shutil.rmtree(TEST_WORKSPACE) | |
105 os.makedirs(TEST_WORKSPACE) | |
106 | |
107 def tearDown(self): | |
108 if path.exists(TEST_WORKSPACE): | |
109 shutil.rmtree(TEST_WORKSPACE) | |
110 | |
111 def _WriteTestInput(self, json_content): | |
112 self._test_input = path.join(TEST_WORKSPACE, "test.json") | |
113 with open(self._test_input, "w") as f: | |
114 f.write(json.dumps(json_content)) | |
115 | |
116 def _MockCommand(self, *args): | |
117 # Fake output for each benchmark run. | |
118 benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]] | |
119 def execute(*args, **kwargs): | |
120 return benchmark_outputs.pop() | |
121 commands.Execute = MagicMock(side_effect=execute) | |
122 | |
123 # Check that d8 is called from the correct cwd for each benchmark run. | |
124 dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]] | |
125 def chdir(*args, **kwargs): | |
126 self.assertEquals(dirs.pop(), args[0]) | |
127 os.chdir = MagicMock(side_effect=chdir) | |
128 | |
129 def _CallMain(self, *args): | |
130 self._test_output = path.join(TEST_WORKSPACE, "results.json") | |
131 all_args=[ | |
132 "--json-test-results", | |
133 self._test_output, | |
134 self._test_input, | |
135 ] | |
136 all_args += args | |
137 return run_benchmarks.Main(all_args) | |
138 | |
139 def _LoadResults(self): | |
140 with open(self._test_output) as f: | |
141 return json.load(f) | |
142 | |
143 def _VerifyResults(self, suite, units, traces): | |
144 self.assertEquals([ | |
145 {"units": units, | |
146 "graphs": [suite, trace["name"]], | |
147 "results": trace["results"], | |
148 "stddev": trace["stddev"]} for trace in traces], | |
149 self._LoadResults()["traces"]) | |
150 | |
151 def _VerifyErrors(self, errors): | |
152 self.assertEquals(errors, self._LoadResults()["errors"]) | |
153 | |
154 def _VerifyMock(self, binary, *args): | |
155 arg = [path.join(path.dirname(self.base), binary)] | |
156 arg += args | |
157 commands.Execute.assert_called_with(arg, timeout=60) | |
158 | |
159 def _VerifyMockMultiple(self, *args): | |
160 expected = [] | |
161 for arg in args: | |
162 a = [path.join(path.dirname(self.base), arg[0])] | |
163 a += arg[1:] | |
164 expected.append(((a,), {"timeout": 60})) | |
165 self.assertEquals(expected, commands.Execute.call_args_list) | |
166 | |
167 def testOneRun(self): | |
168 self._WriteTestInput(V8_JSON) | |
169 self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"]) | |
170 self.assertEquals(0, self._CallMain()) | |
171 self._VerifyResults("test", "score", [ | |
172 {"name": "Richards", "results": ["1.234"], "stddev": ""}, | |
173 {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, | |
174 ]) | |
175 self._VerifyErrors([]) | |
176 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") | |
177 | |
178 def testTwoRuns_Units_SuiteName(self): | |
179 test_input = dict(V8_JSON) | |
180 test_input["run_count"] = 2 | |
181 test_input["name"] = "v8" | |
182 test_input["units"] = "ms" | |
183 self._WriteTestInput(test_input) | |
184 self._MockCommand([".", "."], | |
185 ["Richards: 100\nDeltaBlue: 200\n", | |
186 "Richards: 50\nDeltaBlue: 300\n"]) | |
187 self.assertEquals(0, self._CallMain()) | |
188 self._VerifyResults("v8", "ms", [ | |
189 {"name": "Richards", "results": ["50", "100"], "stddev": ""}, | |
190 {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""}, | |
191 ]) | |
192 self._VerifyErrors([]) | |
193 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") | |
194 | |
195 def testTwoRuns_SubRegexp(self): | |
196 test_input = dict(V8_JSON) | |
197 test_input["run_count"] = 2 | |
198 del test_input["results_regexp"] | |
199 test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$" | |
200 test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$" | |
201 self._WriteTestInput(test_input) | |
202 self._MockCommand([".", "."], | |
203 ["Richards: 100\nDeltaBlue: 200\n", | |
204 "Richards: 50\nDeltaBlue: 300\n"]) | |
205 self.assertEquals(0, self._CallMain()) | |
206 self._VerifyResults("test", "score", [ | |
207 {"name": "Richards", "results": ["50", "100"], "stddev": ""}, | |
208 {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""}, | |
209 ]) | |
210 self._VerifyErrors([]) | |
211 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") | |
212 | |
  def testNestedSuite(self):
    """Nested suites: per-benchmark paths, run counts, flags and archs."""
    self._WriteTestInput(V8_NESTED_SUITES_JSON)
    # Dirs and outputs are consumed back to front by _MockCommand:
    # richards runs twice (run_count: 2), sub/leaf three times
    # (run_count_x64: 3), delta_blue once; ShouldntRun (archs: arm)
    # produces no run at all.
    self._MockCommand(["delta_blue", "sub/leaf", "richards"],
                      ["DeltaBlue: 200\n",
                       "Simple: 1 ms.\n",
                       "Simple: 2 ms.\n",
                       "Simple: 3 ms.\n",
                       "Richards: 100\n",
                       "Richards: 50\n"])
    self.assertEquals(0, self._CallMain())
    self.assertEquals([
      {"units": "score",
       "graphs": ["test", "Richards"],
       "results": ["50", "100"],
       "stddev": ""},
      {"units": "ms",
       "graphs": ["test", "Sub", "Leaf"],
       "results": ["3", "2", "1"],
       "stddev": ""},
      {"units": "score",
       "graphs": ["test", "DeltaBlue"],
       "results": ["200"],
       "stddev": ""},
    ], self._LoadResults()["traces"])
    self._VerifyErrors([])
    # Richards specifies binary d7 plus resource files; Leaf and DeltaBlue
    # are expected to run with the default d8, DeltaBlue with its extra
    # --flag2.
    self._VerifyMockMultiple(
        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
         "file2.js", "run.js"),
        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
         "file2.js", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
247 | |
248 def testOneRunStdDevRegExp(self): | |
249 test_input = dict(V8_JSON) | |
250 test_input["stddev_regexp"] = "^%s\-stddev: (.+)$" | |
251 self._WriteTestInput(test_input) | |
252 self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n" | |
253 "DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"]) | |
254 self.assertEquals(0, self._CallMain()) | |
255 self._VerifyResults("test", "score", [ | |
256 {"name": "Richards", "results": ["1.234"], "stddev": "0.23"}, | |
257 {"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"}, | |
258 ]) | |
259 self._VerifyErrors([]) | |
260 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") | |
261 | |
262 def testTwoRunsStdDevRegExp(self): | |
263 test_input = dict(V8_JSON) | |
264 test_input["stddev_regexp"] = "^%s\-stddev: (.+)$" | |
265 test_input["run_count"] = 2 | |
266 self._WriteTestInput(test_input) | |
267 self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n" | |
268 "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n", | |
269 "Richards: 2\nRichards-stddev: 0.5\n" | |
270 "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"]) | |
271 self.assertEquals(1, self._CallMain()) | |
272 self._VerifyResults("test", "score", [ | |
273 {"name": "Richards", "results": ["2", "3"], "stddev": "0.7"}, | |
274 {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"}, | |
275 ]) | |
276 self._VerifyErrors( | |
277 ["Benchmark Richards should only run once since a stddev is provided " | |
278 "by the benchmark.", | |
279 "Benchmark DeltaBlue should only run once since a stddev is provided " | |
280 "by the benchmark.", | |
281 "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark " | |
282 "DeltaBlue."]) | |
283 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") | |
284 | |
285 def testBuildbot(self): | |
286 self._WriteTestInput(V8_JSON) | |
287 self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"]) | |
288 self.assertEquals(0, self._CallMain("--buildbot")) | |
289 self._VerifyResults("test", "score", [ | |
290 {"name": "Richards", "results": ["1.234"], "stddev": ""}, | |
291 {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, | |
292 ]) | |
293 self._VerifyErrors([]) | |
294 self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js") | |
295 | |
296 def testBuildbotWithTotal(self): | |
297 test_input = dict(V8_JSON) | |
298 test_input["total"] = True | |
299 self._WriteTestInput(test_input) | |
300 self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"]) | |
301 self.assertEquals(0, self._CallMain("--buildbot")) | |
302 self._VerifyResults("test", "score", [ | |
303 {"name": "Richards", "results": ["1.234"], "stddev": ""}, | |
304 {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, | |
305 {"name": "Total", "results": ["3626.49109719"], "stddev": ""}, | |
306 ]) | |
307 self._VerifyErrors([]) | |
308 self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js") | |
309 | |
310 def testBuildbotWithTotalAndErrors(self): | |
311 test_input = dict(V8_JSON) | |
312 test_input["total"] = True | |
313 self._WriteTestInput(test_input) | |
314 self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"]) | |
315 self.assertEquals(1, self._CallMain("--buildbot")) | |
316 self._VerifyResults("test", "score", [ | |
317 {"name": "Richards", "results": [], "stddev": ""}, | |
318 {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, | |
319 ]) | |
320 self._VerifyErrors( | |
321 ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards.", | |
322 "Not all traces have the same number of results."]) | |
323 self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js") | |
324 | |
325 def testRegexpNoMatch(self): | |
326 self._WriteTestInput(V8_JSON) | |
327 self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"]) | |
328 self.assertEquals(1, self._CallMain()) | |
329 self._VerifyResults("test", "score", [ | |
330 {"name": "Richards", "results": [], "stddev": ""}, | |
331 {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""}, | |
332 ]) | |
333 self._VerifyErrors( | |
334 ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."]) | |
335 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") | |
336 | |
337 def testOneRunGeneric(self): | |
338 test_input = dict(V8_GENERIC_JSON) | |
339 self._WriteTestInput(test_input) | |
340 self._MockCommand(["."], [ | |
341 "Trace(Test1), Result(1.234), StdDev(0.23)\n" | |
342 "Trace(Test2), Result(10657567), StdDev(106)\n"]) | |
343 self.assertEquals(0, self._CallMain()) | |
344 self._VerifyResults("test", "ms", [ | |
345 {"name": "Test1", "results": ["1.234"], "stddev": "0.23"}, | |
346 {"name": "Test2", "results": ["10657567"], "stddev": "106"}, | |
347 ]) | |
348 self._VerifyErrors([]) | |
349 self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "") | |