Chromium Code Reviews

Unified Diff: tools/unittests/run_perf_test.py

Issue 532673002: Revert "Refactoring: Make gtest testsuite the default." (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 3 months ago
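Reading note (not part of the review itself): in this patch the base version of the fixtures lists traces under a "benchmarks" key and drives them with run_benchmarks.py, while the patched version uses a "tests" key and run_perf.py. A minimal sketch of the two fixture shapes, condensed from the V8_JSON constant in the diff below (the OLD_SUITE/NEW_SUITE names are illustrative only):

  # Illustrative only; condensed from V8_JSON in this file.
  # Base version: traces under "benchmarks", consumed by run_benchmarks.py.
  OLD_SUITE = {"path": ["."], "results_regexp": "^%s: (.+)$",
               "benchmarks": [{"name": "Richards"}, {"name": "DeltaBlue"}]}

  # Patched version: the same traces under "tests", consumed by run_perf.py.
  NEW_SUITE = {"path": ["."], "results_regexp": "^%s: (.+)$",
               "tests": [{"name": "Richards"}, {"name": "DeltaBlue"}]}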
 #!/usr/bin/env python
 # Copyright 2014 the V8 project authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 from collections import namedtuple
 import coverage
 import json
 from mock import DEFAULT
 from mock import MagicMock
 import os
 from os import path, sys
 import shutil
 import tempfile
 import unittest

 # Requires python-coverage and python-mock. Native python coverage
 # version >= 3.7.1 should be installed to get the best speed.

-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")
+TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")

 V8_JSON = {
   "path": ["."],
   "binary": "d7",
   "flags": ["--flag"],
   "main": "run.js",
   "run_count": 1,
   "results_regexp": "^%s: (.+)$",
-  "benchmarks": [
+  "tests": [
     {"name": "Richards"},
     {"name": "DeltaBlue"},
   ]
 }

 V8_NESTED_SUITES_JSON = {
   "path": ["."],
   "flags": ["--flag"],
   "run_count": 1,
   "units": "score",
-  "benchmarks": [
+  "tests": [
     {"name": "Richards",
      "path": ["richards"],
      "binary": "d7",
      "main": "run.js",
      "resources": ["file1.js", "file2.js"],
      "run_count": 2,
      "results_regexp": "^Richards: (.+)$"},
     {"name": "Sub",
      "path": ["sub"],
-     "benchmarks": [
+     "tests": [
       {"name": "Leaf",
        "path": ["leaf"],
        "run_count_x64": 3,
        "units": "ms",
        "main": "run.js",
        "results_regexp": "^Simple: (.+) ms.$"},
      ]
     },
     {"name": "DeltaBlue",
      "path": ["delta_blue"],
(...skipping 11 matching lines...)
72 "path": ["."], 72 "path": ["."],
73 "binary": "cc", 73 "binary": "cc",
74 "flags": ["--flag"], 74 "flags": ["--flag"],
75 "generic": True, 75 "generic": True,
76 "run_count": 1, 76 "run_count": 1,
77 "units": "ms", 77 "units": "ms",
78 } 78 }
79 79
80 Output = namedtuple("Output", "stdout, stderr") 80 Output = namedtuple("Output", "stdout, stderr")
81 81
82 class BenchmarksTest(unittest.TestCase): 82 class PerfTest(unittest.TestCase):
83 @classmethod 83 @classmethod
84 def setUpClass(cls): 84 def setUpClass(cls):
85 cls.base = path.dirname(path.dirname(path.abspath(__file__))) 85 cls.base = path.dirname(path.dirname(path.abspath(__file__)))
86 sys.path.append(cls.base) 86 sys.path.append(cls.base)
87 cls._cov = coverage.coverage( 87 cls._cov = coverage.coverage(
88 include=([os.path.join(cls.base, "run_benchmarks.py")])) 88 include=([os.path.join(cls.base, "run_perf.py")]))
89 cls._cov.start() 89 cls._cov.start()
90 import run_benchmarks 90 import run_perf
91 from testrunner.local import commands 91 from testrunner.local import commands
92 global commands 92 global commands
93 global run_benchmarks 93 global run_perf
94 94
95 @classmethod 95 @classmethod
96 def tearDownClass(cls): 96 def tearDownClass(cls):
97 cls._cov.stop() 97 cls._cov.stop()
98 print "" 98 print ""
99 print cls._cov.report() 99 print cls._cov.report()
100 100
101 def setUp(self): 101 def setUp(self):
102 self.maxDiff = None 102 self.maxDiff = None
103 if path.exists(TEST_WORKSPACE): 103 if path.exists(TEST_WORKSPACE):
104 shutil.rmtree(TEST_WORKSPACE) 104 shutil.rmtree(TEST_WORKSPACE)
105 os.makedirs(TEST_WORKSPACE) 105 os.makedirs(TEST_WORKSPACE)
106 106
107 def tearDown(self): 107 def tearDown(self):
108 if path.exists(TEST_WORKSPACE): 108 if path.exists(TEST_WORKSPACE):
109 shutil.rmtree(TEST_WORKSPACE) 109 shutil.rmtree(TEST_WORKSPACE)
110 110
111 def _WriteTestInput(self, json_content): 111 def _WriteTestInput(self, json_content):
112 self._test_input = path.join(TEST_WORKSPACE, "test.json") 112 self._test_input = path.join(TEST_WORKSPACE, "test.json")
113 with open(self._test_input, "w") as f: 113 with open(self._test_input, "w") as f:
114 f.write(json.dumps(json_content)) 114 f.write(json.dumps(json_content))
115 115
116 def _MockCommand(self, *args): 116 def _MockCommand(self, *args):
117 # Fake output for each benchmark run. 117 # Fake output for each test run.
118 benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]] 118 test_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
119 def execute(*args, **kwargs): 119 def execute(*args, **kwargs):
120 return benchmark_outputs.pop() 120 return test_outputs.pop()
121 commands.Execute = MagicMock(side_effect=execute) 121 commands.Execute = MagicMock(side_effect=execute)
122 122
123 # Check that d8 is called from the correct cwd for each benchmark run. 123 # Check that d8 is called from the correct cwd for each test run.
124 dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]] 124 dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
125 def chdir(*args, **kwargs): 125 def chdir(*args, **kwargs):
126 self.assertEquals(dirs.pop(), args[0]) 126 self.assertEquals(dirs.pop(), args[0])
127 os.chdir = MagicMock(side_effect=chdir) 127 os.chdir = MagicMock(side_effect=chdir)
128 128
129 def _CallMain(self, *args): 129 def _CallMain(self, *args):
130 self._test_output = path.join(TEST_WORKSPACE, "results.json") 130 self._test_output = path.join(TEST_WORKSPACE, "results.json")
131 all_args=[ 131 all_args=[
132 "--json-test-results", 132 "--json-test-results",
133 self._test_output, 133 self._test_output,
134 self._test_input, 134 self._test_input,
135 ] 135 ]
136 all_args += args 136 all_args += args
137 return run_benchmarks.Main(all_args) 137 return run_perf.Main(all_args)
138 138
139 def _LoadResults(self): 139 def _LoadResults(self):
140 with open(self._test_output) as f: 140 with open(self._test_output) as f:
141 return json.load(f) 141 return json.load(f)
142 142
143 def _VerifyResults(self, suite, units, traces): 143 def _VerifyResults(self, suite, units, traces):
144 self.assertEquals([ 144 self.assertEquals([
145 {"units": units, 145 {"units": units,
146 "graphs": [suite, trace["name"]], 146 "graphs": [suite, trace["name"]],
147 "results": trace["results"], 147 "results": trace["results"],
(...skipping 41 matching lines...)
189 {"name": "Richards", "results": ["50", "100"], "stddev": ""}, 189 {"name": "Richards", "results": ["50", "100"], "stddev": ""},
190 {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""}, 190 {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
191 ]) 191 ])
192 self._VerifyErrors([]) 192 self._VerifyErrors([])
193 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js") 193 self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
194 194
195 def testTwoRuns_SubRegexp(self): 195 def testTwoRuns_SubRegexp(self):
196 test_input = dict(V8_JSON) 196 test_input = dict(V8_JSON)
197 test_input["run_count"] = 2 197 test_input["run_count"] = 2
198 del test_input["results_regexp"] 198 del test_input["results_regexp"]
199 test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$" 199 test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
200 test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$" 200 test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
201 self._WriteTestInput(test_input) 201 self._WriteTestInput(test_input)
202 self._MockCommand([".", "."], 202 self._MockCommand([".", "."],
203 ["Richards: 100\nDeltaBlue: 200\n", 203 ["Richards: 100\nDeltaBlue: 200\n",
204 "Richards: 50\nDeltaBlue: 300\n"]) 204 "Richards: 50\nDeltaBlue: 300\n"])
205 self.assertEquals(0, self._CallMain()) 205 self.assertEquals(0, self._CallMain())
206 self._VerifyResults("test", "score", [ 206 self._VerifyResults("test", "score", [
207 {"name": "Richards", "results": ["50", "100"], "stddev": ""}, 207 {"name": "Richards", "results": ["50", "100"], "stddev": ""},
208 {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""}, 208 {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
209 ]) 209 ])
210 self._VerifyErrors([]) 210 self._VerifyErrors([])
(...skipping 56 matching lines...)
     self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
                               "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
                               "Richards: 2\nRichards-stddev: 0.5\n"
                               "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
     self.assertEquals(1, self._CallMain())
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": ["2", "3"], "stddev": "0.7"},
       {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
     ])
     self._VerifyErrors(
-        ["Benchmark Richards should only run once since a stddev is provided "
-         "by the benchmark.",
-         "Benchmark DeltaBlue should only run once since a stddev is provided "
-         "by the benchmark.",
-         "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark "
+        ["Test Richards should only run once since a stddev is provided "
+         "by the test.",
+         "Test DeltaBlue should only run once since a stddev is provided "
+         "by the test.",
+         "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
          "DeltaBlue."])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")

   def testBuildbot(self):
     self._WriteTestInput(V8_JSON)
     self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
     self.assertEquals(0, self._CallMain("--buildbot"))
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": ["1.234"], "stddev": ""},
       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
(...skipping 19 matching lines...)
     test_input = dict(V8_JSON)
     test_input["total"] = True
     self._WriteTestInput(test_input)
     self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
     self.assertEquals(1, self._CallMain("--buildbot"))
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": [], "stddev": ""},
       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards.",
+        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
          "Not all traces have the same number of results."])
     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")

   def testRegexpNoMatch(self):
     self._WriteTestInput(V8_JSON)
     self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
     self.assertEquals(1, self._CallMain())
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": [], "stddev": ""},
       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])
+        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")

   def testOneRunGeneric(self):
     test_input = dict(V8_GENERIC_JSON)
     self._WriteTestInput(test_input)
     self._MockCommand(["."], [
         "Trace(Test1), Result(1.234), StdDev(0.23)\n"
         "Trace(Test2), Result(10657567), StdDev(106)\n"])
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("test", "ms", [
       {"name": "Test1", "results": ["1.234"], "stddev": "0.23"},
       {"name": "Test2", "results": ["10657567"], "stddev": "106"},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
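Aside, as a reading aid rather than part of the patch: the fixtures above set a suite-level results_regexp of "^%s: (.+)$", and the error strings checked by testRegexpNoMatch suggest the runner fills in each trace name and matches the resulting pattern against the captured d8 output. A minimal sketch of that matching under those assumptions (the real logic lives in run_perf.py / run_benchmarks.py and may differ):

  import re

  # Hypothetical illustration only; mirrors the mocked stdout used in the tests above.
  stdout = "Richards: 100\nDeltaBlue: 200\n"
  for name in ("Richards", "DeltaBlue"):
      m = re.search("^%s: (.+)$" % name, stdout, re.M)
      print("%s -> %s" % (name, m.group(1) if m else "no match"))
  # Richards -> 100
  # DeltaBlue -> 200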