Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(24)

Side by Side Diff: tools/unittests/run_perf_test.py

Issue 1215273003: [test] Let perf runner interleave try executions. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Test case Created 5 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« tools/run_perf.py ('K') | « tools/run_perf.py ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2014 the V8 project authors. All rights reserved. 2 # Copyright 2014 the V8 project authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 from collections import namedtuple 6 from collections import namedtuple
7 import coverage 7 import coverage
8 import json 8 import json
9 from mock import DEFAULT 9 from mock import DEFAULT
10 from mock import MagicMock 10 from mock import MagicMock
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after
def _CallMain(self, *args):
  """Invoke run_perf.Main with a JSON results file plus any extra CLI args.

  Records the results path on self._test_output so later _Verify* helpers
  can read it back. Returns run_perf.Main's integer exit code.
  """
  self._test_output = path.join(TEST_WORKSPACE, "results.json")
  # Base arguments every test needs; callers append test-specific flags.
  all_args = [
      "--json-test-results",
      self._test_output,
      self._test_input,
  ]
  all_args += args
  return run_perf.Main(all_args)
141 141
142 def _LoadResults(self): 142 def _LoadResults(self, file_name=None):
143 with open(self._test_output) as f: 143 with open(file_name or self._test_output) as f:
144 return json.load(f) 144 return json.load(f)
145 145
146 def _VerifyResults(self, suite, units, traces): 146 def _VerifyResults(self, suite, units, traces, file_name=None):
147 self.assertEquals([ 147 self.assertEquals([
148 {"units": units, 148 {"units": units,
149 "graphs": [suite, trace["name"]], 149 "graphs": [suite, trace["name"]],
150 "results": trace["results"], 150 "results": trace["results"],
151 "stddev": trace["stddev"]} for trace in traces], 151 "stddev": trace["stddev"]} for trace in traces],
152 self._LoadResults()["traces"]) 152 self._LoadResults(file_name)["traces"])
153 153
154 def _VerifyErrors(self, errors): 154 def _VerifyErrors(self, errors):
155 self.assertEquals(errors, self._LoadResults()["errors"]) 155 self.assertEquals(errors, self._LoadResults()["errors"])
156 156
def _VerifyMock(self, binary, *args, **kwargs):
  """Check the command mock's last call: binary path, args and timeout.

  The binary is resolved relative to the directory of self.base; timeout
  defaults to 60 seconds when the caller didn't pass one.
  """
  arg = [path.join(path.dirname(self.base), binary)]
  arg += args
  commands.Execute.assert_called_with(
      arg, timeout=kwargs.get("timeout", 60))
162 162
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
395 "Regexp \"^Richards: (.+)$\" didn't match for test Richards.", 395 "Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
396 "Regexp \"^DeltaBlue: (.+)$\" didn't match for test DeltaBlue.", 396 "Regexp \"^DeltaBlue: (.+)$\" didn't match for test DeltaBlue.",
397 ]) 397 ])
398 self._VerifyMock( 398 self._VerifyMock(
399 path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70) 399 path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
400 400
# Simple test that mocks out the android platform. Testing the platform would
# require lots of complicated mocks for the android tools.
def testAndroid(self):
  self._WriteTestInput(V8_JSON)
  # FIXME(machenbach): This is not test-local! The mocked platform class
  # leaks into other tests because the module attribute is patched in place.
  platform = run_perf.AndroidPlatform
  platform.PreExecution = MagicMock(return_value=None)
  platform.PostExecution = MagicMock(return_value=None)
  platform.PreTests = MagicMock(return_value=None)
  # Run now returns an (stdout, stderr) tuple.
  platform.Run = MagicMock(
      return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
  run_perf.AndroidPlatform = MagicMock(return_value=platform)
  self.assertEquals(
      0, self._CallMain("--android-build-tools", "/some/dir",
                        "--arch", "arm"))
  self._VerifyResults("test", "score", [
    {"name": "Richards", "results": ["1.234"], "stddev": ""},
    {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
  ])
420
def testTwoRuns_Trybot(self):
  """Interleaved patched/unpatched runs write two separate results files."""
  test_input = dict(V8_JSON)
  test_input["run_count"] = 2
  self._WriteTestInput(test_input)
  # Four mocked executions: runs alternate between the patched and the
  # no-patch build, two repetitions each.
  self._MockCommand([".", ".", ".", "."],
                    ["Richards: 100\nDeltaBlue: 200\n",
                     "Richards: 200\nDeltaBlue: 20\n",
                     "Richards: 50\nDeltaBlue: 200\n",
                     "Richards: 100\nDeltaBlue: 20\n"])
  test_output_no_patch = path.join(TEST_WORKSPACE, "results_no_patch.json")
  self.assertEquals(0, self._CallMain(
      "--outdir-no-patch", "out-no-patch",
      "--json-test-results-no-patch", test_output_no_patch,
  ))
  # Results with the patch (default output file).
  self._VerifyResults("test", "score", [
    {"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
    {"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
  ])
  # Results without the patch (separate output file).
  self._VerifyResults("test", "score", [
    {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
    {"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
  ], test_output_no_patch)
  self._VerifyErrors([])
  self._VerifyMockMultiple(
      (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
      (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
      (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
      (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
  )
450
def testUnchain(self):
  """run_perf.Unchain splits a generator of pairs into two callables that
  replay the sequence of firsts and the sequence of seconds."""
  def Gen():
    for i in [1, 2, 3]:
      yield i, i + 1
  l, r = run_perf.Unchain(Gen())
  self.assertEquals([1, 2, 3], list(l()))
  self.assertEquals([2, 3, 4], list(r()))
OLDNEW
« tools/run_perf.py ('K') | « tools/run_perf.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698