#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from collections import namedtuple
import coverage
import json
from mock import DEFAULT
from mock import MagicMock
import os
from os import path, sys
import shutil
import tempfile
import unittest

# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.

TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")

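# Minimal flat suite description used by most tests below: one shared
# results_regexp template ("%s" is filled in with the benchmark name) and a
# single run per benchmark.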
V8_JSON = {
  "path": ["."],
  "binary": "d7",
  "flags": ["--flag"],
  "main": "run.js",
  "run_count": 1,
  "results_regexp": "^%s: (.+)$",
  "benchmarks": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
  ]
}

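# Nested suite description: benchmarks override suite-level defaults such as
# binary, run_count, units and flags; "Sub" nests a further "Leaf" benchmark,
# and "ShouldntRun" is restricted to the arm architecture.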
V8_NESTED_SUITES_JSON = {
  "path": ["."],
  "flags": ["--flag"],
  "run_count": 1,
  "units": "score",
  "benchmarks": [
    {"name": "Richards",
     "path": ["richards"],
     "binary": "d7",
     "main": "run.js",
     "resources": ["file1.js", "file2.js"],
     "run_count": 2,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "Sub",
     "path": ["sub"],
     "benchmarks": [
       {"name": "Leaf",
        "path": ["leaf"],
        "run_count_x64": 3,
        "units": "ms",
        "main": "run.js",
        "results_regexp": "^Simple: (.+) ms.$"},
     ]
    },
    {"name": "DeltaBlue",
     "path": ["delta_blue"],
     "main": "run.js",
     "flags": ["--flag2"],
     "results_regexp": "^DeltaBlue: (.+)$"},
    {"name": "ShouldntRun",
     "path": ["."],
     "archs": ["arm"],
     "main": "run.js"},
  ]
}

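# Mirrors the (stdout, stderr) pair returned by commands.Execute; only the
# stdout part is used by these tests.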
Output = namedtuple("Output", "stdout, stderr")

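# The tests replace commands.Execute and os.chdir with mocks, feed canned
# benchmark output to run_benchmarks.Main and then check both the JSON
# results file it writes and the command lines it would have executed.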
class BenchmarksTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    cls.base = path.dirname(path.dirname(path.abspath(__file__)))
    sys.path.append(cls.base)
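    # Coverage is started before run_benchmarks is imported, presumably so
    # that its module-level code shows up in the report printed on teardown.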
    cls._cov = coverage.coverage(
        include=([os.path.join(cls.base, "run_benchmarks.py")]))
    cls._cov.start()
    import run_benchmarks
    from testrunner.local import commands
    global commands
    global run_benchmarks

  @classmethod
  def tearDownClass(cls):
    cls._cov.stop()
    print ""
    print cls._cov.report()

  def setUp(self):
    self.maxDiff = None
    if path.exists(TEST_WORKSPACE):
      shutil.rmtree(TEST_WORKSPACE)
    os.makedirs(TEST_WORKSPACE)

  def tearDown(self):
    if path.exists(TEST_WORKSPACE):
      shutil.rmtree(TEST_WORKSPACE)

  def _WriteTestInput(self, json_content):
    self._test_input = path.join(TEST_WORKSPACE, "test.json")
    with open(self._test_input, "w") as f:
      f.write(json.dumps(json_content))

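  # args[0] holds the expected working directories, args[1] the canned stdout
  # for each run. Both lists are popped from the end, i.e. they have to be
  # given in reverse execution order.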
  def _MockCommand(self, *args):
    # Fake output for each benchmark run.
    benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
    def execute(*args, **kwargs):
      return benchmark_outputs.pop()
    commands.Execute = MagicMock(side_effect=execute)

    # Check that d8 is called from the correct cwd for each benchmark run.
    dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
    def chdir(*args, **kwargs):
      self.assertEquals(dirs.pop(), args[0])
    os.chdir = MagicMock(side_effect=chdir)

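  # Invokes run_benchmarks.Main on the written test input and directs the
  # JSON results into the test workspace.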
  def _CallMain(self, *args):
    self._test_output = path.join(TEST_WORKSPACE, "results.json")
    all_args = [
      "--json-test-results",
      self._test_output,
      self._test_input,
    ]
    all_args += args
    run_benchmarks.Main(all_args)

  def _LoadResults(self):
    with open(self._test_output) as f:
      return json.load(f)

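  # Compares the "traces" section of the results file against the expected
  # units, graph names and result lists.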
  def _VerifyResults(self, suite, units, traces):
    self.assertEquals([
      {"units": units,
       "graphs": [suite, trace["name"]],
       "results": trace["results"]} for trace in traces],
      self._LoadResults()["traces"])

  def _VerifyErrors(self, errors):
    self.assertEquals(errors, self._LoadResults()["errors"])

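  # Checks the most recent command line passed to commands.Execute: the
  # binary path is joined onto the parent directory of cls.base and a
  # 60 second timeout is expected.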
  def _VerifyMock(self, binary, *args):
    arg = [path.join(path.dirname(self.base), binary)]
    arg += args
    commands.Execute.assert_called_with(arg, timeout=60)

  def _VerifyMockMultiple(self, *args):
    expected = []
    for arg in args:
      a = [path.join(path.dirname(self.base), arg[0])]
      a += arg[1:]
      expected.append(((a,), {"timeout": 60}))
    self.assertEquals(expected, commands.Execute.call_args_list)

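  # Single run of the flat suite: both results are parsed out of one d7
  # invocation via the shared regexp template.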
  def testOneRun(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
    self._CallMain()
    self._VerifyResults("test", "score", [
      {"name": "Richards", "results": ["1.234"]},
      {"name": "DeltaBlue", "results": ["10657567"]},
    ])
    self._VerifyErrors([])
    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")

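  # Two runs with an explicit suite name ("v8") and units ("ms"); one result
  # per benchmark and run is reported.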
  def testTwoRuns_Units_SuiteName(self):
    test_input = dict(V8_JSON)
    test_input["run_count"] = 2
    test_input["name"] = "v8"
    test_input["units"] = "ms"
    self._WriteTestInput(test_input)
    self._MockCommand([".", "."],
                      ["Richards: 100\nDeltaBlue: 200\n",
                       "Richards: 50\nDeltaBlue: 300\n"])
    self._CallMain()
    self._VerifyResults("v8", "ms", [
      {"name": "Richards", "results": ["50", "100"]},
      {"name": "DeltaBlue", "results": ["300", "200"]},
    ])
    self._VerifyErrors([])
    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")

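  # Same as above, but with per-benchmark results_regexp entries instead of
  # the suite-level template.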
  def testTwoRuns_SubRegexp(self):
    test_input = dict(V8_JSON)
    test_input["run_count"] = 2
    del test_input["results_regexp"]
    test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$"
    test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
    self._WriteTestInput(test_input)
    self._MockCommand([".", "."],
                      ["Richards: 100\nDeltaBlue: 200\n",
                       "Richards: 50\nDeltaBlue: 300\n"])
    self._CallMain()
    self._VerifyResults("test", "score", [
      {"name": "Richards", "results": ["50", "100"]},
      {"name": "DeltaBlue", "results": ["300", "200"]},
    ])
    self._VerifyErrors([])
    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")

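  # Exercises the nested suite: per-benchmark binaries, resources, run counts
  # (including run_count_x64 for "Leaf"), extra flags for "DeltaBlue", and
  # skipping of the arm-only "ShouldntRun" benchmark.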
  def testNestedSuite(self):
    self._WriteTestInput(V8_NESTED_SUITES_JSON)
    self._MockCommand(["delta_blue", "sub/leaf", "richards"],
                      ["DeltaBlue: 200\n",
                       "Simple: 1 ms.\n",
                       "Simple: 2 ms.\n",
                       "Simple: 3 ms.\n",
                       "Richards: 100\n",
                       "Richards: 50\n"])
    self._CallMain()
    self.assertEquals([
      {"units": "score",
       "graphs": ["test", "Richards"],
       "results": ["50", "100"]},
      {"units": "ms",
       "graphs": ["test", "Sub", "Leaf"],
       "results": ["3", "2", "1"]},
      {"units": "score",
       "graphs": ["test", "DeltaBlue"],
       "results": ["200"]},
    ], self._LoadResults()["traces"])
    self._VerifyErrors([])
    self._VerifyMockMultiple(
        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
         "file2.js", "run.js"),
        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
         "file2.js", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
        (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))

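  # With --buildbot the binary is expected under out/Release rather than
  # out/x64.release.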
  def testBuildbot(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
    self._CallMain("--buildbot")
    self._VerifyResults("test", "score", [
      {"name": "Richards", "results": ["1.234"]},
      {"name": "DeltaBlue", "results": ["10657567"]},
    ])
    self._VerifyErrors([])
    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")

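  # A result line that doesn't match the regexp ("Richaards") leaves the
  # result list empty and adds an entry to the "errors" section.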
  def testRegexpNoMatch(self):
    self._WriteTestInput(V8_JSON)
    self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
    self._CallMain()
    self._VerifyResults("test", "score", [
      {"name": "Richards", "results": []},
      {"name": "DeltaBlue", "results": ["10657567"]},
    ])
    self._VerifyErrors(
        ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])
    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")