Chromium Code Reviews

Unified Diff: tools/unittests/run_perf_test.py

Issue 532673002: Revert "Refactoring: Make gtest testsuite the default." (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
Index: tools/unittests/run_perf_test.py
diff --git a/tools/unittests/run_benchmarks_test.py b/tools/unittests/run_perf_test.py
similarity index 91%
copy from tools/unittests/run_benchmarks_test.py
copy to tools/unittests/run_perf_test.py
index d17025232c98356bbc1a69449f914ffb16c705c1..86c81993c7cfd7f8d71849feef243bc4a705787e 100644
--- a/tools/unittests/run_benchmarks_test.py
+++ b/tools/unittests/run_perf_test.py
@@ -17,7 +17,7 @@ import unittest
 # Requires python-coverage and python-mock. Native python coverage
 # version >= 3.7.1 should be installed to get the best speed.
 
-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")
+TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
 
 V8_JSON = {
   "path": ["."],
@@ -26,7 +26,7 @@ V8_JSON = {
   "main": "run.js",
   "run_count": 1,
   "results_regexp": "^%s: (.+)$",
-  "benchmarks": [
+  "tests": [
     {"name": "Richards"},
     {"name": "DeltaBlue"},
   ]
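
The suite-level "results_regexp" above is a template: the runner fills each test's name into the %s before matching d8's stdout (the per-test regexps later in this file show the expanded form). A minimal standalone sketch of that matching step, with illustrative values rather than code from this CL:

  import re

  results_regexp = "^%s: (.+)$" % "Richards"
  stdout = "Richards: 100\nDeltaBlue: 200\n"
  match = re.search(results_regexp, stdout, re.M)  # re.M: ^ anchors each line
  assert match.group(1) == "100"
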
@@ -37,7 +37,7 @@ V8_NESTED_SUITES_JSON = {
   "flags": ["--flag"],
   "run_count": 1,
   "units": "score",
-  "benchmarks": [
+  "tests": [
     {"name": "Richards",
      "path": ["richards"],
      "binary": "d7",
@@ -47,7 +47,7 @@ V8_NESTED_SUITES_JSON = {
      "results_regexp": "^Richards: (.+)$"},
     {"name": "Sub",
      "path": ["sub"],
-     "benchmarks": [
+     "tests": [
       {"name": "Leaf",
        "path": ["leaf"],
        "run_count_x64": 3,
@@ -79,18 +79,18 @@ V8_GENERIC_JSON = {
 
 Output = namedtuple("Output", "stdout, stderr")
 
-class BenchmarksTest(unittest.TestCase):
+class PerfTest(unittest.TestCase):
   @classmethod
   def setUpClass(cls):
     cls.base = path.dirname(path.dirname(path.abspath(__file__)))
     sys.path.append(cls.base)
     cls._cov = coverage.coverage(
-        include=([os.path.join(cls.base, "run_benchmarks.py")]))
+        include=([os.path.join(cls.base, "run_perf.py")]))
     cls._cov.start()
-    import run_benchmarks
+    import run_perf
     from testrunner.local import commands
     global commands
-    global run_benchmarks
+    global run_perf
 
   @classmethod
   def tearDownClass(cls):
@@ -114,13 +114,13 @@ class BenchmarksTest(unittest.TestCase):
     f.write(json.dumps(json_content))
 
   def _MockCommand(self, *args):
-    # Fake output for each benchmark run.
-    benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
+    # Fake output for each test run.
+    test_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
     def execute(*args, **kwargs):
-      return benchmark_outputs.pop()
+      return test_outputs.pop()
     commands.Execute = MagicMock(side_effect=execute)
 
-    # Check that d8 is called from the correct cwd for each benchmark run.
+    # Check that d8 is called from the correct cwd for each test run.
     dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
     def chdir(*args, **kwargs):
       self.assertEquals(dirs.pop(), args[0])
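
For readers unfamiliar with the pattern above: MagicMock's side_effect delegates every call to the supplied function, so popping canned Output tuples fakes one subprocess result per run. A self-contained sketch using the same python-mock API, with illustrative values:

  from collections import namedtuple
  from mock import MagicMock

  Output = namedtuple("Output", "stdout, stderr")
  outputs = [Output(stdout="Richards: 100\n", stderr=None)]

  def execute(*args, **kwargs):
    return outputs.pop()  # consumes the canned outputs from the end

  Execute = MagicMock(side_effect=execute)
  assert Execute("d8", "--flag", "run.js").stdout == "Richards: 100\n"
  Execute.assert_called_with("d8", "--flag", "run.js")
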
@@ -134,7 +134,7 @@ class BenchmarksTest(unittest.TestCase):
       self._test_input,
     ]
     all_args += args
-    return run_benchmarks.Main(all_args)
+    return run_perf.Main(all_args)
 
   def _LoadResults(self):
     with open(self._test_output) as f:
@@ -196,8 +196,8 @@ class BenchmarksTest(unittest.TestCase):
     test_input = dict(V8_JSON)
     test_input["run_count"] = 2
     del test_input["results_regexp"]
-    test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$"
-    test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
+    test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
+    test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
     self._WriteTestInput(test_input)
     self._MockCommand([".", "."],
                       ["Richards: 100\nDeltaBlue: 200\n",
@@ -274,11 +274,11 @@ class BenchmarksTest(unittest.TestCase):
       {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
     ])
     self._VerifyErrors(
-        ["Benchmark Richards should only run once since a stddev is provided "
-         "by the benchmark.",
-         "Benchmark DeltaBlue should only run once since a stddev is provided "
-         "by the benchmark.",
-         "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark "
+        ["Test Richards should only run once since a stddev is provided "
+         "by the test.",
+         "Test DeltaBlue should only run once since a stddev is provided "
+         "by the test.",
+         "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
          "DeltaBlue."])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
 
@@ -318,7 +318,7 @@ class BenchmarksTest(unittest.TestCase):
       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards.",
+        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
         "Not all traces have the same number of results."])
     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
 
@@ -331,7 +331,7 @@ class BenchmarksTest(unittest.TestCase):
       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])
+        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
 
   def testOneRunGeneric(self):