Index: tools/run_perf.py
diff --git a/tools/run_benchmarks.py b/tools/run_perf.py
similarity index 88%
copy from tools/run_benchmarks.py
copy to tools/run_perf.py
index cc0bb2c5b87f15ae9e386101e37e6fbdec58d3ca..0022c8e5d02f9e46b1da0696e4cb7e5a38c5435e 100755
--- a/tools/run_benchmarks.py
+++ b/tools/run_perf.py
@@ -6,11 +6,11 @@
 """
 Performance runner for d8.
 
-Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json
+Call e.g. with tools/run_perf.py --arch ia32 some_suite.json
 
 The suite json format is expected to be:
 {
-  "path": <relative path chunks to benchmark resources and main file>,
+  "path": <relative path chunks to perf resources and main file>,
   "name": <optional suite name, file name is default>,
   "archs": [<architecture name for which this suite is run>, ...],
   "binary": <name of binary to run, default "d8">,
@@ -18,13 +18,13 @@ The suite json format is expected to be:
   "run_count": <how often will this suite run (optional)>,
   "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
   "resources": [<js file to be loaded before main>, ...]
-  "main": <main js benchmark runner file>,
+  "main": <main js perf runner file>,
   "results_regexp": <optional regexp>,
   "results_processor": <optional python results processor script>,
   "units": <the unit specification for the performance dashboard>,
-  "benchmarks": [
+  "tests": [
     {
-      "name": <name of the benchmark>,
+      "name": <name of the trace>,
       "results_regexp": <optional more specific regexp>,
       "results_processor": <optional python results processor script>,
       "units": <the unit specification for the performance dashboard>,
@@ -32,23 +32,23 @@ The suite json format is expected to be:
   ]
 }
 
-The benchmarks field can also nest other suites in arbitrary depth. A suite
+The tests field can also nest other suites in arbitrary depth. A suite
 with a "main" file is a leaf suite that can contain one more level of
-benchmarks.
+tests.
 
 A suite's results_regexp is expected to have one string placeholder
-"%s" for the benchmark name. A benchmark's results_regexp overwrites suite
+"%s" for the trace name. A trace's results_regexp overwrites suite
 defaults.
 
 A suite's results_processor may point to an optional python script. If
-specified, it is called after running the benchmarks like this (with a path
+specified, it is called after running the tests like this (with a path
 relative to the suite level's path):
 <results_processor file> <same flags as for d8> <suite level name> <output>
 
 The <output> is a temporary file containing d8 output. The results_regexp will
 be applied to the output of this script.
 
-A suite without "benchmarks" is considered a benchmark itself.
+A suite without "tests" is considered a performance test itself.
 
 Full example (suite with one runner):
 {
@@ -60,7 +60,7 @@ Full example (suite with one runner):
   "main": "run.js",
   "results_regexp": "^%s: (.+)$",
   "units": "score",
-  "benchmarks": [
+  "tests": [
     {"name": "Richards"},
     {"name": "DeltaBlue"},
     {"name": "NavierStokes",
@@ -75,7 +75,7 @@ Full example (suite with several runners):
   "archs": ["ia32", "x64"],
   "run_count": 5,
   "units": "score",
-  "benchmarks": [
+  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
@@ -150,7 +150,7 @@ class Results(object):
 
 
 class Node(object):
-  """Represents a node in the benchmark suite tree structure."""
+  """Represents a node in the suite tree structure."""
   def __init__(self, *args):
     self._children = []
 
@@ -175,7 +175,7 @@ class DefaultSentinel(Node):
 
 
 class Graph(Node):
-  """Represents a benchmark suite definition.
+  """Represents a suite definition.
 
   Can either be a leaf or an inner node that provides default values.
   """
@@ -221,7 +221,7 @@ class Graph(Node):
 
 
 class Trace(Graph):
-  """Represents a leaf in the benchmark suite tree structure.
+  """Represents a leaf in the suite tree structure.
 
   Handles collection of measurements.
   """
@@ -237,17 +237,17 @@ class Trace(Graph):
       self.results.append(
           re.search(self.results_regexp, stdout, re.M).group(1))
     except:
-      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
+      self.errors.append("Regexp \"%s\" didn't match for test %s."
                          % (self.results_regexp, self.graphs[-1]))
 
     try:
       if self.stddev_regexp and self.stddev:
-        self.errors.append("Benchmark %s should only run once since a stddev "
-                           "is provided by the benchmark." % self.graphs[-1])
+        self.errors.append("Test %s should only run once since a stddev "
+                           "is provided by the test." % self.graphs[-1])
       if self.stddev_regexp:
         self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
     except:
-      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
+      self.errors.append("Regexp \"%s\" didn't match for test %s."
                          % (self.stddev_regexp, self.graphs[-1]))
 
   def GetResults(self):
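
For context on the stddev handling above: a test that provides a
stddev_regexp reports its own spread, so there is only one slot for the
collected value and a second run would clobber it, which is what the
"should only run once" error guards against. A rough sketch with made-up
output (the "-stddev" suffix and key names are purely illustrative):

  import re

  # Made-up output of a test that reports its own standard deviation.
  stdout = "Richards: 1234\nRichards-stddev: 17\n"

  results_regexp = "^%s: (.+)$" % "Richards"
  stddev_regexp = "^%s-stddev: (.+)$" % "Richards"

  score = re.search(results_regexp, stdout, re.M).group(1)   # "1234"
  stddev = re.search(stddev_regexp, stdout, re.M).group(1)   # "17"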
@@ -260,7 +260,7 @@ class Trace(Graph):
 
 
 class Runnable(Graph):
-  """Represents a runnable benchmark suite definition (i.e. has a main file).
+  """Represents a runnable suite definition (i.e. has a main file).
   """
   @property
   def main(self):
@@ -269,7 +269,7 @@ class Runnable(Graph):
   def ChangeCWD(self, suite_path):
     """Changes the cwd to the path defined in the current graph.
 
-    The benchmarks are supposed to be relative to the suite configuration.
+    The tests are supposed to be relative to the suite configuration.
     """
     suite_dir = os.path.abspath(os.path.dirname(suite_path))
     bench_dir = os.path.normpath(os.path.join(*self.path))
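
As an aside on the two lines above: "path" in the suite configuration is a
list of path chunks, which ChangeCWD joins, normalizes and resolves
relative to the directory of the suite file. A small sketch with
hypothetical values:

  import os

  # Hypothetical values; in the runner these come from the suite config.
  suite_path = "/src/v8/benchmarks/octane.json"
  path_chunks = [".", "octane"]

  suite_dir = os.path.abspath(os.path.dirname(suite_path))
  bench_dir = os.path.normpath(os.path.join(*path_chunks))
  print(os.path.join(suite_dir, bench_dir))  # -> /src/v8/benchmarks/octane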
@@ -314,7 +314,7 @@ class Runnable(Graph):
     return res
 
 class RunnableTrace(Trace, Runnable):
-  """Represents a runnable benchmark suite definition that is a leaf."""
+  """Represents a runnable suite definition that is a leaf."""
   def __init__(self, suite, parent, arch):
     super(RunnableTrace, self).__init__(suite, parent, arch)
 
@@ -326,7 +326,7 @@ class RunnableTrace(Trace, Runnable):
 
 
 class RunnableGeneric(Runnable):
-  """Represents a runnable benchmark suite definition with generic traces."""
+  """Represents a runnable suite definition with generic traces."""
   def __init__(self, suite, parent, arch):
     super(RunnableGeneric, self).__init__(suite, parent, arch)
 
@@ -359,21 +359,21 @@ def MakeGraph(suite, arch, parent):
     return Trace(suite, parent, arch)
   elif suite.get("main"):
     # A main file makes this graph runnable.
-    if suite.get("benchmarks"):
-      # This graph has subbenchmarks (traces).
+    if suite.get("tests"):
+      # This graph has subgraphs (traces).
       return Runnable(suite, parent, arch)
     else:
-      # This graph has no subbenchmarks, it's a leaf.
+      # This graph has no subgraphs, it's a leaf.
       return RunnableTrace(suite, parent, arch)
   elif suite.get("generic"):
     # This is a generic suite definition. It is either a runnable executable
     # or has a main js file.
     return RunnableGeneric(suite, parent, arch)
-  elif suite.get("benchmarks"):
+  elif suite.get("tests"):
     # This is neither a leaf nor a runnable.
     return Graph(suite, parent, arch)
   else:  # pragma: no cover
-    raise Exception("Invalid benchmark suite configuration.")
+    raise Exception("Invalid suite configuration.")
 
 
 def BuildGraphs(suite, arch, parent=None):
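
The dispatch in MakeGraph above boils down to a small decision table. The
helper below is only a sketch of that table under the new key names (class
names returned as strings, configs hypothetical), not code from the patch:

  # Standalone sketch mirroring the branch order of MakeGraph.
  def classify(suite, parent_is_runnable=False):
    if parent_is_runnable:
      return "Trace"             # below a runnable there can only be traces
    elif suite.get("main"):
      if suite.get("tests"):
        return "Runnable"        # main file plus nested traces
      else:
        return "RunnableTrace"   # main file, no subgraphs: a runnable leaf
    elif suite.get("generic"):
      return "RunnableGeneric"   # generic suite with generic traces
    elif suite.get("tests"):
      return "Graph"             # inner node that only provides defaults
    else:
      raise Exception("Invalid suite configuration.")

  assert classify({"main": "run.js", "tests": [{}]}) == "Runnable"
  assert classify({"main": "run.js"}) == "RunnableTrace"
  assert classify({"tests": [{}]}) == "Graph"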
@@ -387,7 +387,7 @@ def BuildGraphs(suite, arch, parent=None):
     return None
 
   graph = MakeGraph(suite, arch, parent)
-  for subsuite in suite.get("benchmarks", []):
+  for subsuite in suite.get("tests", []):
     BuildGraphs(subsuite, arch, graph)
   parent.AppendChild(graph)
   return graph
@@ -404,7 +404,7 @@ def FlattenRunnables(node):
       for result in FlattenRunnables(child):
         yield result
   else:  # pragma: no cover
-    raise Exception("Invalid benchmark suite configuration.")
+    raise Exception("Invalid suite configuration.")
 
 
 # TODO: Implement results_processor.
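
The TODO above leaves results_processor unimplemented, but the docstring
already fixes its command-line contract: it is called with the d8 flags,
the suite-level name and a temporary file holding raw d8 output, and
whatever it prints is what the results_regexp is applied to. A hypothetical
minimal processor honoring that contract (not part of this patch):

  #!/usr/bin/env python
  import sys

  def main():
    suite_name = sys.argv[-2]   # unused in this toy example
    output_file = sys.argv[-1]  # temporary file with raw d8 output
    with open(output_file) as f:
      for line in f:
        # Toy transformation: forward only "name: value" lines, which the
        # suite's results_regexp is then matched against.
        if ":" in line:
          sys.stdout.write(line)

  if __name__ == "__main__":
    main()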
@@ -447,7 +447,7 @@ def Main(args):
     path = os.path.abspath(path)
 
     if not os.path.exists(path):  # pragma: no cover
-      results.errors.append("Benchmark file %s does not exist." % path)
+      results.errors.append("Configuration file %s does not exist." % path)
       continue
 
     with open(path) as f:
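
Finally, for reference, a minimal suite configuration under the new "tests"
naming, assembled from the docstring's own examples; parsing it with the
json module is how a file picked up by the loop above would typically be
loaded:

  import json

  config = """
  {
    "path": ["."],
    "name": "sample",
    "main": "run.js",
    "results_regexp": "^%s: (.+)$",
    "units": "score",
    "tests": [
      {"name": "Richards"},
      {"name": "DeltaBlue"}
    ]
  }
  """
  suite = json.loads(config)
  print([t["name"] for t in suite["tests"]])  # -> ['Richards', 'DeltaBlue']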