Index: tools/run_perf.py
diff --git a/tools/run_perf.py b/tools/run_perf.py
index db4245f499fc07c3a32f435a78ec08d5f06f58a3..dec8c3ae7ee65e9eb4e75dacd8d2bc144a149af1 100755
--- a/tools/run_perf.py
+++ b/tools/run_perf.py
@@ -190,7 +190,7 @@ class Measurement(object):
   def ConsumeOutput(self, stdout):
     try:
       result = re.search(self.results_regexp, stdout, re.M).group(1)
-      self.results.append(str(float(result)))
+      self.results.append(float(result))
     except ValueError:
       self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                          % (self.results_regexp, self.name))
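For reference, the extraction step behaves like this; a minimal standalone
sketch, where the "Richards" score line and the regexp are made-up examples:

  import re

  stdout = "Richards: 1234.5\n"
  results_regexp = r"^Richards: (.+)$"  # hypothetical suite regexp
  # group(1) is always a string; appending float(...) instead of
  # str(float(...)) is what lets GetMean/GetDeviation do arithmetic later.
  result = re.search(results_regexp, stdout, re.M).group(1)
  print float(result)  # 1234.5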
@@ -208,12 +208,34 @@ class Measurement(object):
       self.errors.append("Regexp \"%s\" didn't match for test %s."
                          % (self.stddev_regexp, self.name))
 
+  def GetDeviation(self):
+    # If the benchmark provided a stddev, use it directly. The value captured
+    # by stddev_regexp is a string, so convert it before returning.
+    if self.stddev:
+      return float(self.stddev)
+    # If no stddev was provided, calculate it from the results.
+    if len(self.results) == 0:
+      return 0
+    mean = self.GetMean()
+    square_deviation = sum((x - mean) ** 2 for x in self.results)
+    return (square_deviation / len(self.results)) ** 0.5
+
+  def GetMean(self):
+    if len(self.results) == 0:
+      return 0
+    # If the test provided a stddev, the results list consists of a single
+    # average value, so return that directly.
+    if self.stddev:
+      return self.results[0]
+    # For a non-empty results list, calculate the average here.
+    return sum(self.results) / len(self.results)
+
   def GetResults(self):
     return Results([{
       "graphs": self.graphs,
       "units": self.units,
-      "results": self.results,
-      "stddev": self.stddev,
+      "results": [str(x) for x in self.results],
+      "average": self.GetMean(),
+      "stddev": self.GetDeviation(),
     }], self.errors)
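A quick worked example of the new aggregation (not part of the patch; the
sample numbers are made up): for results [10.0, 12.0, 14.0] the mean is
36.0 / 3 = 12.0, and the population standard deviation is
((4 + 0 + 4) / 3) ** 0.5, roughly 1.63.

  results = [10.0, 12.0, 14.0]
  mean = sum(results) / len(results)                         # 12.0
  square_deviation = sum((x - mean) ** 2 for x in results)   # 8.0
  stddev = (square_deviation / len(results)) ** 0.5          # ~1.633
  print mean, stddev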
@@ -405,7 +427,10 @@ class GraphConfig(Node):
     # TODO(machenbach): Currently that makes only sense for the leaf level.
     # Multiple place holders for multiple levels are not supported.
     if parent.results_regexp:
-      regexp_default = parent.results_regexp % re.escape(suite["name"])
+      try:
+        regexp_default = parent.results_regexp % re.escape(suite["name"])
+      except TypeError:
+        # The parent regexp contains no %s placeholder; use it unchanged.
+        regexp_default = parent.results_regexp
     else:
       regexp_default = None
     self.results_regexp = suite.get("results_regexp", regexp_default)
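The new try/except covers parent regexps that contain no %s placeholder:
applying the % operator to a string without a conversion specifier raises
TypeError ("not all arguments converted during string formatting"). A small
illustration with made-up regexps:

  import re

  with_placeholder = r"^%s: (.+)$"
  without_placeholder = r"^Score: (.+)$"
  print with_placeholder % re.escape("Octane")  # ^Octane: (.+)$
  try:
    without_placeholder % re.escape("Octane")
  except TypeError:
    print "no placeholder; regexp is used unchanged"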
@@ -587,6 +612,21 @@ class Platform(object):
     else:
       return DesktopPlatform(options)
[Comment by Camillo Bruni, 2016/02/11 13:20:29: Moved all basic "public" methods up here.]
+  def GetPrettyFormatted(self, options):
+    return self
+
+  def PreExecution(self):
+    pass
+
+  def PostExecution(self):
+    pass
+
+  def PreTests(self, node, path):
+    pass
+
+  def PrintResult(self, result):
+    pass
+
   def _Run(self, runnable, count, no_patch=False):
     raise NotImplementedError()  # pragma: no cover
@@ -609,20 +649,56 @@ class Platform(object):
     return stdout, None
+class PlatformFormattedMixin(object):
+  """
+  Helper mixin that adds human-readable output for benchmarks run with
+  the --pretty flag.
+  """
+  def PrintResult(self, result):
+    if result.errors:
+      print "\rErrors:"
+      print "\n".join(set(result.errors))
+    else:
+      trace = result.traces[0]
+      average = trace['average']
+      stdev = trace['stddev']
+      stdev_percentage = 100 * stdev / average if average != 0 else 0
+      result_string = "\r %s +/- %3.2f%% %s" % (
+          average, stdev_percentage, trace['units'])
+      sys.stdout.write(result_string)
+      # Pad with spaces up to 80 characters to overwrite leftover output.
+      sys.stdout.write(' ' * max(0, 80 - len(result_string)))
+      sys.stdout.write("\n")
+      sys.stdout.flush()
+
+  def _PrintStdout(self, title, output):
+    sys.stdout.write("\r")
+    if output.exit_code != 0:
+      print output.stdout
+      return
+    # Assume the result is on the last line of the benchmark output.
+    result_line = output.stdout.splitlines()[-1].strip()
+    sys.stdout.write(result_line)
+    # Pad with spaces up to 80 characters to overwrite leftover output.
+    sys.stdout.write(' ' * max(0, 80 - len(result_line)))
+    sys.stdout.flush()
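The "\r" plus space-padding idiom is what keeps --pretty output on a single,
continuously updated terminal line: the carriage return moves the cursor back
to column 0 without starting a new line, and padding to 80 columns overwrites
any longer text left over from the previous write. The same technique in
isolation (the messages are made up):

  import sys
  import time

  for msg in ["\rrunning benchmark...", "\rdone"]:
    sys.stdout.write(msg)
    # Pad to 80 columns so a short message fully covers a longer one.
    sys.stdout.write(' ' * max(0, 80 - len(msg)))
    sys.stdout.flush()
    time.sleep(0.5)
  sys.stdout.write("\n")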
+
+
 class DesktopPlatform(Platform):
   def __init__(self, options):
     super(DesktopPlatform, self).__init__(options)
 
-  def PreExecution(self):
-    pass
-
-  def PostExecution(self):
-    pass
+  def GetPrettyFormatted(self, options):
+    return PrettyFormattedDesktopPlatform(options)
 
   def PreTests(self, node, path):
     if isinstance(node, RunnableConfig):
       node.ChangeCWD(path)
 
+  def _PrintStdout(self, title, output):
+    print title % "Stdout"
+    print output.stdout
+
   def _Run(self, runnable, count, no_patch=False):
     suffix = ' - without patch' if no_patch else ''
     shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
@@ -636,8 +712,7 @@ class DesktopPlatform(Platform):
       print title % "OSError"
       print e
       return ""
-    print title % "Stdout"
-    print output.stdout
+    self._PrintStdout(title, output)
     if output.stderr:  # pragma: no cover
       # Print stderr for debugging.
       print title % "Stderr"
@@ -654,6 +729,10 @@ class DesktopPlatform(Platform):
     return output.stdout
 
 
+class PrettyFormattedDesktopPlatform(PlatformFormattedMixin, DesktopPlatform):
+  pass
+
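Listing the mixin first matters: Python resolves attributes left to right
along the method resolution order, so PlatformFormattedMixin.PrintResult and
._PrintStdout shadow the DesktopPlatform versions while everything else is
inherited unchanged. A self-contained sketch of the pattern (class names are
illustrative only):

  class Mixin(object):
    def Hook(self):
      return "pretty"

  class Base(object):
    def Hook(self):
      return "plain"

    def Other(self):
      return "shared"

  class PrettyBase(Mixin, Base):
    pass

  p = PrettyBase()
  print p.Hook()   # "pretty" -- the mixin comes first in the MRO
  print p.Other()  # "shared" -- still inherited from Base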
+
 class AndroidPlatform(Platform):  # pragma: no cover
   DEVICE_DIR = "/data/local/tmp/v8/"
 
@@ -671,6 +750,9 @@ class AndroidPlatform(Platform):  # pragma: no cover
     self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
     self.device = device_utils.DeviceUtils(self.adb_wrapper)
 
+  def GetPrettyFormatted(self, options):
+    return PrettyFormattedAndroidPlatform(options)
+
   def PreExecution(self):
     perf = perf_control.PerfControl(self.device)
     perf.SetHighPerfMode()
@@ -789,6 +871,10 @@ class AndroidPlatform(Platform):  # pragma: no cover
     return stdout
 
 
+class PrettyFormattedAndroidPlatform(PlatformFormattedMixin, AndroidPlatform):
+  pass
+
+
 # TODO: Implement results_processor.
 def Main(args):
   logging.getLogger().setLevel(logging.INFO)
@@ -818,6 +904,9 @@ def Main(args):
                     default="out")
   parser.add_option("--outdir-no-patch",
                     help="Base directory with compile output without patch")
+  parser.add_option("--pretty",
+                    help="Print human readable output",
+                    default=False, action="store_true")
   parser.add_option("--binary-override-path",
                     help="JavaScript engine binary. By default, d8 under "
                          "architecture-specific build dir. "
@@ -873,6 +962,8 @@ def Main(args):
     options.shell_dir_no_patch = None
 
   platform = Platform.GetPlatform(options)
+  if options.pretty:
+    platform = platform.GetPrettyFormatted(options)
 
   results = Results()
   results_no_patch = Results()
@@ -914,6 +1005,7 @@ def Main(args):
       # Let runnable iterate over all runs and handle output.
       result, result_no_patch = runnable.Run(
           Runner, trybot=options.shell_dir_no_patch)
+      platform.PrintResult(result)
       results += result
       results_no_patch += result_no_patch
     platform.PostExecution()
@@ -921,12 +1013,14 @@ def Main(args):
   if options.json_test_results:
     results.WriteToFile(options.json_test_results)
   else:  # pragma: no cover
-    print results
+    if not options.pretty:
+      print results
 
   if options.json_test_results_no_patch:
     results_no_patch.WriteToFile(options.json_test_results_no_patch)
   else:  # pragma: no cover
-    print results_no_patch
+    if not options.pretty:
+      print results_no_patch
 
   return min(1, len(results.errors))