Index: build/android/pylib/perf_tests_helper.py
diff --git a/build/android/pylib/perf_tests_helper.py b/build/android/pylib/perf_tests_helper.py
index e09221bbf930941fbf1d1a0c09ed6dad073f1587..f320e4f1b64c6cbbbb6a293f09e4f4059489adeb 100644
--- a/build/android/pylib/perf_tests_helper.py
+++ b/build/android/pylib/perf_tests_helper.py
@@ -61,6 +61,7 @@ def _MeanAndStdDevFromList(values):
       value = ", ".join(values)
   else:
     value = values[0]
+    avg = values[0]
   return value, avg, sd
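
For context between the hunks: the first hunk suggests that, for a single-element list, the helper previously returned the value but left the average at its initial value, so the Avg line printed later would be wrong. A minimal sketch of that behavior, assuming the surrounding function looks roughly like the context lines above (the simplified name and body here are illustrative, not the actual Chromium source):

import math

def mean_and_std_dev_from_list(values):
  # Illustrative sketch only; loosely mirrors the context lines of the hunk above.
  avg = 0.0
  sd = 0.0
  if len(values) > 1:
    avg = float(sum(values)) / len(values)
    sd = math.sqrt(sum((avg - v) ** 2 for v in values) / (len(values) - 1))
    value = ", ".join(str(v) for v in values)
  else:
    value = values[0]
    avg = values[0]  # the line this patch adds: a single sample is its own average
  return value, avg, sd

# Without the added line, a one-sample result would have reported Avg 0.
assert mean_and_std_dev_from_list([7])[1] == 7
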
@@ -87,28 +88,43 @@ def PrintPerfResult(measurement, trace, values, units, result_type='default',
   """
   assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type
-  if result_type in ['unimportant', 'default', 'informational']:
+  trace_name = _EscapePerfResult(trace)
+  if result_type in ['histogram', 'unimportant-histogram']:
     assert isinstance(values, list)
     assert len(values)
-    assert '/' not in measurement
-    value, avg, sd = _MeanAndStdDevFromList(values)
+    # Print out each histogram separately. We can't print the units, otherwise
+    # the histogram json output can't be parsed easily.
+    output = ''
+    ix = 1
+    for value in values:
+      name = '%s.%s_%d' % (_EscapePerfResult(measurement), trace_name, ix)
+      output += '%s%s%s : %s = %s' % (
+          '\n' if ix > 1 else '',
+          RESULT_TYPES[result_type],
+          name,
+          name,
+          value)
+      ix += 1
+    means_and_sds = [GeomMeanAndStdDevFromHistogram(value) for value in values]
+    print means_and_sds
+    _, avg, sd = _MeanAndStdDevFromList([mean for (mean, _) in means_and_sds ])
   else:
-    value = values[0]
-    # We can't print the units, otherwise parsing the histogram json output
-    # can't be parsed easily.
-    units = ''
-    avg, sd = GeomMeanAndStdDevFromHistogram(value)
-
-  trace_name = _EscapePerfResult(trace)
-  output = '%s%s: %s%s%s %s' % (
-      RESULT_TYPES[result_type],
-      _EscapePerfResult(measurement),
-      trace_name,
-      # Do not show equal sign if the trace is empty. Usually it happens when
-      # measurement is enough clear to describe the result.
-      '= ' if trace_name else '',
-      value,
-      units)
+    if result_type in ['unimportant', 'default', 'informational']:
+      assert isinstance(values, list)
+      assert len(values)
+      assert '/' not in measurement
+      value, avg, sd = _MeanAndStdDevFromList(values)
+    else:
+      value = values[0]
Inline review comments on the value = values[0] branch above:
bulach, 2012/11/22 18:21:59:
  nit: is this possible to reach this? my understand
marja, 2012/11/23 09:08:33:
  Done. Yes, I had misinterpreted the association of
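
An aside on the exchange above: whether that inner else: is reachable depends on the full key set of RESULT_TYPES, which this excerpt does not show. A minimal sketch, assuming RESULT_TYPES contains exactly the five result types handled in this function (an assumption), showing that the inner branch would then be dead code:

# Assumed key set only; the real RESULT_TYPES dict lives elsewhere in
# perf_tests_helper.py and is not shown in this hunk.
RESULT_TYPES = dict.fromkeys(
    ['unimportant', 'default', 'informational',
     'unimportant-histogram', 'histogram'])

def branch_taken(result_type):
  # Mirrors the branch structure of the patched PrintPerfResult.
  assert result_type in RESULT_TYPES
  if result_type in ['histogram', 'unimportant-histogram']:
    return 'histogram path'
  if result_type in ['unimportant', 'default', 'informational']:
    return 'scalar path'
  return 'inner else'  # only reachable if RESULT_TYPES grows extra keys

# Under the assumed key set, no result type ever hits the inner else.
assert all(branch_taken(t) != 'inner else' for t in RESULT_TYPES)
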
+    output = '%s%s: %s%s%s %s' % (
+        RESULT_TYPES[result_type],
+        _EscapePerfResult(measurement),
+        trace_name,
+        # Do not show equal sign if the trace is empty. Usually it happens when
+        # measurement is enough clear to describe the result.
+        '= ' if trace_name else '',
+        value,
+        units)
   if avg:
     output += '\nAvg %s: %f%s' % (measurement, avg, units)
   if sd: