OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import re | 5 import re |
6 | 6 |
7 import android_commands | 7 import android_commands |
8 import json | 8 import json |
9 import math | 9 import math |
10 | 10 |
(...skipping 43 matching lines...)
54 try: | 54 try: |
55 value = '[%s]' % ','.join([str(v) for v in values]) | 55 value = '[%s]' % ','.join([str(v) for v in values]) |
56 avg = sum([float(v) for v in values]) / len(values) | 56 avg = sum([float(v) for v in values]) / len(values) |
57 sqdiffs = [(float(v) - avg) ** 2 for v in values] | 57 sqdiffs = [(float(v) - avg) ** 2 for v in values] |
58 variance = sum(sqdiffs) / (len(values) - 1) | 58 variance = sum(sqdiffs) / (len(values) - 1) |
59 sd = math.sqrt(variance) | 59 sd = math.sqrt(variance) |
60 except ValueError: | 60 except ValueError: |
61 value = ", ".join(values) | 61 value = ", ".join(values) |
62 else: | 62 else: |
63 value = values[0] | 63 value = values[0] |
64 avg = values[0] | |
65 return value, avg, sd | 64 return value, avg, sd |
66 | 65 |
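For context, here is a minimal standalone sketch (not part of the patch) of the statistics _MeanAndStdDevFromList computes in the multi-value case above: the arithmetic mean and the sample standard deviation with an (n - 1) divisor. The input list is illustrative only.

    import math

    values = [10, 20, 30]  # illustrative inputs
    avg = sum(float(v) for v in values) / len(values)   # 20.0
    sqdiffs = [(float(v) - avg) ** 2 for v in values]    # [100.0, 0.0, 100.0]
    sd = math.sqrt(sum(sqdiffs) / (len(values) - 1))     # 10.0
    print '[%s] avg=%s sd=%s' % (','.join(str(v) for v in values), avg, sd)
    # prints: [10,20,30] avg=20.0 sd=10.0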
67 | 66 |
68 def PrintPerfResult(measurement, trace, values, units, result_type='default', | 67 def PrintPerfResult(measurement, trace, values, units, result_type='default', |
69 print_to_stdout=True): | 68 print_to_stdout=True): |
70 """Prints numerical data to stdout in the format required by perf tests. | 69 """Prints numerical data to stdout in the format required by perf tests. |
71 | 70 |
72 The string args may be empty but they must not contain any colons (:) or | 71 The string args may be empty but they must not contain any colons (:) or |
73 equals signs (=). | 72 equals signs (=). |
74 | 73 |
75 Args: | 74 Args: |
76 measurement: A description of the quantity being measured, e.g. "vm_peak". | 75 measurement: A description of the quantity being measured, e.g. "vm_peak". |
77 trace: A description of the particular data point, e.g. "reference". | 76 trace: A description of the particular data point, e.g. "reference". |
78 values: A list of numeric measured values. | 77 values: A list of numeric measured values. |
79 units: A description of the units of measure, e.g. "bytes". | 78 units: A description of the units of measure, e.g. "bytes". |
80 result_type: Accepts values of RESULT_TYPES. | 79 result_type: A tri-state that accepts values of ['unimportant', 'default', |
| 80 'informational']. 'unimportant' prints RESULT, 'default' prints *RESULT, |
| 81 and 'informational' prints nothing. |
81 print_to_stdout: If True, prints the output to stdout instead of returning | 82 print_to_stdout: If True, prints the output to stdout instead of returning |
82 the output to the caller. | 83 the output to the caller. |
83 | 84 |
84 Returns: | 85 Returns: |
85 String of the formatted perf result. | 86 String of the formatted perf result. |
86 """ | 87 """ |
87 assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type | 88 assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type |
88 | 89 |
89 trace_name = _EscapePerfResult(trace) | |
90 | |
91 if result_type in ['unimportant', 'default', 'informational']: | 90 if result_type in ['unimportant', 'default', 'informational']: |
92 assert isinstance(values, list) | 91 assert isinstance(values, list) |
93 assert len(values) | 92 assert len(values) |
94 assert '/' not in measurement | 93 assert '/' not in measurement |
95 value, avg, sd = _MeanAndStdDevFromList(values) | 94 value, avg, sd = _MeanAndStdDevFromList(values) |
96 output = '%s%s: %s%s%s %s' % ( | |
97 RESULT_TYPES[result_type], | |
98 _EscapePerfResult(measurement), | |
99 trace_name, | |
100 # Do not show equal sign if the trace is empty. Usually it happens when | |
101 # measurement is enough clear to describe the result. | |
102 '= ' if trace_name else '', | |
103 value, | |
104 units) | |
105 else: | 95 else: |
106 assert(result_type in ['histogram', 'unimportant-histogram']) | 96 value = values[0] |
107 assert isinstance(values, list) | 97 # We can't print the units, otherwise the histogram json output |
108 assert len(values) | 98 # can't be parsed easily. |
109 # Print out each histogram separately. We can't print the units, otherwise | 99 units = '' |
110 # the histogram json output can't be parsed easily. | 100 avg, sd = GeomMeanAndStdDevFromHistogram(value) |
111 output = '' | |
112 ix = 1 | |
113 for value in values: | |
114 name = '%s.%s_%d' % (_EscapePerfResult(measurement), trace_name, ix) | |
115 output += '%s%s%s : %s = %s' % ( | |
116 '\n' if ix > 1 else '', | |
117 RESULT_TYPES[result_type], | |
118 name, | |
119 name, | |
120 value) | |
121 ix += 1 | |
122 measurement = '%s.%s' % (measurement, trace_name) | |
123 means_and_sds = [GeomMeanAndStdDevFromHistogram(value) for value in values] | |
124 _, avg, sd = _MeanAndStdDevFromList([mean for (mean, _) in means_and_sds ]) | |
125 | 101 |
| 102 trace_name = _EscapePerfResult(trace) |
| 103 output = '%s%s: %s%s%s %s' % ( |
| 104 RESULT_TYPES[result_type], |
| 105 _EscapePerfResult(measurement), |
| 106 trace_name, |
| 107 # Do not show the equals sign if the trace is empty. Usually this happens when |
| 108 # the measurement is clear enough to describe the result. |
| 109 '= ' if trace_name else '', |
| 110 value, |
| 111 units) |
126 if avg: | 112 if avg: |
127 output += '\nAvg %s: %f%s' % (measurement, avg, units) | 113 output += '\nAvg %s: %f%s' % (measurement, avg, units) |
128 if sd: | 114 if sd: |
129 output += '\nSd %s: %f%s' % (measurement, sd, units) | 115 output += '\nSd %s: %f%s' % (measurement, sd, units) |
130 if print_to_stdout: | 116 if print_to_stdout: |
131 print output | 117 print output |
132 return output | 118 return output |
133 | 119 |
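As a hypothetical usage example (not part of the patch), a 'default' result with several values would print as shown below. The exact prefix comes from RESULT_TYPES, which is defined in the lines skipped above; a '*RESULT ' prefix is assumed here based on the docstring.

    PrintPerfResult('warm_time', 'page_load', [100, 200, 300], 'ms')
    # Expected stdout, following the format string above:
    #   *RESULT warm_time: page_load= [100,200,300] ms
    #   Avg warm_time: 200.000000ms
    #   Sd warm_time: 100.000000ms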
134 | 120 |
135 class PerfTestSetup(object): | 121 class PerfTestSetup(object): |
(...skipping 28 matching lines...)
164 def TearDown(self): | 150 def TearDown(self): |
165 """Tears down performance tests.""" | 151 """Tears down performance tests.""" |
166 if self._original_scaling_governor: | 152 if self._original_scaling_governor: |
167 self._SetScalingGovernorInternal(self._original_scaling_governor) | 153 self._SetScalingGovernorInternal(self._original_scaling_governor) |
168 self._original_scaling_governor = None | 154 self._original_scaling_governor = None |
169 | 155 |
170 def _SetScalingGovernorInternal(self, value): | 156 def _SetScalingGovernorInternal(self, value): |
171 for cpu in range(self._num_cpus): | 157 for cpu in range(self._num_cpus): |
172 self._adb.RunShellCommand( | 158 self._adb.RunShellCommand( |
173 ('echo %s > ' + PerfTestSetup._SCALING_GOVERNOR) % (value, cpu)) | 159 ('echo %s > ' + PerfTestSetup._SCALING_GOVERNOR) % (value, cpu)) |
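Illustratively (not part of the patch), the loop above issues one 'echo' per CPU over adb shell. The real _SCALING_GOVERNOR constant is defined in the skipped lines; the conventional Linux cpufreq sysfs path is assumed in this sketch.

    # Assumed value of the constant; the patch defines the real one elsewhere.
    _SCALING_GOVERNOR = '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor'

    def _commands_for(value, num_cpus):
      # Mirrors _SetScalingGovernorInternal: one shell command per CPU index.
      return [('echo %s > ' + _SCALING_GOVERNOR) % (value, cpu)
              for cpu in range(num_cpus)]

    for cmd in _commands_for('performance', 2):
      print cmd
    # echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
    # echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor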