Index: chrome/test/functional/perf.py
diff --git a/chrome/test/functional/perf.py b/chrome/test/functional/perf.py
index dfb70b0124f8a8a5aaefa03378a252f3777ccbce..7aabbe6e1cf06ed1045d6bfc451a19cdede7e61d 100644
--- a/chrome/test/functional/perf.py
+++ b/chrome/test/functional/perf.py
@@ -3,6 +3,17 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+"""Basic pyauto performance tests.
+
+By default, each test in this file runs a single experimental trial (which
+consists of a simple action that is measured for performance, such as opening
+a new tab). An environment variable 'PYAUTO_PERF_TEST_NUM_TRIALS' can
+optionally be set, which must be an integer >= 0. If set, this refers to the
+number of additional experimental trials to run for each test after the initial
+trial. The average and standard deviation of the additional trials will be
+computed.
+"""
+
 import logging
 import math
 import os
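As the module docstring above notes, this variable only controls the number of additional trials after the initial one. A minimal sketch of how a user might request extra trials from Python before the pyauto tests start (equivalent to exporting the variable in the shell; the value '10' is just an example):

    import os

    # Request 10 additional trials per test, on top of the initial trial that
    # always runs; leaving the variable unset keeps the default of 0 extras.
    os.environ['PYAUTO_PERF_TEST_NUM_TRIALS'] = '10'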
@@ -15,7 +26,18 @@ import pyauto
 class PerfTest(pyauto.PyUITest):
   """Basic performance tests."""
 
-  _NUM_ITERATIONS = 50
+  def setUp(self):
+    """Performs necessary setup work before running each test."""
Nirnimesh
2011/08/11 18:26:36
So if it's not defined in the environment, the test won't run any trials?

dennis_jeffrey
2011/08/11 19:29:32
No, if that variable is not defined in the environment, the test still runs the initial trial; the variable only controls the number of additional trials to run after it.
+    self._num_trials = 0
+    if 'PYAUTO_PERF_TEST_NUM_TRIALS' in os.environ:
+      try:
+        self._num_trials = int(os.environ['PYAUTO_PERF_TEST_NUM_TRIALS'])
Nirnimesh
2011/08/11 18:26:36
That's an env var no one will remember. Use NUM_ITERATIONS instead.

dennis_jeffrey
2011/08/11 19:29:32
Done.
+        if self._num_trials < 0:
+          raise ValueError('Environment variable PYAUTO_PERF_TEST_NUM_TRIALS '
+                           'must be an integer >= 0.')
+      except ValueError, e:
+        self.fail('Error processing environment variable: %s' % e)
+    pyauto.PyUITest.setUp(self)
 
   def _MeasureElapsedTime(self, python_command, num_invocations):
     """Measures time (in msec) to execute a python command one or more times.
@@ -58,14 +80,15 @@ class PerfTest(pyauto.PyUITest):
       values: A list of numeric value measurements.
       units: A string specifying the units for the specified measurements.
     """
-    avg, std_dev = self._AvgAndStdDev(values)
-    logging.debug('First trial: %.2f %s', first_val, units)
-    logging.info('Number of iterations: %d', num_iter)
-    for val in values:
-      logging.info(' %.2f %s', val, units)
-    logging.info(' --------------------------')
-    logging.info(' Average: %.2f %s', avg, units)
-    logging.info(' Std dev: %.2f %s', std_dev, units)
+    logging.debug('Single trial: %.2f %s', first_val, units)
+    if self._num_trials:
+      avg, std_dev = self._AvgAndStdDev(values)
+      logging.info('Number of iterations: %d', num_iter)
+      for val in values:
+        logging.info(' %.2f %s', val, units)
+      logging.info(' --------------------------')
+      logging.info(' Average: %.2f %s', avg, units)
+      logging.info(' Std dev: %.2f %s', std_dev, units)
 
   def _RunNewTabTest(self, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).
@@ -87,7 +110,7 @@
       self.GetBrowserWindow(0).GetTab(1).Close(True)
 
     timings = []
-    for _ in range(self._NUM_ITERATIONS):
+    for _ in range(self._num_trials):
       elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
       self.assertEqual(1 + num_tabs, self.GetTabCount(),
                        msg='Did not open %d new tab(s).' % num_tabs)
@@ -95,7 +118,7 @@
         self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)
 
-    self._PrintSummaryResults(orig_elapsed, self._NUM_ITERATIONS, timings, 'ms')
+    self._PrintSummaryResults(orig_elapsed, self._num_trials, timings, 'ms')
 
   def testNewTab(self):
     """Measures time to open a new tab."""
@@ -144,13 +167,13 @@
                      msg='Did not clean up after running benchmark suite.')
 
     scores = []
-    for _ in range(self._NUM_ITERATIONS):
+    for _ in range(self._num_trials):
       score = _RunSingleV8BenchmarkSuite()
       self.assertEqual(1, self.GetTabCount(),
                        msg='Did not clean up after running benchmark suite.')
       scores.append(score)
 
-    self._PrintSummaryResults(orig_score, self._NUM_ITERATIONS, scores, 'score')
+    self._PrintSummaryResults(orig_score, self._num_trials, scores, 'score')
 
 
 if __name__ == '__main__':