Chromium Code Reviews

Diff: chrome/test/functional/perf.py

Issue 7617014: Allow pyauto performance tests in perf.py to run as an autotest. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Minor edits. Created 9 years, 4 months ago
 #!/usr/bin/python
 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+"""Basic pyauto performance tests.
+
+By default, each test in this file runs a single experimental trial (which
+consists of a simple action that is measured for performance, such as opening
+a new tab). An environment variable 'PYAUTO_PERF_TEST_NUM_TRIALS' can
+optionally be set, which must be an integer >= 0. If set, this refers to the
+number of additional experimental trials to run for each test after the initial
+trial. The average and standard deviation of the additional trials will be
+computed.
+"""
+
 import logging
 import math
 import os
 import time
 
 import pyauto_functional  # Must be imported before pyauto.
 import pyauto
 
 
 class PerfTest(pyauto.PyUITest):
   """Basic performance tests."""
 
-  _NUM_ITERATIONS = 50
+  def setUp(self):
+    """Performs necessary setup work before running each test."""
Nirnimesh 2011/08/11 18:26:36 So if it's not defined in the environment, the test runs zero trials?
dennis_jeffrey 2011/08/11 19:29:32 No, if that variable is not defined in the environment, the test still runs its single initial trial; only the additional trials are skipped.
+    self._num_trials = 0
+    if 'PYAUTO_PERF_TEST_NUM_TRIALS' in os.environ:
+      try:
+        self._num_trials = int(os.environ['PYAUTO_PERF_TEST_NUM_TRIALS'])
Nirnimesh 2011/08/11 18:26:36 That's an env var no one will remember. Use NUM_ITERATIONS.
dennis_jeffrey 2011/08/11 19:29:32 Done.
+        if self._num_trials < 0:
+          raise ValueError('Environment variable PYAUTO_PERF_TEST_NUM_TRIALS '
+                           'must be an integer >= 0.')
+      except ValueError, e:
+        self.fail('Error processing environment variable: %s' % e)
+    pyauto.PyUITest.setUp(self)
 
   def _MeasureElapsedTime(self, python_command, num_invocations):
     """Measures time (in msec) to execute a python command one or more times.
 
     Args:
       python_command: A callable.
       num_invocations: An integer number of times to invoke the given command.
 
     Returns:
       The time required to execute the python command the specified number of
(...skipping 22 matching lines...)
 
   def _PrintSummaryResults(self, first_val, num_iter, values, units):
     """Logs summary measurement information.
 
     Args:
       first_val: A numeric measurement value for a single initial trial.
       num_iter: An integer number of iterations used for multiple trials.
       values: A list of numeric value measurements.
       units: A string specifying the units for the specified measurements.
     """
-    avg, std_dev = self._AvgAndStdDev(values)
-    logging.debug('First trial: %.2f %s', first_val, units)
-    logging.info('Number of iterations: %d', num_iter)
-    for val in values:
-      logging.info('  %.2f %s', val, units)
-    logging.info('  --------------------------')
-    logging.info('  Average: %.2f %s', avg, units)
-    logging.info('  Std dev: %.2f %s', std_dev, units)
+    logging.debug('Single trial: %.2f %s', first_val, units)
+    if self._num_trials:
+      avg, std_dev = self._AvgAndStdDev(values)
+      logging.info('Number of iterations: %d', num_iter)
+      for val in values:
+        logging.info('  %.2f %s', val, units)
+      logging.info('  --------------------------')
+      logging.info('  Average: %.2f %s', avg, units)
+      logging.info('  Std dev: %.2f %s', std_dev, units)
 
   def _RunNewTabTest(self, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).
 
     This helper function can be called from different tests to do perf testing
     with different types of tabs. It is assumed that the |open_tab_command|
     will open up a single tab.
 
     Args:
       open_tab_command: A callable that will open a single tab.
       num_tabs: The number of tabs to open, i.e., the number of times to invoke
                 the |open_tab_command|.
     """
     assert callable(open_tab_command)
     orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
     self.assertEqual(1 + num_tabs, self.GetTabCount(),
                      msg='Did not open %d new tab(s).' % num_tabs)
     for _ in range(num_tabs):
       self.GetBrowserWindow(0).GetTab(1).Close(True)
 
     timings = []
-    for _ in range(self._NUM_ITERATIONS):
+    for _ in range(self._num_trials):
       elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
       self.assertEqual(1 + num_tabs, self.GetTabCount(),
                        msg='Did not open %d new tab(s).' % num_tabs)
       for _ in range(num_tabs):
         self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)
 
-    self._PrintSummaryResults(orig_elapsed, self._NUM_ITERATIONS, timings, 'ms')
+    self._PrintSummaryResults(orig_elapsed, self._num_trials, timings, 'ms')
 
   def testNewTab(self):
     """Measures time to open a new tab."""
     self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
 
   def testNewTabPdf(self):
     """Measures time to open a new tab navigated to a PDF file."""
     url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf')
     self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
 
(...skipping 28 matching lines...)
       val = self.ExecuteJavascript(js, 0, 1)
       score = val[val.find(':') + 2:]
       self.GetBrowserWindow(0).GetTab(1).Close(True)
       return int(score)
 
     orig_score = _RunSingleV8BenchmarkSuite()
     self.assertEqual(1, self.GetTabCount(),
                      msg='Did not clean up after running benchmark suite.')
 
     scores = []
-    for _ in range(self._NUM_ITERATIONS):
+    for _ in range(self._num_trials):
       score = _RunSingleV8BenchmarkSuite()
       self.assertEqual(1, self.GetTabCount(),
                        msg='Did not clean up after running benchmark suite.')
       scores.append(score)
 
-    self._PrintSummaryResults(orig_score, self._NUM_ITERATIONS, scores, 'score')
+    self._PrintSummaryResults(orig_score, self._num_trials, scores, 'score')
 
 
 if __name__ == '__main__':
   pyauto_functional.Main()
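
A quick note for anyone trying the patch locally: with PYAUTO_PERF_TEST_NUM_TRIALS unset, each test performs only its single initial trial; setting it to N adds N more trials whose average and standard deviation are reported. A minimal invocation sketch follows; the script path and test-name argument are assumptions about the usual pyauto functional runner, not part of this change.

import os
import subprocess

# Sketch only: run one perf test with 10 additional trials by exporting the
# new environment variable for the child process.
env = dict(os.environ, PYAUTO_PERF_TEST_NUM_TRIALS='10')
subprocess.call(
    ['python', 'chrome/test/functional/perf.py', 'perf.PerfTest.testNewTab'],
    env=env)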
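
The body of _MeasureElapsedTime is collapsed in the diff ("...skipping 22 matching lines..."). As context for reviewers, a rough stand-in consistent with its docstring is sketched below; the patch's actual implementation may differ.

import time

def measure_elapsed_time(python_command, num_invocations):
  # Hypothetical stand-in for PerfTest._MeasureElapsedTime: invoke the given
  # callable |num_invocations| times and return the elapsed time in msec.
  assert callable(python_command)
  start = time.time()
  for _ in range(num_invocations):
    python_command()
  return (time.time() - start) * 1000  # Seconds to milliseconds.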
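
Likewise, _AvgAndStdDev is elided above, so _PrintSummaryResults is easiest to read with the expected statistics in mind: the mean of the trial values and their standard deviation. A hedged sketch follows (population standard deviation assumed; the real helper may use a different formula).

import math

def avg_and_std_dev(values):
  # Hypothetical stand-in for PerfTest._AvgAndStdDev: return the mean and
  # (population) standard deviation of a list of numeric measurements.
  if not values:
    return 0.0, 0.0
  avg = sum(values) / float(len(values))
  variance = sum((v - avg) ** 2 for v in values) / len(values)
  return avg, math.sqrt(variance)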