Chromium Code Reviews

Unified Diff: chrome/test/functional/perf.py

Issue 7745007: Pyauto performance tests now output data to be graphed using autotest. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Addressed second round of review comments. Created 9 years, 3 months ago
 #!/usr/bin/python
 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Basic pyauto performance tests.

 For tests that need to be run for multiple iterations (e.g., so that average
 and standard deviation values can be reported), the default number of iterations
 run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
 That value can optionally be tweaked by setting an environment variable
 'NUM_ITERATIONS' to a positive integer, representing the number of iterations
 to run.
 """

 import logging
 import math
 import os
 import time

 import pyauto_functional  # Must be imported before pyauto.
 import pyauto


 class PerfTest(pyauto.PyUITest):
   """Basic performance tests."""

   _DEFAULT_NUM_ITERATIONS = 50
+  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
+  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'

   def setUp(self):
     """Performs necessary setup work before running each test."""
     self._num_iterations = self._DEFAULT_NUM_ITERATIONS
     if 'NUM_ITERATIONS' in os.environ:
       try:
         self._num_iterations = int(os.environ['NUM_ITERATIONS'])
         if self._num_iterations <= 0:
           raise ValueError('Environment variable NUM_ITERATIONS must be an '
                            'integer > 0.')
       except ValueError, e:
         self.fail('Error processing environment variable: %s' % e)
     pyauto.PyUITest.setUp(self)

   def _MeasureElapsedTime(self, python_command, num_invocations):
truty 2011/08/25 04:25:01 Are you considering that it would make this more r[…]
dennis_jeffrey 2011/08/25 22:33:05 I think this is a great idea, and would like to do[…]
44 """Measures time (in msec) to execute a python command one or more times. 46 """Measures time (in msec) to execute a python command one or more times.
45 47
46 Args: 48 Args:
47 python_command: A callable. 49 python_command: A callable.
48 num_invocations: An integer number of times to invoke the given command. 50 num_invocations: An integer number of times to invoke the given command.
49 51
50 Returns: 52 Returns:
51 The time required to execute the python command the specified number of 53 The time required to execute the python command the specified number of
52 times, in milliseconds as a float. 54 times, in milliseconds as a float.
53 """ 55 """
54 assert callable(python_command) 56 assert callable(python_command)
55 start_time = time.time() 57 start_time = time.time()
56 for _ in range(num_invocations): 58 for _ in range(num_invocations):
57 python_command() 59 python_command()
58 stop_time = time.time() 60 stop_time = time.time()
59 return (stop_time - start_time) * 1000 # Convert to milliseconds. 61 return (stop_time - start_time) * 1000 # Convert to milliseconds.
truty 2011/08/25 04:25:01 Just wondering...why not use timeit? There are is[…]
dennis_jeffrey 2011/08/25 22:33:05 I didn't use it because I didn't know about it - t[…]
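A minimal sketch of the timeit alternative raised in this thread (illustrative only, not part of the patch under review; note that timeit.timeit also turns off garbage collection while timing, which the time.time() loop above does not):

    import timeit

    def _MeasureElapsedTime(self, python_command, num_invocations):
      """Same contract as above, but delegates the timing loop to timeit."""
      assert callable(python_command)
      # timeit calls python_command |number| times and returns the total
      # elapsed wall-clock time in seconds; convert to milliseconds.
      return timeit.timeit(python_command, number=num_invocations) * 1000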

   def _AvgAndStdDev(self, values):
     """Computes the average and standard deviation of a set of values.

     Args:
       values: A list of numeric values.

     Returns:
       A 2-tuple of floats (average, standard_deviation).
     """
     avg = float(sum(values)) / len(values)
     temp_vals = [math.pow(x - avg, 2) for x in values]
     std_dev = math.sqrt(sum(temp_vals) / len(temp_vals))
truty 2011/08/25 04:25:01 This illustrates why I use a Python stats lib inst[…]
dennis_jeffrey 2011/08/25 22:33:05 Originally Nirnimesh had recommended I use the "nu[…]
truty 2011/08/26 20:33:00 -I use a Python stats library that is opensource -[…]
dennis_jeffrey 2011/08/26 23:23:54 I changed the computation to "sample" std dev base[…]
     return avg, std_dev

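The "sample" standard deviation mentioned in the thread above divides by n - 1 rather than n (Bessel's correction). A minimal sketch of that variant, assuming that is what the later patch set adopts:

    def _AvgAndStdDev(self, values):
      """Returns the average and sample standard deviation of |values|."""
      avg = float(sum(values)) / len(values)
      if len(values) < 2:
        return avg, 0.0  # Sample std dev is undefined for a single value.
      # Dividing by (n - 1) instead of n gives the unbiased sample variance.
      variance = sum(math.pow(x - avg, 2) for x in values) / (len(values) - 1)
      return avg, math.sqrt(variance)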
-  def _PrintSummaryResults(self, first_val, units, values=[]):
+  def _OutputPerfGraphValue(self, description, value):
+    """Outputs a performance value to have it graphed on the performance bots.
+
+    Only used for ChromeOS.
+
+    Args:
+      description: A string description of the performance value.
+      value: A numeric value representing a single performance measurement.
+    """
+    if self.IsChromeOS():
+      print '%s(\'%s\', %.2f)%s' % (self._PERF_OUTPUT_MARKER_PRE, description,
+                                    value, self._PERF_OUTPUT_MARKER_POST)
+
+  def _PrintSummaryResults(self, description, first_val, units, values=[]):
     """Logs summary measurement information.

     Args:
+      description: A string description for the specified results.
       first_val: A numeric measurement value for a single initial trial.
       units: A string specifying the units for the specified measurements.
       values: A list of numeric value measurements.
     """
+    logging.info('Results for: ' + description)
     logging.debug('Single trial: %.2f %s', first_val, units)
     if values:
       avg, std_dev = self._AvgAndStdDev(values)
       logging.info('Number of iterations: %d', len(values))
       for val in values:
         logging.info('  %.2f %s', val, units)
       logging.info('  --------------------------')
       logging.info('  Average: %.2f %s', avg, units)
       logging.info('  Std dev: %.2f %s', std_dev, units)
+      self._OutputPerfGraphValue('%s_%s' % (units, description), avg)
truty 2011/08/25 04:25:01 FYI, it's typical for current ChromeOS perf tests […]
dennis_jeffrey 2011/08/25 22:33:05 Is this something that happens automatically when […]
truty 2011/08/26 20:33:00 Yes. Using write_perf_keyval() causes the entire d[…]
+    else:
+      self._OutputPerfGraphValue('%s_%s' % (units, description), first_val)

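The marker-delimited lines printed by _OutputPerfGraphValue are intended to be scraped from the test output by the autotest harness. A hypothetical sketch of such a consumer (the real harness side, e.g. anything built on write_perf_keyval(), is outside this review):

    import re

    # Matches output such as: _PERF_PRE_('milliseconds_NewTabPage', 123.45)_PERF_POST_
    _PERF_RE = re.compile(r"_PERF_PRE_\('([^']+)', ([\d.]+)\)_PERF_POST_")

    def ParsePerfOutput(output):
      """Maps each performance description to its measured value."""
      return dict((desc, float(val)) for desc, val in _PERF_RE.findall(output))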
-  def _RunNewTabTest(self, open_tab_command, num_tabs=1):
+  def _RunNewTabTest(self, description, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).

     This helper function can be called from different tests to do perf testing
     with different types of tabs. It is assumed that the |open_tab_command|
     will open up a single tab.
truty 2011/08/25 04:25:01 I think you should explain this pattern of separat[…]
dennis_jeffrey 2011/08/25 22:33:05 This is indeed something that was bugging me a lit[…]

     Args:
+      description: A string description of the associated tab test.
       open_tab_command: A callable that will open a single tab.
       num_tabs: The number of tabs to open, i.e., the number of times to invoke
         the |open_tab_command|.
     """
     assert callable(open_tab_command)
     orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
     self.assertEqual(1 + num_tabs, self.GetTabCount(),
                      msg='Did not open %d new tab(s).' % num_tabs)
     for _ in range(num_tabs):
       self.GetBrowserWindow(0).GetTab(1).Close(True)

     timings = []
     for _ in range(self._num_iterations):
       elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
       self.assertEqual(1 + num_tabs, self.GetTabCount(),
                        msg='Did not open %d new tab(s).' % num_tabs)
       for _ in range(num_tabs):
         self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)

-    self._PrintSummaryResults(orig_elapsed, 'ms', values=timings)
+    self._PrintSummaryResults(description, orig_elapsed, 'milliseconds',
+                              values=timings)

   def testNewTab(self):
     """Measures time to open a new tab."""
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
+    self._RunNewTabTest('NewTabPage',
+                        lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))

   def testNewTabPdf(self):
     """Measures time to open a new tab navigated to a PDF file."""
     url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabPdfPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))

   def testNewTabFlash(self):
     """Measures time to open a new tab navigated to a flash page."""
     url = self.GetFileURLForDataPath('plugin', 'flash.swf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabFlashPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))

   def test20Tabs(self):
     """Measures time to open 20 tabs."""
     self._RunNewTabTest(
+        '20TabsNewTabPage',
         lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20)

   def testV8BenchmarkSuite(self):
     """Measures score from online v8 benchmark suite."""
     url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')
     self.AppendTab(pyauto.GURL(url))
     js = """
         var val = document.getElementById("status").innerHTML;
         window.domAutomationController.send(val);
     """
     self.assertTrue(
         self.WaitUntil(
             lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300,
             expect_retval=True),
         msg='Timed out when waiting for v8 benchmark score.')
     val = self.ExecuteJavascript(js, 0, 1)
     score = int(val[val.find(':') + 2:])
-    self._PrintSummaryResults(score, 'score')
+    self._PrintSummaryResults('V8Benchmark', score, 'score')
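A worked example of the score parsing above, using a hypothetical final status string:

    val = 'Score: 4728'                   # hypothetical contents of the status element
    score = int(val[val.find(':') + 2:])  # the '+ 2' skips the ': ', giving 4728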


 if __name__ == '__main__':
   pyauto_functional.Main()