Chromium Code Reviews

Unified Diff: chrome/test/functional/perf.py

Issue 7745007: Pyauto performance tests now output data to be graphed using autotest. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Now computing sample standard deviation. Created 9 years, 4 months ago
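
For reference, the patched |_AvgAndStdDev| (see the diff below) now divides the sum of squared deviations by n - 1 rather than n, i.e. it reports the sample standard deviation instead of the population standard deviation, and it no longer divides by zero for empty or single-element lists. A minimal standalone sketch of the same computation, assuming plain Python with no pyauto dependencies (the helper name is illustrative only, not part of the change):

import math

def avg_and_sample_std_dev(values):
  """Mirrors the patched _AvgAndStdDev: mean plus sample standard deviation."""
  avg = 0.0
  std_dev = 0.0
  if values:
    avg = float(sum(values)) / len(values)
    if len(values) > 1:
      squared_diffs = [math.pow(x - avg, 2) for x in values]
      std_dev = math.sqrt(sum(squared_diffs) / (len(squared_diffs) - 1))  # n - 1
  return avg, std_dev

# Example: for [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5.0; dividing by n - 1 = 7
# gives a sample standard deviation of about 2.14 (the old n = 8 divisor gave 2.0).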
--- chrome/test/functional/perf.py (old)
+++ chrome/test/functional/perf.py (new)
@@ -1,161 +1,196 @@
 #!/usr/bin/python
 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Basic pyauto performance tests.
 
 For tests that need to be run for multiple iterations (e.g., so that average
 and standard deviation values can be reported), the default number of iterations
 run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
 That value can optionally be tweaked by setting an environment variable
 'NUM_ITERATIONS' to a positive integer, representing the number of iterations
 to run.
 """
 
 import logging
 import math
 import os
-import time
+import timeit
 
 import pyauto_functional  # Must be imported before pyauto.
 import pyauto
 
 
 class PerfTest(pyauto.PyUITest):
   """Basic performance tests."""
 
   _DEFAULT_NUM_ITERATIONS = 50
+  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
+  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'
 
   def setUp(self):
     """Performs necessary setup work before running each test."""
     self._num_iterations = self._DEFAULT_NUM_ITERATIONS
     if 'NUM_ITERATIONS' in os.environ:
       try:
         self._num_iterations = int(os.environ['NUM_ITERATIONS'])
         if self._num_iterations <= 0:
           raise ValueError('Environment variable NUM_ITERATIONS must be an '
                            'integer > 0.')
       except ValueError, e:
         self.fail('Error processing environment variable: %s' % e)
     pyauto.PyUITest.setUp(self)
 
+  # TODO(dennisjeffrey): Reorganize the code to create a base PerfTest class
+  # to separate out common functionality, then create specialized subclasses
+  # such as TabPerfTest that implement the test-specific functionality.
   def _MeasureElapsedTime(self, python_command, num_invocations):
     """Measures time (in msec) to execute a python command one or more times.
 
     Args:
       python_command: A callable.
       num_invocations: An integer number of times to invoke the given command.
 
     Returns:
       The time required to execute the python command the specified number of
       times, in milliseconds as a float.
     """
     assert callable(python_command)
-    start_time = time.time()
-    for _ in range(num_invocations):
-      python_command()
-    stop_time = time.time()
-    return (stop_time - start_time) * 1000  # Convert to milliseconds.
+    def RunCommand():
+      for _ in range(num_invocations):
+        python_command()
+    timer = timeit.Timer(stmt=lambda: RunCommand())
+    return timer.timeit(number=1) * 1000  # Convert seconds to milliseconds.
 
   def _AvgAndStdDev(self, values):
     """Computes the average and standard deviation of a set of values.
 
     Args:
       values: A list of numeric values.
 
     Returns:
       A 2-tuple of floats (average, standard_deviation).
     """
-    avg = float(sum(values)) / len(values)
-    temp_vals = [math.pow(x - avg, 2) for x in values]
-    std_dev = math.sqrt(sum(temp_vals) / len(temp_vals))
+    avg = 0.0
+    std_dev = 0.0
+    if values:
+      avg = float(sum(values)) / len(values)
+      if len(values) > 1:
+        temp_vals = [math.pow(x - avg, 2) for x in values]
+        std_dev = math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))
     return avg, std_dev
 
-  def _PrintSummaryResults(self, first_val, units, values=[]):
+  def _OutputPerfGraphValue(self, description, value):
+    """Outputs a performance value to have it graphed on the performance bots.
+
+    Only used for ChromeOS.
+
+    Args:
+      description: A string description of the performance value.
+      value: A numeric value representing a single performance measurement.
+    """
+    if self.IsChromeOS():
+      print '\n%s(\'%s\', %.2f)%s' % (self._PERF_OUTPUT_MARKER_PRE, description,
+                                      value, self._PERF_OUTPUT_MARKER_POST)
+
+  def _PrintSummaryResults(self, description, first_val, units, values=[]):
     """Logs summary measurement information.
 
     Args:
+      description: A string description for the specified results.
       first_val: A numeric measurement value for a single initial trial.
       units: A string specifying the units for the specified measurements.
       values: A list of numeric value measurements.
     """
+    logging.info('Results for: ' + description)
     logging.debug('Single trial: %.2f %s', first_val, units)
     if values:
       avg, std_dev = self._AvgAndStdDev(values)
       logging.info('Number of iterations: %d', len(values))
       for val in values:
         logging.info('  %.2f %s', val, units)
       logging.info('  --------------------------')
       logging.info('  Average: %.2f %s', avg, units)
       logging.info('  Std dev: %.2f %s', std_dev, units)
+      self._OutputPerfGraphValue('%s_%s' % (units, description), avg)
+    else:
+      self._OutputPerfGraphValue('%s_%s' % (units, description), first_val)
 
-  def _RunNewTabTest(self, open_tab_command, num_tabs=1):
+  def _RunNewTabTest(self, description, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).
 
     This helper function can be called from different tests to do perf testing
     with different types of tabs. It is assumed that the |open_tab_command|
     will open up a single tab.
 
     Args:
+      description: A string description of the associated tab test.
      open_tab_command: A callable that will open a single tab.
       num_tabs: The number of tabs to open, i.e., the number of times to invoke
           the |open_tab_command|.
     """
     assert callable(open_tab_command)
+
+    # TODO(dennisjeffrey): Consider not taking an initial sample here.
     orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
     self.assertEqual(1 + num_tabs, self.GetTabCount(),
                      msg='Did not open %d new tab(s).' % num_tabs)
     for _ in range(num_tabs):
       self.GetBrowserWindow(0).GetTab(1).Close(True)
 
     timings = []
     for _ in range(self._num_iterations):
       elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
       self.assertEqual(1 + num_tabs, self.GetTabCount(),
                        msg='Did not open %d new tab(s).' % num_tabs)
       for _ in range(num_tabs):
         self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)
 
-    self._PrintSummaryResults(orig_elapsed, 'ms', values=timings)
+    self._PrintSummaryResults(description, orig_elapsed, 'milliseconds',
+                              values=timings)
 
   def testNewTab(self):
     """Measures time to open a new tab."""
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
+    self._RunNewTabTest('NewTabPage',
+                        lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
 
   def testNewTabPdf(self):
     """Measures time to open a new tab navigated to a PDF file."""
     url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabPdfPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))
 
   def testNewTabFlash(self):
     """Measures time to open a new tab navigated to a flash page."""
     url = self.GetFileURLForDataPath('plugin', 'flash.swf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabFlashPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))
 
   def test20Tabs(self):
     """Measures time to open 20 tabs."""
     self._RunNewTabTest(
+        '20TabsNewTabPage',
         lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20)
 
   def testV8BenchmarkSuite(self):
     """Measures score from online v8 benchmark suite."""
     url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')
     self.AppendTab(pyauto.GURL(url))
     js = """
         var val = document.getElementById("status").innerHTML;
         window.domAutomationController.send(val);
     """
     self.assertTrue(
         self.WaitUntil(
             lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300,
             expect_retval=True),
         msg='Timed out when waiting for v8 benchmark score.')
     val = self.ExecuteJavascript(js, 0, 1)
     score = int(val[val.find(':') + 2:])
-    self._PrintSummaryResults(score, 'score')
+    self._PrintSummaryResults('V8Benchmark', score, 'score')
 
 
 if __name__ == '__main__':
   pyauto_functional.Main()
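
For context on how the new |_OutputPerfGraphValue| output is meant to be consumed: on ChromeOS the test now prints marker lines of the form _PERF_PRE_('milliseconds_NewTabPage', 123.45)_PERF_POST_ to stdout. The autotest-side consumer is not part of this file, so the following is only a hypothetical sketch of how such marker lines could be pulled out of captured test output; the regular expression and function name are assumptions of this example, not code from the change:

import re

# Hypothetical helper (not part of this change): extract (description, value)
# pairs from captured pyauto test output, using the markers defined in perf.py.
_PERF_LINE_RE = re.compile(r"_PERF_PRE_\('([^']+)',\s*([-\d.]+)\)_PERF_POST_")

def ExtractPerfValues(output_text):
  """Returns a list of (description, value) tuples found in |output_text|."""
  return [(description, float(value))
          for description, value in _PERF_LINE_RE.findall(output_text)]

# Example:
#   ExtractPerfValues("_PERF_PRE_('milliseconds_NewTabPage', 123.45)_PERF_POST_")
#   returns [('milliseconds_NewTabPage', 123.45)]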