Unified Diff: chrome/test/functional/perf.py

Issue 7745007: Pyauto performance tests now output data to be graphed using autotest. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 9 years, 4 months ago
 #!/usr/bin/python
 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Basic pyauto performance tests.

 For tests that need to be run for multiple iterations (e.g., so that average
 and standard deviation values can be reported), the default number of iterations
 run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
 That value can optionally be tweaked by setting an environment variable
 'NUM_ITERATIONS' to a positive integer, representing the number of iterations
 to run.
 """
15 15
16 import logging 16 import logging
17 import math 17 import math
18 import os 18 import os
19 import time 19 import time
20 20
21 import pyauto_functional # Must be imported before pyauto. 21 import pyauto_functional # Must be imported before pyauto.
22 import pyauto 22 import pyauto
23 23
24 24
25 class PerfTest(pyauto.PyUITest): 25 class PerfTest(pyauto.PyUITest):
26 """Basic performance tests.""" 26 """Basic performance tests."""
27 27
28 _DEFAULT_NUM_ITERATIONS = 50 28 _DEFAULT_NUM_ITERATIONS = 50
29 _CHROMEOS_OUTPUT_FILE = '/usr/local/tmp/perf_data.txt'
Nirnimesh 2011/08/25 00:23:36 Do not hardcode path. use tempfile.gettempdir()
dennis_jeffrey 2011/08/25 01:30:35 I no longer use a temp file at all.
29 30
   def setUp(self):
     """Performs necessary setup work before running each test."""
     self._num_iterations = self._DEFAULT_NUM_ITERATIONS
     if 'NUM_ITERATIONS' in os.environ:
       try:
         self._num_iterations = int(os.environ['NUM_ITERATIONS'])
         if self._num_iterations <= 0:
           raise ValueError('Environment variable NUM_ITERATIONS must be an '
                            'integer > 0.')

(...skipping 26 matching lines...)
       values: A list of numeric values.

     Returns:
       A 2-tuple of floats (average, standard_deviation).
     """
     avg = float(sum(values)) / len(values)
     temp_vals = [math.pow(x - avg, 2) for x in values]
     std_dev = math.sqrt(sum(temp_vals) / len(temp_vals))
     return avg, std_dev

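Note that this computes the population (not sample) standard deviation. A
small worked example, independent of the CL:

    import math

    values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    avg = float(sum(values)) / len(values)                 # 5.0
    squared_devs = [math.pow(x - avg, 2) for x in values]  # [9, 1, 1, 1, 0, 0, 4, 16]
    std_dev = math.sqrt(sum(squared_devs) / len(squared_devs))  # sqrt(32 / 8) = 2.0
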
-  def _PrintSummaryResults(self, first_val, units, values=[]):
+  def _OutputPerfGraphValue(self, description, value):
+    """Outputs a performance value to have it graphed on the performance bots.
+
+    Only used for ChromeOS.
+
+    Args:
+      description: A string description of the performance value.
+      value: A numeric value representing a single performance measurement.
+    """
+    if self.IsChromeOS():
+      try:

Nirnimesh 2011/08/25 00:23:36: Why the try block?
dennis_jeffrey 2011/08/25 01:30:35: The try has since been removed.

+        with open(self._CHROMEOS_OUTPUT_FILE, 'a') as f:
+          f.write('(\'%s\', %.2f)\n' % (description, value))
+      except IOError, e:
+        print 'Warning: could not output to performance data file: ' + str(e)
+
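
The autotest-side consumer of this file is not part of this CL. As a minimal
sketch, assuming one Python tuple literal per line as written above
(read_perf_data is a hypothetical helper, not autotest's actual parser):

    import ast

    def read_perf_data(path):
      # Parses lines like: ('milliseconds_NewTabPage', 123.45)
      results = []
      with open(path) as f:
        for line in f:
          line = line.strip()
          if line:
            description, value = ast.literal_eval(line)
            results.append((description, value))
      return results
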
+  def _PrintSummaryResults(self, description, first_val, units, values=[]):
     """Logs summary measurement information.

     Args:
+      description: A string description for the specified results.
       first_val: A numeric measurement value for a single initial trial.
       units: A string specifying the units for the specified measurements.
       values: A list of numeric value measurements.
     """
+    logging.info('Results for: ' + description)
     logging.debug('Single trial: %.2f %s', first_val, units)
     if values:
       avg, std_dev = self._AvgAndStdDev(values)
       logging.info('Number of iterations: %d', len(values))
       for val in values:
         logging.info('  %.2f %s', val, units)
       logging.info('  --------------------------')
       logging.info('  Average: %.2f %s', avg, units)
       logging.info('  Std dev: %.2f %s', std_dev, units)
+      self._OutputPerfGraphValue('%s_%s' % (units, description), avg)
+    else:
+      self._OutputPerfGraphValue('%s_%s' % (units, description), first_val)

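The graph key handed to _OutputPerfGraphValue is built as
'<units>_<description>', so for the tests below the keys look like:

    units, description = 'milliseconds', 'NewTabPage'
    graph_key = '%s_%s' % (units, description)  # 'milliseconds_NewTabPage'
    # The single-trial V8 case below yields 'score_V8Benchmark'.
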
-  def _RunNewTabTest(self, open_tab_command, num_tabs=1):
+  def _RunNewTabTest(self, description, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).

     This helper function can be called from different tests to do perf testing
     with different types of tabs. It is assumed that the |open_tab_command|
     will open up a single tab.

     Args:
+      description: A string description of the associated tab test.
       open_tab_command: A callable that will open a single tab.
       num_tabs: The number of tabs to open, i.e., the number of times to invoke
           the |open_tab_command|.
     """
     assert callable(open_tab_command)
     orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
     self.assertEqual(1 + num_tabs, self.GetTabCount(),
                      msg='Did not open %d new tab(s).' % num_tabs)
     for _ in range(num_tabs):
       self.GetBrowserWindow(0).GetTab(1).Close(True)

     timings = []
     for _ in range(self._num_iterations):
       elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
       self.assertEqual(1 + num_tabs, self.GetTabCount(),
                        msg='Did not open %d new tab(s).' % num_tabs)
       for _ in range(num_tabs):
         self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)

-    self._PrintSummaryResults(orig_elapsed, 'ms', values=timings)
+    self._PrintSummaryResults(description, orig_elapsed, 'milliseconds',
+                              values=timings)

   def testNewTab(self):
     """Measures time to open a new tab."""
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
+    self._RunNewTabTest('NewTabPage',
+                        lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))

   def testNewTabPdf(self):
     """Measures time to open a new tab navigated to a PDF file."""
     url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabPdfPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))

   def testNewTabFlash(self):
     """Measures time to open a new tab navigated to a flash page."""
     url = self.GetFileURLForDataPath('plugin', 'flash.swf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabFlashPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))

   def test20Tabs(self):
     """Measures time to open 20 tabs."""
     self._RunNewTabTest(
+        '20TabsNewTabPage',
         lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20)

   def testV8BenchmarkSuite(self):
     """Measures score from online v8 benchmark suite."""
     url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')
     self.AppendTab(pyauto.GURL(url))
     js = """
         var val = document.getElementById("status").innerHTML;
         window.domAutomationController.send(val);
     """
     self.assertTrue(
         self.WaitUntil(
             lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300,
             expect_retval=True),
         msg='Timed out when waiting for v8 benchmark score.')
     val = self.ExecuteJavascript(js, 0, 1)
     score = int(val[val.find(':') + 2:])
-    self._PrintSummaryResults(score, 'score')
+    self._PrintSummaryResults('V8Benchmark', score, 'score')

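The slice above assumes the status element's text has the form
'Score: <number>'; an illustration with a made-up score:

    # find(':') returns 5, so the slice starts at index 7, past ': '.
    val = 'Score: 5760'
    score = int(val[val.find(':') + 2:])  # 5760
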
 if __name__ == '__main__':
   pyauto_functional.Main()
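
Typical invocations (hypothetical; test selection syntax depends on the local
pyauto build):

    #   python perf.py                           # run every test in this module
    #   python perf.py perf.PerfTest.testNewTab  # run a single test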
