OLD | NEW |
---|---|
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Basic pyauto performance tests. | |
7 | |
8 For tests that need to be run for multiple iterations (e.g., so that average | |
9 and standard deviation values can be reported), the default number of iterations | |
10 run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|. | |
11 That value can optionally be tweaked by setting an environment variable | |
12 'NUM_ITERATIONS' to a positive integer, representing the number of iterations | |
13 to run. | |
14 """ | |
15 | |
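A minimal sketch of overriding the iteration count described in the docstring above, assuming the pyauto harness inherits the calling shell's environment (the script path below is illustrative, not taken from this patch):

    # Hypothetical shell invocation; 'functional/perf.py' is illustrative:
    #   NUM_ITERATIONS=10 python functional/perf.py perf.PerfTest.testNewTab
    import os
    os.environ['NUM_ITERATIONS'] = '10'  # must parse as an integer > 0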
6 import logging | 16 import logging |
7 import math | 17 import math |
8 import os | 18 import os |
9 import time | 19 import time |
10 | 20 |
11 import pyauto_functional # Must be imported before pyauto. | 21 import pyauto_functional # Must be imported before pyauto. |
12 import pyauto | 22 import pyauto |
13 | 23 |
14 | 24 |
15 class PerfTest(pyauto.PyUITest): | 25 class PerfTest(pyauto.PyUITest): |
16 """Basic performance tests.""" | 26 """Basic performance tests.""" |
17 | 27 |
18 _NUM_ITERATIONS = 50 | 28 _DEFAULT_NUM_ITERATIONS = 50 |
29 | |
30 def setUp(self): | |
31 """Performs necessary setup work before running each test.""" | |
32 self._num_iterations = self._DEFAULT_NUM_ITERATIONS | |
33 if 'NUM_ITERATIONS' in os.environ: | |
34 try: | |
35 self._num_iterations = int(os.environ['NUM_ITERATIONS']) | |
36 if self._num_iterations <= 0: | |
37 raise ValueError('Environment variable NUM_ITERATIONS must be an ' | |
38 'integer > 0.') | |
39 except ValueError, e: | |
40 self.fail('Error processing environment variable: %s' % e) | |
41 pyauto.PyUITest.setUp(self) | |
19 | 42 |
20 def _MeasureElapsedTime(self, python_command, num_invocations): | 43 def _MeasureElapsedTime(self, python_command, num_invocations): |
21 """Measures time (in msec) to execute a python command one or more times. | 44 """Measures time (in msec) to execute a python command one or more times. |
22 | 45 |
23 Args: | 46 Args: |
24 python_command: A callable. | 47 python_command: A callable. |
25 num_invocations: An integer number of times to invoke the given command. | 48 num_invocations: An integer number of times to invoke the given command. |
26 | 49 |
27 Returns: | 50 Returns: |
28 The time required to execute the python command the specified number of | 51 The time required to execute the python command the specified number of |
(...skipping 13 matching lines...) | |
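The body of _MeasureElapsedTime is elided in this diff; a minimal sketch of such a timing helper, assuming time.time() as the clock (the module already imports time) and not claiming to be the skipped code:

    def _MeasureElapsedTime(self, python_command, num_invocations):
      # Hypothetical sketch only; the real implementation is skipped above.
      assert callable(python_command)
      start = time.time()
      for _ in range(num_invocations):
        python_command()
      return 1000.0 * (time.time() - start)  # elapsed time in msec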
42 values: A list of numeric values. | 65 values: A list of numeric values. |
43 | 66 |
44 Returns: | 67 Returns: |
45 A 2-tuple of floats (average, standard_deviation). | 68 A 2-tuple of floats (average, standard_deviation). |
46 """ | 69 """ |
47 avg = float(sum(values)) / len(values) | 70 avg = float(sum(values)) / len(values) |
48 temp_vals = [math.pow(x - avg, 2) for x in values] | 71 temp_vals = [math.pow(x - avg, 2) for x in values] |
49 std_dev = math.sqrt(sum(temp_vals) / len(temp_vals)) | 72 std_dev = math.sqrt(sum(temp_vals) / len(temp_vals)) |
50 return avg, std_dev | 73 return avg, std_dev |
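A quick worked check of _AvgAndStdDev (note it computes the population standard deviation, dividing by N rather than N - 1):

    import math
    values = [10.0, 12.0, 14.0]
    avg = float(sum(values)) / len(values)        # 12.0
    sq = [math.pow(x - avg, 2) for x in values]   # [4.0, 0.0, 4.0]
    std_dev = math.sqrt(sum(sq) / len(sq))        # sqrt(8/3), about 1.63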
51 | 74 |
52 def _PrintSummaryResults(self, first_val, num_iter, values, units): | 75 def _PrintSummaryResults(self, first_val, units, values=[]): |
53 """Logs summary measurement information. | 76 """Logs summary measurement information. |
54 | 77 |
55 Args: | 78 Args: |
56 first_val: A numeric measurement value for a single initial trial. | 79 first_val: A numeric measurement value for a single initial trial. |
57 num_iter: An integer number of iterations used for multiple trials. | 80 units: A string specifying the units for the specified measurements. |
58 values: A list of numeric value measurements. | 81 values: A list of numeric value measurements. |
59 units: A string specifying the units for the specified measurements. | |
60 """ | 82 """ |
61 avg, std_dev = self._AvgAndStdDev(values) | 83 logging.debug('Single trial: %.2f %s', first_val, units) |
62 logging.debug('First trial: %.2f %s', first_val, units) | 84 if values: |
63 logging.info('Number of iterations: %d', num_iter) | 85 avg, std_dev = self._AvgAndStdDev(values) |
64 for val in values: | 86 logging.info('Number of iterations: %d', len(values)) |
65 logging.info(' %.2f %s', val, units) | 87 for val in values: |
66 logging.info(' --------------------------') | 88 logging.info(' %.2f %s', val, units) |
67 logging.info(' Average: %.2f %s', avg, units) | 89 logging.info(' --------------------------') |
68 logging.info(' Std dev: %.2f %s', std_dev, units) | 90 logging.info(' Average: %.2f %s', avg, units) |
91 logging.info(' Std dev: %.2f %s', std_dev, units) | |
69 | 92 |
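Both call styles of the revised _PrintSummaryResults appear later in this patch; a sketch of the logging each produces, with illustrative numbers derived from the format strings above:

    # Single trial only: one debug line, e.g. 'Single trial: 4000.00 score'.
    self._PrintSummaryResults(4000, 'score')

    # With measurements: count, each value, average, and std dev are logged:
    self._PrintSummaryResults(250.0, 'ms', values=[240.0, 260.0])
    #   Number of iterations: 2
    #     240.00 ms
    #     260.00 ms
    #     --------------------------
    #     Average: 250.00 ms
    #     Std dev: 10.00 ms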
70 def _RunNewTabTest(self, open_tab_command, num_tabs=1): | 93 def _RunNewTabTest(self, open_tab_command, num_tabs=1): |
71 """Runs a perf test that involves opening new tab(s). | 94 """Runs a perf test that involves opening new tab(s). |
72 | 95 |
73 This helper function can be called from different tests to do perf testing | 96 This helper function can be called from different tests to do perf testing |
74 with different types of tabs. It is assumed that the |open_tab_command| | 97 with different types of tabs. It is assumed that the |open_tab_command| |
75 will open up a single tab. | 98 will open up a single tab. |
76 | 99 |
77 Args: | 100 Args: |
78 open_tab_command: A callable that will open a single tab. | 101 open_tab_command: A callable that will open a single tab. |
79 num_tabs: The number of tabs to open, i.e., the number of times to invoke | 102 num_tabs: The number of tabs to open, i.e., the number of times to invoke |
80 the |open_tab_command|. | 103 the |open_tab_command|. |
81 """ | 104 """ |
82 assert callable(open_tab_command) | 105 assert callable(open_tab_command) |
83 orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs) | 106 orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs) |
84 self.assertEqual(1 + num_tabs, self.GetTabCount(), | 107 self.assertEqual(1 + num_tabs, self.GetTabCount(), |
85 msg='Did not open %d new tab(s).' % num_tabs) | 108 msg='Did not open %d new tab(s).' % num_tabs) |
86 for _ in range(num_tabs): | 109 for _ in range(num_tabs): |
87 self.GetBrowserWindow(0).GetTab(1).Close(True) | 110 self.GetBrowserWindow(0).GetTab(1).Close(True) |
88 | 111 |
89 timings = [] | 112 timings = [] |
90 for _ in range(self._NUM_ITERATIONS): | 113 for _ in range(self._num_iterations): |
91 elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs) | 114 elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs) |
92 self.assertEqual(1 + num_tabs, self.GetTabCount(), | 115 self.assertEqual(1 + num_tabs, self.GetTabCount(), |
93 msg='Did not open %d new tab(s).' % num_tabs) | 116 msg='Did not open %d new tab(s).' % num_tabs) |
94 for _ in range(num_tabs): | 117 for _ in range(num_tabs): |
95 self.GetBrowserWindow(0).GetTab(1).Close(True) | 118 self.GetBrowserWindow(0).GetTab(1).Close(True) |
96 timings.append(elapsed) | 119 timings.append(elapsed) |
97 | 120 |
98 self._PrintSummaryResults(orig_elapsed, self._NUM_ITERATIONS, timings, 'ms') | 121 self._PrintSummaryResults(orig_elapsed, 'ms', values=timings) |
99 | 122 |
100 def testNewTab(self): | 123 def testNewTab(self): |
101 """Measures time to open a new tab.""" | 124 """Measures time to open a new tab.""" |
102 self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab'))) | 125 self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab'))) |
103 | 126 |
104 def testNewTabPdf(self): | 127 def testNewTabPdf(self): |
105 """Measures time to open a new tab navigated to a PDF file.""" | 128 """Measures time to open a new tab navigated to a PDF file.""" |
106 url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf') | 129 url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf') |
107 self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url))) | 130 self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url))) |
108 | 131 |
109 def testNewTabFlash(self): | 132 def testNewTabFlash(self): |
110 """Measures time to open a new tab navigated to a flash page.""" | 133 """Measures time to open a new tab navigated to a flash page.""" |
111 url = self.GetFileURLForDataPath('plugin', 'flash.swf') | 134 url = self.GetFileURLForDataPath('plugin', 'flash.swf') |
112 self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url))) | 135 self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url))) |
113 | 136 |
114 def test20Tabs(self): | 137 def test20Tabs(self): |
115 """Measures time to open 20 tabs.""" | 138 """Measures time to open 20 tabs.""" |
116 self._RunNewTabTest( | 139 self._RunNewTabTest( |
117 lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20) | 140 lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20) |
118 | 141 |
119 def testV8BenchmarkSuite(self): | 142 def testV8BenchmarkSuite(self): |
120 """Measures score from online v8 benchmark suite.""" | 143 """Measures score from online v8 benchmark suite.""" |
121 | 144 url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html') |
122 def _RunSingleV8BenchmarkSuite(): | 145 self.AppendTab(pyauto.GURL(url)) |
123 """Runs a single v8 benchmark suite test and returns the final score. | 146 js = """ |
124 | 147 var val = document.getElementById("status").innerHTML; |
125 Returns: | 148 window.domAutomationController.send(val); |
126 The integer score computed from running the v8 benchmark suite. | 149 """ |
127 """ | 150 self.WaitUntil( |
Nirnimesh 2011/08/11 21:21:23: wrap this inside assertTrue()
dennis_jeffrey 2011/08/11 21:39:51: Done.
| |
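The change the reviewer asks for would presumably wrap the wait in an assertion so a timeout fails the test, along these lines (a sketch, not the final patch; assertTrue is the standard unittest assertion available on PyUITest):

    self.assertTrue(
        self.WaitUntil(
            lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1),
            timeout=300, expect_retval=True),
        msg='Timed out waiting for the benchmark score.')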
128 url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html') | 151 lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300, |
129 self.AppendTab(pyauto.GURL(url)) | 152 expect_retval=True) |
130 js = """ | 153 val = self.ExecuteJavascript(js, 0, 1) |
131 var val = document.getElementById("status").innerHTML; | 154 score = int(val[val.find(':') + 2:]) |
132 window.domAutomationController.send(val); | 155 self._PrintSummaryResults(score, 'score') |
133 """ | |
134 self.WaitUntil( | |
135 lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300, | |
136 expect_retval=True) | |
137 val = self.ExecuteJavascript(js, 0, 1) | |
138 score = val[val.find(':') + 2:] | |
139 self.GetBrowserWindow(0).GetTab(1).Close(True) | |
140 return int(score) | |
141 | |
142 orig_score = _RunSingleV8BenchmarkSuite() | |
143 self.assertEqual(1, self.GetTabCount(), | |
144 msg='Did not clean up after running benchmark suite.') | |
145 | |
146 scores = [] | |
147 for _ in range(self._NUM_ITERATIONS): | |
148 score = _RunSingleV8BenchmarkSuite() | |
149 self.assertEqual(1, self.GetTabCount(), | |
150 msg='Did not clean up after running benchmark suite.') | |
151 scores.append(score) | |
152 | |
153 self._PrintSummaryResults(orig_score, self._NUM_ITERATIONS, scores, 'score') | |
154 | 156 |
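The score parsing in the new code assumes the status element ends up containing text like 'Score: 4000'; the slice skips past ': ' to the number (illustrative value):

    val = 'Score: 4000'                    # illustrative status text
    score = int(val[val.find(':') + 2:])   # -> 4000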
155 | 157 |
156 if __name__ == '__main__': | 158 if __name__ == '__main__': |
157 pyauto_functional.Main() | 159 pyauto_functional.Main() |