Index: chrome/test/functional/perf.py |
diff --git a/chrome/test/functional/perf.py b/chrome/test/functional/perf.py |
index e3938351b865950a33659b0420fe567fb011ec53..d71626c360b99b8de3efe1cd93e67f64435930e7 100644 |
--- a/chrome/test/functional/perf.py |
+++ b/chrome/test/functional/perf.py |
@@ -26,6 +26,7 @@ class PerfTest(pyauto.PyUITest): |
"""Basic performance tests.""" |
_DEFAULT_NUM_ITERATIONS = 50 |
+ _CHROMEOS_OUTPUT_FILE = '/usr/local/tmp/perf_data.txt' |
[Review comment — Nirnimesh, 2011/08/25 00:23:36] |
Do not hardcode the path; use tempfile.gettempdir(). |
[Reply — dennis_jeffrey, 2011/08/25 01:30:35] |
I no longer use a temp file at all. |
 |
def setUp(self): |
"""Performs necessary setup work before running each test.""" |
@@ -72,14 +73,32 @@ class PerfTest(pyauto.PyUITest): |
std_dev = math.sqrt(sum(temp_vals) / len(temp_vals)) |
return avg, std_dev |
- def _PrintSummaryResults(self, first_val, units, values=[]): |
+ def _OutputPerfGraphValue(self, description, value): |
+ """Outputs a performance value to have it graphed on the performance bots. |
+ |
+ Only used for ChromeOS. |
+ |
+ Args: |
+ description: A string description of the performance value. |
+ value: A numeric value representing a single performance measurement. |
+ """ |
+ if self.IsChromeOS(): |
+ try: |
[Review comment — Nirnimesh, 2011/08/25 00:23:36] |
Why the try block? |
[Reply — dennis_jeffrey, 2011/08/25 01:30:35] |
The try has since been removed. |
 |
+ with open(self._CHROMEOS_OUTPUT_FILE, 'a') as f: |
+ f.write('(\'%s\', %.2f)\n' % (description, value)) |
+ except IOError, e: |
+ print 'Warning: could not output to performance data file: ' + str(e) |
+ |
+ def _PrintSummaryResults(self, description, first_val, units, values=[]): |
"""Logs summary measurement information. |
Args: |
+ description: A string description for the specified results. |
first_val: A numeric measurement value for a single initial trial. |
units: A string specifying the units for the specified measurements. |
values: A list of numeric value measurements. |
""" |
+ logging.info('Results for: ' + description) |
logging.debug('Single trial: %.2f %s', first_val, units) |
if values: |
avg, std_dev = self._AvgAndStdDev(values) |
@@ -89,8 +108,11 @@ class PerfTest(pyauto.PyUITest): |
logging.info(' --------------------------') |
logging.info(' Average: %.2f %s', avg, units) |
logging.info(' Std dev: %.2f %s', std_dev, units) |
+ self._OutputPerfGraphValue('%s_%s' % (units, description), avg) |
+ else: |
+ self._OutputPerfGraphValue('%s_%s' % (units, description), first_val) |
- def _RunNewTabTest(self, open_tab_command, num_tabs=1): |
+ def _RunNewTabTest(self, description, open_tab_command, num_tabs=1): |
"""Runs a perf test that involves opening new tab(s). |
This helper function can be called from different tests to do perf testing |
@@ -98,6 +120,7 @@ class PerfTest(pyauto.PyUITest): |
will open up a single tab. |
Args: |
+ description: A string description of the associated tab test. |
open_tab_command: A callable that will open a single tab. |
num_tabs: The number of tabs to open, i.e., the number of times to invoke |
the |open_tab_command|. |
@@ -118,25 +141,30 @@ class PerfTest(pyauto.PyUITest): |
self.GetBrowserWindow(0).GetTab(1).Close(True) |
timings.append(elapsed) |
- self._PrintSummaryResults(orig_elapsed, 'ms', values=timings) |
+ self._PrintSummaryResults(description, orig_elapsed, 'milliseconds', |
+ values=timings) |
def testNewTab(self): |
"""Measures time to open a new tab.""" |
- self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab'))) |
+ self._RunNewTabTest('NewTabPage', |
+ lambda: self.AppendTab(pyauto.GURL('chrome://newtab'))) |
def testNewTabPdf(self): |
"""Measures time to open a new tab navigated to a PDF file.""" |
url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf') |
- self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url))) |
+ self._RunNewTabTest('NewTabPdfPage', |
+ lambda: self.AppendTab(pyauto.GURL(url))) |
def testNewTabFlash(self): |
"""Measures time to open a new tab navigated to a flash page.""" |
url = self.GetFileURLForDataPath('plugin', 'flash.swf') |
- self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url))) |
+ self._RunNewTabTest('NewTabFlashPage', |
+ lambda: self.AppendTab(pyauto.GURL(url))) |
def test20Tabs(self): |
"""Measures time to open 20 tabs.""" |
self._RunNewTabTest( |
+ '20TabsNewTabPage', |
lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20) |
def testV8BenchmarkSuite(self): |
@@ -154,7 +182,7 @@ class PerfTest(pyauto.PyUITest): |
msg='Timed out when waiting for v8 benchmark score.') |
val = self.ExecuteJavascript(js, 0, 1) |
score = int(val[val.find(':') + 2:]) |
- self._PrintSummaryResults(score, 'score') |
+ self._PrintSummaryResults('V8Benchmark', score, 'score') |
if __name__ == '__main__': |