Index: chrome/test/functional/perf.py
diff --git a/chrome/test/functional/perf.py b/chrome/test/functional/perf.py
index e3938351b865950a33659b0420fe567fb011ec53..cf8c7e4d10fdcffcadea80601023878d34602afe 100644
--- a/chrome/test/functional/perf.py
+++ b/chrome/test/functional/perf.py
@@ -26,6 +26,8 @@ class PerfTest(pyauto.PyUITest):
   """Basic performance tests."""
 
   _DEFAULT_NUM_ITERATIONS = 50
+  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
+  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'
 
   def setUp(self):
     """Performs necessary setup work before running each test."""
@@ -72,14 +74,29 @@ class PerfTest(pyauto.PyUITest):
     std_dev = math.sqrt(sum(temp_vals) / len(temp_vals))
truty (2011/08/25 04:25:01): This illustrates why I use a Python stats lib inst ...
dennis_jeffrey (2011/08/25 22:33:05): Originally Nirnimesh had recommended I use the "nu ...
truty (2011/08/26 20:33:00): I use a Python stats library that is opensource - ...
dennis_jeffrey (2011/08/26 23:23:54): I changed the computation to "sample" std dev base ...
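For reference on the thread above: the context line above divides by len(temp_vals), which is the population form (assuming temp_vals holds squared deviations from the mean), while the "sample" form mentioned in the discussion divides by n - 1. A minimal standalone sketch of the sample version, not part of this change:

    import math

    def SampleStdDev(values):
      """Sample (n - 1) standard deviation, per the review thread above."""
      if len(values) < 2:
        return 0.0
      avg = sum(values) / float(len(values))
      squared_diffs = [(v - avg) ** 2 for v in values]
      return math.sqrt(sum(squared_diffs) / (len(values) - 1))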
     return avg, std_dev
 
-  def _PrintSummaryResults(self, first_val, units, values=[]):
+  def _OutputPerfGraphValue(self, description, value):
+    """Outputs a performance value to have it graphed on the performance bots.
+
+    Only used for ChromeOS.
+
+    Args:
+      description: A string description of the performance value.
+      value: A numeric value representing a single performance measurement.
+    """
+    if self.IsChromeOS():
+      print '%s(\'%s\', %.2f)%s' % (self._PERF_OUTPUT_MARKER_PRE, description,
+                                    value, self._PERF_OUTPUT_MARKER_POST)
+
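For illustration only (the description and value below are invented), the format string above emits one marker-wrapped line on stdout when self.IsChromeOS() is true:

    # Standalone illustration of the emitted format (markers copied from above):
    pre, post = '_PERF_PRE_', '_PERF_POST_'
    print '%s(\'%s\', %.2f)%s' % (pre, 'milliseconds_NewTabPage', 123.456, post)
    # Output: _PERF_PRE_('milliseconds_NewTabPage', 123.46)_PERF_POST_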
+  def _PrintSummaryResults(self, description, first_val, units, values=[]):
     """Logs summary measurement information.
 
     Args:
+      description: A string description for the specified results.
       first_val: A numeric measurement value for a single initial trial.
       units: A string specifying the units for the specified measurements.
       values: A list of numeric value measurements.
     """
+    logging.info('Results for: ' + description)
     logging.debug('Single trial: %.2f %s', first_val, units)
     if values:
       avg, std_dev = self._AvgAndStdDev(values)
@@ -89,8 +106,11 @@ class PerfTest(pyauto.PyUITest):
       logging.info('  --------------------------')
       logging.info('  Average: %.2f %s', avg, units)
       logging.info('  Std dev: %.2f %s', std_dev, units)
+      self._OutputPerfGraphValue('%s_%s' % (units, description), avg)
truty (2011/08/25 04:25:01): FYI, it's typical for current ChromeOS perf tests ...
dennis_jeffrey (2011/08/25 22:33:05): Is this something that happens automatically when ...
truty (2011/08/26 20:33:00): Yes. Using write_perf_keyval() causes the entire d ...
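The write_perf_keyval() flow mentioned above is not part of this CL; a rough sketch of how a ChromeOS-side wrapper could harvest the marker lines from PyAuto output and forward them is shown below. The regex, the helper name, and the surrounding autotest class are all assumptions:

    import re

    # Matches lines like: _PERF_PRE_('milliseconds_NewTabPage', 123.46)_PERF_POST_
    _PERF_LINE_RE = re.compile(r"_PERF_PRE_\('([^']+)', ([-0-9.]+)\)_PERF_POST_")

    def CollectPerfKeyvals(log_text):
      """Returns a {description: value} dict suitable for write_perf_keyval()."""
      keyvals = {}
      for match in _PERF_LINE_RE.finditer(log_text):
        keyvals[match.group(1)] = float(match.group(2))
      return keyvals

    # Inside a hypothetical autotest wrapper test:
    #   self.write_perf_keyval(CollectPerfKeyvals(pyauto_output))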
+    else:
+      self._OutputPerfGraphValue('%s_%s' % (units, description), first_val)
 
-  def _RunNewTabTest(self, open_tab_command, num_tabs=1):
+  def _RunNewTabTest(self, description, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).
 
     This helper function can be called from different tests to do perf testing
@@ -98,6 +118,7 @@ class PerfTest(pyauto.PyUITest):
     will open up a single tab.
truty (2011/08/25 04:25:01): I think you should explain this pattern of separat ...
dennis_jeffrey (2011/08/25 22:33:05): This is indeed something that was bugging me a lit ...
 
     Args:
+      description: A string description of the associated tab test.
       open_tab_command: A callable that will open a single tab.
       num_tabs: The number of tabs to open, i.e., the number of times to invoke
                 the |open_tab_command|.
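To make the pattern discussed in the thread above concrete: each test supplies only a description string plus a zero-argument callable that opens one tab, and _RunNewTabTest owns the iteration and timing. A hypothetical additional test would follow the same shape (the data path and test name here are invented for illustration):

    def testNewTabExample(self):
      """Hypothetical example: measures time to open a tab on a local page."""
      url = self.GetFileURLForDataPath('example', 'example.html')  # assumed path
      self._RunNewTabTest('NewTabExamplePage',
                          lambda: self.AppendTab(pyauto.GURL(url)))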
@@ -118,25 +139,30 @@ class PerfTest(pyauto.PyUITest):
       self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)
 
-    self._PrintSummaryResults(orig_elapsed, 'ms', values=timings)
+    self._PrintSummaryResults(description, orig_elapsed, 'milliseconds',
+                              values=timings)
 
   def testNewTab(self):
     """Measures time to open a new tab."""
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
+    self._RunNewTabTest('NewTabPage',
+                        lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
 
   def testNewTabPdf(self):
     """Measures time to open a new tab navigated to a PDF file."""
     url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabPdfPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))
 
   def testNewTabFlash(self):
     """Measures time to open a new tab navigated to a flash page."""
     url = self.GetFileURLForDataPath('plugin', 'flash.swf')
-    self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
+    self._RunNewTabTest('NewTabFlashPage',
+                        lambda: self.AppendTab(pyauto.GURL(url)))
 
   def test20Tabs(self):
     """Measures time to open 20 tabs."""
     self._RunNewTabTest(
+        '20TabsNewTabPage',
         lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20)
 
   def testV8BenchmarkSuite(self):
@@ -154,7 +180,7 @@ class PerfTest(pyauto.PyUITest):
         msg='Timed out when waiting for v8 benchmark score.')
     val = self.ExecuteJavascript(js, 0, 1)
     score = int(val[val.find(':') + 2:])
-    self._PrintSummaryResults(score, 'score')
+    self._PrintSummaryResults('V8Benchmark', score, 'score')
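For clarity on the slice a few lines above: it assumes the JavaScript result is a string shaped like 'Score: NNNN' (the exact label comes from the benchmark page, so treat that as an assumption). A standalone version of the same parse:

    val = 'Score: 3742'                    # assumed shape of the returned value
    score = int(val[val.find(':') + 2:])   # val.find(':') + 2 skips ': ' -> '3742'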
 
 
 if __name__ == '__main__':