Index: chrome/test/functional/media/media_fps_perf.py
diff --git a/chrome/test/functional/media/media_fps_perf.py b/chrome/test/functional/media/media_fps_perf.py
index d18e6bca7d99955a7883deb7e7d97ecd12f9a30c..77f7c4c2c671b2df7fa6b9e7526760464812b3a4 100644
--- a/chrome/test/functional/media/media_fps_perf.py
+++ b/chrome/test/functional/media/media_fps_perf.py
@@ -3,14 +3,16 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-"""FPS (frames per second) performance test for <video>.
+"""CPU, Memory, and FPS performance test for <video>.
 
-Calculates decoded fps and dropped fps while playing HTML5 media element. The
-test compares results of playing a media file on different video resolutions.
+Calculates decoded fps, dropped fps, CPU, and memory statistics while playing
+an HTML5 media element. The test compares results of playing a media file at
+different video resolutions.
 """
 
 import logging
 import os
 
+import psutil
 import pyauto_media
 import pyauto
@@ -22,46 +24,58 @@ _TEST_HTML_PATH = os.path.join('media', 'html', 'media_fps_perf.html')
 # Path under data path for test files.
 _TEST_MEDIA_PATH = os.path.join('pyauto_private', 'media', 'crowd')
 
-# The media files used for testing. The map is from the media file type to short
-# file names. A perf graph is generated for each file type.
-_TEST_VIDEOS = {
-    'webm': ['crowd2160', 'crowd1080', 'crowd720', 'crowd480', 'crowd360']
-}
+# The media files used for testing.
+_TEST_VIDEOS = [
+    'crowd2160.webm', 'crowd1080.webm', 'crowd720.webm', 'crowd480.webm',
+    'crowd360.webm']
 
 
-def ToMbit(value):
-  """Converts a value of byte per sec units into Mbps units."""
-  return float(value * 8) / (1024 * 1024)
-
-
-class MediaFPSPerfTest(pyauto.PyUITest):
+class MediaStatsPerfTest(pyauto.PyUITest):
   """PyAuto test container. See file doc string for more information."""
 
-  def testMediaFPSPerformance(self):
-    """Launches HTML test which plays each video and records statistics.
-
-    For each video, the test plays till ended event is fired. It records decoded
-    fps, dropped fps, and total dropped frames.
-    """
-    self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))
-
-    for ext, files in _TEST_VIDEOS.iteritems():
-      for name in files:
-        file_url = self.GetFileURLForDataPath(
-            os.path.join(_TEST_MEDIA_PATH, '%s.%s' % (name, ext)))
-        logging.debug('Running fps perf test for %s.', file_url)
-        self.assertTrue(self.ExecuteJavascript("startTest('%s');" % file_url))
-        decoded_fps = [float(value) for value in
-                       self.GetDOMValue("decodedFPS.join(',')").split(',')]
-        dropped_frames = self.GetDOMValue('droppedFrames')
-        dropped_fps = [float(value) for value in
-                       self.GetDOMValue("droppedFPS.join(',')").split(',')]
-
-        pyauto_utils.PrintPerfResult('FPS_' + ext, name, decoded_fps, 'fps')
-        pyauto_utils.PrintPerfResult('Dropped_FPS_' + ext, name, dropped_fps,
-                                     'fps')
-        pyauto_utils.PrintPerfResult('Dropped_Frames_' + ext, name,
-                                     dropped_frames, 'frames')
+  def _GetChromeRendererProcess(self):
+    """Returns the Chrome renderer process."""
+    renderer_id = self.GetBrowserInfo()['windows'][0]['tabs'][1]['renderer_pid']
+    if not renderer_id:
+      self.fail('Can not find the tab renderer process.')
+    return psutil.Process(renderer_id)
+
+  def testMediaPerformance(self):
+    """Launches HTML test which plays each video and records statistics."""
+    for file_name in _TEST_VIDEOS:
+      # Append a tab and delete it at the end of the test to free its memory.
+      self.AppendTab(pyauto.GURL(self.GetFileURLForDataPath(_TEST_HTML_PATH)))
+
+      file_url = self.GetFileURLForDataPath(
+          os.path.join(_TEST_MEDIA_PATH, '%s' % file_name))
DaleCurtis, 2012/03/01 02:47:11:
  Just file_name without %
shadi, 2012/03/01 19:17:33:
  Done.
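The suggestion above amounts to dropping the redundant '%s' formatting and passing the name straight through; a minimal sketch of the simplified call (same behavior, since each entry in _TEST_VIDEOS is already a full file name such as 'crowd1080.webm'):

      file_url = self.GetFileURLForDataPath(
          os.path.join(_TEST_MEDIA_PATH, file_name))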
+      logging.debug('Running perf test for %s.', file_url)
+
+      renderer_process = self._GetChromeRendererProcess()
+      # Call to set a starting time to record CPU usage by the renderer.
+      renderer_process.get_cpu_percent()
+
+      self.assertTrue(
+          self.CallJavascriptFunc('startTest', [file_url, True], tab_index=1))
+
+      cpu_usage = renderer_process.get_cpu_percent()
+      mem_usage = renderer_process.get_memory_info()[0] / 1024
+      pyauto_utils.PrintPerfResult('cpu', file_name, cpu_usage, '%')
+      pyauto_utils.PrintPerfResult('memory', file_name, mem_usage, 'KB')
+
+      decoded_fps = [
+          float(value) for value in
+          self.GetDOMValue("decodedFPS.join(',')", tab_index=1).split(',')]
+      dropped_frames = self.GetDOMValue('droppedFrames', tab_index=1)
+      dropped_fps = [
+          float(value) for value in
+          self.GetDOMValue("droppedFPS.join(',')", tab_index=1).split(',')]
+
+      pyauto_utils.PrintPerfResult('fps', file_name, decoded_fps, 'fps')
DaleCurtis, 2012/03/01 02:47:11:
  Will we need to clean up the old graphs on the bot
shadi, 2012/03/01 19:17:33:
  I think they are automatically cleaned up after wh
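For context on the graph question: pyauto_utils.PrintPerfResult writes each data point in the buildbot perf-log convention, and this patch changes the graph names from 'FPS_<ext>', 'Dropped_FPS_<ext>', and 'Dropped_Frames_<ext>' to 'fps', 'dropped_fps', and 'dropped_frames' (plus the new 'cpu' and 'memory' graphs), so results start accumulating under new names on the bots. A rough illustration of the emitted lines, with made-up values and the formatting recalled from the Chromium perf convention rather than taken from this patch:

  RESULT cpu: crowd1080.webm= 37.5 %
  RESULT memory: crowd1080.webm= 151040 KB
  RESULT fps: crowd1080.webm= [29.7,30.1,29.9] fps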
+      pyauto_utils.PrintPerfResult('dropped_fps', file_name, dropped_fps, 'fps')
+      pyauto_utils.PrintPerfResult('dropped_frames', file_name, dropped_frames,
+                                   'frames')
+
+      self.GetBrowserWindow(0).GetTab(1).Close(True)
 
 
 if __name__ == '__main__':
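The CPU and memory numbers above lean on psutil's delta-based accounting, assuming the non-blocking default the test relies on: the first get_cpu_percent() call only establishes a baseline, the next call returns the renderer's CPU utilization over the interval in between, and get_memory_info()[0] is the resident set size in bytes (divided by 1024 to report KB). A minimal standalone sketch of that pattern, using the psutil 0.x method names from the patch (newer psutil spells them cpu_percent() and memory_info()); the pid and the sleep are placeholders for the tab's renderer_pid and the actual playback:

  import time

  import psutil

  process = psutil.Process(1234)  # Placeholder pid; the test uses renderer_pid.
  process.get_cpu_percent()       # Baseline call; usage is measured from here.
  time.sleep(5)                   # Stand-in for playing the video to its end.
  cpu_usage = process.get_cpu_percent()  # Percent CPU used since the baseline.
  mem_usage = process.get_memory_info()[0] / 1024  # RSS, bytes -> KB.
  print 'cpu %s%%, memory %s KB' % (cpu_usage, mem_usage)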