#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Records metrics on playing media under constrained network conditions.

Spins up a Constrained Network Server (CNS) and runs through a test matrix of
bandwidth, latency, and packet loss settings. Tests running media files defined
in _TEST_MEDIA_EPP record the extra-play-percentage (EPP) metric and the
time-to-playback (TTP) metric in a format consumable by the Chromium perf bots.
Other tests running media files defined in _TEST_MEDIA_NO_EPP record only the
TTP metric.

Since even a small number of different settings yields a large test matrix, the
design is threaded. PyAuto itself is not thread-safe, however, so a global lock
is used whenever calls into PyAuto are necessary. The number of threads is set
by _TEST_THREADS.

The CNS code is located under: <root>/src/media/tools/constrained_network_server
"""

import logging
import os
import posixpath
import Queue

import pyauto_media
import pyauto_utils

import cns_test_base
import worker_thread

# The network constraints used for measuring ttp and epp.
# Previous tests with 2% and 5% packet loss resulted in inconsistent data, so
# packet loss is rarely used in perf tests. Tests with very low bandwidth, such
# as 56K dial-up, resulted in very slow tests (about 8 minutes per test
# iteration). In addition, metrics for dial-up would be out of range of the
# other tests' metrics, making the graphs hard to read.
_TESTS_TO_RUN = [cns_test_base.Cable,
                 cns_test_base.Wifi,
                 cns_test_base.DSL,
                 cns_test_base.Slow,
                 cns_test_base.NoConstraints]
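# Each entry above is expected to expand into one or more [bandwidth, latency,
# loss] settings lists via cns_test_base.CreateCNSPerfTasks; RunTask below
# unpacks the settings in that order when building the video URL.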

# HTML test path; relative to src/chrome/test/data. Loads a test video and
# records metrics in JavaScript.
_TEST_HTML_PATH = os.path.join(
    'media', 'html', 'media_constrained_network.html')

# Number of threads to use during testing.
_TEST_THREADS = 3

# Number of times we run the same test to eliminate outliers.
_TEST_ITERATIONS = 3

# Media file names used for measuring epp and ttp.
_TEST_MEDIA_EPP = ['roller.webm']
_TEST_MEDIA_EPP.extend(posixpath.join('crowd', name) for name in
                       ['crowd360.ogv', 'crowd.wav', 'crowd.ogg'])

# Media file names used for measuring ttp without epp.
_TEST_MEDIA_NO_EPP = [posixpath.join('dartmoor', name) for name in
                      ['dartmoor2.ogg', 'dartmoor2.m4a', 'dartmoor2.mp3',
                       'dartmoor2.wav']]
_TEST_MEDIA_NO_EPP.extend(posixpath.join('crowd', name) for name in
                          ['crowd1080.webm', 'crowd1080.ogv', 'crowd1080.mp4',
                           'crowd360.webm', 'crowd360.mp4'])

# Timeout values for epp and ttp tests in seconds.
_TEST_EPP_TIMEOUT = 180
_TEST_TTP_TIMEOUT = 20
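# TTP should be reported soon after playback starts, while EPP is presumably
# only available once the clip has played through, hence the much larger EPP
# timeout.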


class CNSWorkerThread(worker_thread.WorkerThread):
  """Worker thread. Runs a test for each task in the queue."""

  def __init__(self, *args, **kwargs):
    """Sets up CNSWorkerThread instance variables."""
    # Allocate instance vars before WorkerThread.__init__ runs the thread.
    self._metrics = {}
    self._test_iterations = _TEST_ITERATIONS
    worker_thread.WorkerThread.__init__(self, *args, **kwargs)

  def _HaveMetricOrError(self, var_name, unique_url):
    """Checks if the page has the variable value ready or an error has occurred.

    The variable value must be set to < 0 pre-run.

    Args:
      var_name: The variable name to check the metric for.
      unique_url: The url of the page to check for the variable's metric.

    Returns:
      True if the var_name value is >= 0 or if the test has ended (endTest).
    """
    self._metrics[var_name] = int(self.GetDOMValue(var_name, url=unique_url))
    end_test = self.GetDOMValue('endTest', url=unique_url)

    return self._metrics[var_name] >= 0 or end_test

  def _GetEventsLog(self, unique_url):
    """Returns the log of video events fired while running the test.

    Args:
      unique_url: The url of the page identifying the test.
    """
    return self.GetDOMValue('eventsMsg', url=unique_url)

  def _GetVideoProgress(self, unique_url):
    """Gets the video's current play progress percentage.

    Args:
      unique_url: The url of the page to check for video play progress.
    """
    return int(self.CallJavascriptFunc('calculateProgress', url=unique_url))

  def RunTask(self, unique_url, task):
    """Runs the specified task on the given url.

    It is assumed that a tab with the unique_url is already loaded.

    Args:
      unique_url: A unique identifier of the test page.
      task: A (series_name, settings, (file_name, run_epp)) tuple.

    Returns:
      True if at least one iteration of the test ran as expected.
    """
    ttp_results = []
    epp_results = []
    # Build video source URL. Values <= 0 mean the setting is disabled.
    series_name, settings, (file_name, run_epp) = task
    video_url = cns_test_base.GetFileURL(
        file_name, bandwidth=settings[0], latency=settings[1],
        loss=settings[2], new_port=True)
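    # new_port=True presumably asks the CNS to serve this run from a freshly
    # allocated constrained port so earlier runs do not skew the measurements.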

    graph_name = series_name + '_' + os.path.basename(file_name)
    for iter_num in xrange(self._test_iterations):
      # Start the test!
      self.CallJavascriptFunc('startTest', [video_url], url=unique_url)

      # Wait until the necessary metrics have been collected.
      self._metrics['epp'] = self._metrics['ttp'] = -1
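      # Values < 0 mean 'not reported yet'; _HaveMetricOrError treats a value
      # >= 0 (or an ended test) as ready.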
      self.WaitUntil(self._HaveMetricOrError, args=['ttp', unique_url],
                     retry_sleep=1, timeout=_TEST_TTP_TIMEOUT, debug=False)
      # Do not wait for epp if ttp is not available.
      if self._metrics['ttp'] >= 0:
        ttp_results.append(self._metrics['ttp'])
        if run_epp:
          self.WaitUntil(
              self._HaveMetricOrError, args=['epp', unique_url], retry_sleep=2,
              timeout=_TEST_EPP_TIMEOUT, debug=False)

          if self._metrics['epp'] >= 0:
            epp_results.append(self._metrics['epp'])

        logging.debug('Iteration:%d - Test %s ended with %d%% of the video '
                      'played.', iter_num, graph_name,
                      self._GetVideoProgress(unique_url))

      if self._metrics['ttp'] < 0 or (run_epp and self._metrics['epp'] < 0):
        logging.error('Iteration:%d - Test %s failed to end gracefully due '
                      'to time-out or error.\nVideo events fired:\n%s',
                      iter_num, graph_name, self._GetEventsLog(unique_url))

    # End of iterations; print results.
    pyauto_utils.PrintPerfResult('ttp', graph_name, ttp_results, 'ms')

    # Return True if we got at least one result to report.
    if run_epp:
      pyauto_utils.PrintPerfResult('epp', graph_name, epp_results, '%')
      return len(epp_results) != 0
    return len(ttp_results) != 0


class MediaConstrainedNetworkPerfTest(cns_test_base.CNSTestBase):
  """PyAuto test container. See file doc string for more information."""

  def _RunDummyTest(self, test_path):
    """Runs a dummy test with high bandwidth and no latency or packet loss.

    Fails the unit test if the dummy test does not end.

    Args:
      test_path: Path to HTML/JavaScript test code.
    """
    tasks = Queue.Queue()
    tasks.put(('Dummy Test', [5000, 0, 0], (_TEST_MEDIA_EPP[0], True)))
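    # Settings are [bandwidth, latency, loss]: a generous bandwidth cap of 5000
    # (presumably kbps) with latency and packet loss disabled (values <= 0).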
    # The dummy test should finish successfully, passing all checks.
    if worker_thread.RunWorkerThreads(self, CNSWorkerThread, tasks, 1,
                                      test_path):
      self.fail('Failed to run dummy test.')

  def testConstrainedNetworkPerf(self):
    """Starts CNS, spins up worker threads to run through _TESTS_TO_RUN."""
    # Run a dummy test to avoid Chrome/CNS startup overhead.
    logging.debug('Starting a dummy test to avoid Chrome/CNS startup overhead.')
    self._RunDummyTest(_TEST_HTML_PATH)
    logging.debug('Dummy test has finished. Starting real perf tests.')

    # Tests that wait for EPP metrics.
    media_files = [(name, True) for name in _TEST_MEDIA_EPP]
    media_files.extend((name, False) for name in _TEST_MEDIA_NO_EPP)
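    # Each (file, run_epp) pair tells RunTask whether to wait for the EPP
    # metric; CreateCNSPerfTasks presumably crosses these files with
    # _TESTS_TO_RUN to build the full task matrix.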
    tasks = cns_test_base.CreateCNSPerfTasks(_TESTS_TO_RUN, media_files)
    if worker_thread.RunWorkerThreads(self, CNSWorkerThread, tasks,
                                      _TEST_THREADS, _TEST_HTML_PATH):
      self.fail('Some tests failed to run as expected.')


if __name__ == '__main__':
  pyauto_media.Main()