Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(17)

Side by Side Diff: chrome/test/vr/perf/latency/webvr_latency_test.py

Issue 2904583003: Support multiple URLs for latency testing (Closed)
Patch Set: Fix adb startup message appearing in device name Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « chrome/test/vr/perf/latency/run_latency_test.py ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2017 The Chromium Authors. All rights reserved. 1 # Copyright 2017 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import motopho_thread as mt 5 import motopho_thread as mt
6 import robot_arm as ra 6 import robot_arm as ra
7 7
8 import json 8 import json
9 import glob 9 import glob
10 import logging 10 import logging
11 import numpy 11 import numpy
12 import os 12 import os
13 import re 13 import re
14 import subprocess 14 import subprocess
15 import sys 15 import sys
16 import time 16 import time
17 17
18 18
19 MOTOPHO_THREAD_TIMEOUT = 30 19 MOTOPHO_THREAD_TIMEOUT = 15
20 DEFAULT_URLS = [
21 # TODO(bsheedy): See about having versioned copies of the flicker app
22 # instead of using personal github.
23 # Purely a flicker app - no additional CPU/GPU load
24 'https://weableandbob.github.io/Motopho/'
25 'flicker_apps/webvr/webvr-flicker-app-klaus.html?'
26 'polyfill=0\&canvasClickPresents=1',
27 # URLs that render 3D scenes in addition to the Motopho patch
28 # Heavy CPU load, moderate GPU load
29 'https://webvr.info/samples/test-slow-render.html?'
30 'latencyPatch=1\&canvasClickPresents=1\&'
31 'heavyGpu=1\&workTime=20\&cubeCount=8\&cubeScale=0.4',
32 # Moderate CPU load, light GPU load
33 'https://webvr.info/samples/test-slow-render.html?'
34 'latencyPatch=1\&canvasClickPresents=1\&'
35 'heavyGpu=1\&workTime=12\&cubeCount=8\&cubeScale=0.3',
36 # Light CPU load, moderate GPU load
37 'https://webvr.info/samples/test-slow-render.html?'
38 'latencyPatch=1\&canvasClickPresents=1\&'
39 'heavyGpu=1\&workTime=5\&cubeCount=8\&cubeScale=0.4',
40 # Heavy CPU load, very light GPU load
41 'https://webvr.info/samples/test-slow-render.html?'
42 'latencyPatch=1\&canvasClickPresents=1\&'
43 'workTime=20',
44 # No additional CPU load, very light GPU load
45 'https://webvr.info/samples/test-slow-render.html?'
46 'latencyPatch=1\&canvasClickPresents=1',
47 ]
20 48
21 49
22 def GetTtyDevices(tty_pattern, vendor_ids): 50 def GetTtyDevices(tty_pattern, vendor_ids):
23 """Finds all devices connected to tty that match a pattern and device id. 51 """Finds all devices connected to tty that match a pattern and device id.
24 52
25 If a serial device is connected to the computer via USB, this function 53 If a serial device is connected to the computer via USB, this function
26 will check all tty devices that match tty_pattern, and return the ones 54 will check all tty devices that match tty_pattern, and return the ones
27 that have vendor identification number in the list vendor_ids. 55 that have vendor identification number in the list vendor_ids.
28 56
29 Args: 57 Args:
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
62 class WebVrLatencyTest(object): 90 class WebVrLatencyTest(object):
63 """Base class for all WebVR latency tests. 91 """Base class for all WebVR latency tests.
64 92
65 This is meant to be subclassed for each platform the test is run on. While 93 This is meant to be subclassed for each platform the test is run on. While
66 the latency test itself is cross-platform, the setup and teardown for 94 the latency test itself is cross-platform, the setup and teardown for
67 tests is platform-dependent. 95 tests is platform-dependent.
68 """ 96 """
69 def __init__(self, args): 97 def __init__(self, args):
70 self.args = args 98 self.args = args
71 self._num_samples = args.num_samples 99 self._num_samples = args.num_samples
72 self._flicker_app_url = args.url 100 self._test_urls = args.urls or DEFAULT_URLS
73 assert (self._num_samples > 0),'Number of samples must be greater than 0' 101 assert (self._num_samples > 0),'Number of samples must be greater than 0'
74 self._device_name = 'generic_device' 102 self._device_name = 'generic_device'
103 self._test_results = {}
75 104
76 # Connect to the Arduino that drives the servos 105 # Connect to the Arduino that drives the servos
77 devices = GetTtyDevices(r'ttyACM\d+', [0x2a03, 0x2341]) 106 devices = GetTtyDevices(r'ttyACM\d+', [0x2a03, 0x2341])
78 assert (len(devices) == 1),'Found %d devices, expected 1' % len(devices) 107 assert (len(devices) == 1),'Found %d devices, expected 1' % len(devices)
79 self.robot_arm = ra.RobotArm(devices[0]) 108 self.robot_arm = ra.RobotArm(devices[0])
80 109
81 def RunTest(self): 110 def RunTests(self):
82 """Runs the steps to start Chrome, measure/save latency, and clean up.""" 111 """Runs latency tests on all the URLs provided to the test on creation.
83 self._Setup()
84 self._Run()
85 self._Teardown()
86 112
87 def _Setup(self): 113 Repeatedly runs the steps to start Chrome, measure/store latency, and
88 """Perform any platform-specific setup.""" 114 clean up before storing all results to a single file for dashboard
115 uploading.
116 """
117 try:
118 self._OneTimeSetup()
119 for url in self._test_urls:
120 self._Setup(url)
121 self._Run(url)
122 self._Teardown()
123 self._SaveResultsToFile()
124 finally:
125 self._OneTimeTeardown()
126
127 def _OneTimeSetup(self):
128 """Performs any platform-specific setup once before any tests."""
89 raise NotImplementedError( 129 raise NotImplementedError(
90 'Platform-specific setup must be implemented in subclass') 130 'Platform-specific setup must be implemented in subclass')
91 131
92 def _Run(self): 132 def _Setup(self, url):
133 """Performs any platform-specific setup before each test."""
134 raise NotImplementedError(
135 'Platform-specific setup must be implemented in subclass')
136
137 def _Run(self, url):
93 """Run the latency test. 138 """Run the latency test.
94 139
95 Handles the actual latency measurement, which is identical across 140 Handles the actual latency measurement, which is identical across
96 different platforms, as well as result saving. 141 different platforms, as well as result storing.
97 """ 142 """
98 # Motopho scripts use relative paths, so switch to the Motopho directory 143 # Motopho scripts use relative paths, so switch to the Motopho directory
99 os.chdir(self.args.motopho_path) 144 os.chdir(self.args.motopho_path)
100 145
101 # Set up the thread that runs the Motopho script 146 # Set up the thread that runs the Motopho script
102 motopho_thread = mt.MotophoThread(self._num_samples) 147 motopho_thread = mt.MotophoThread(self._num_samples)
103 motopho_thread.start() 148 motopho_thread.start()
104 149
105 # Run multiple times so we can get an average and standard deviation 150 # Run multiple times so we can get an average and standard deviation
106 for _ in xrange(self._num_samples): 151 for _ in xrange(self._num_samples):
107 self.robot_arm.ResetPosition() 152 self.robot_arm.ResetPosition()
108 # Start the Motopho script 153 # Start the Motopho script
109 motopho_thread.StartIteration() 154 motopho_thread.StartIteration()
110 # Let the Motopho be stationary so the script can calculate the bias 155 # Let the Motopho be stationary so the script can calculate the bias
111 time.sleep(3) 156 time.sleep(3)
112 motopho_thread.BlockNextIteration() 157 motopho_thread.BlockNextIteration()
113 # Move so we can measure latency 158 # Move so we can measure latency
114 self.robot_arm.StartMotophoMovement() 159 self.robot_arm.StartMotophoMovement()
115 if not motopho_thread.WaitForIterationEnd(MOTOPHO_THREAD_TIMEOUT): 160 if not motopho_thread.WaitForIterationEnd(MOTOPHO_THREAD_TIMEOUT):
116 # TODO(bsheedy): Look into ways to prevent Motopho from not sending any 161 # TODO(bsheedy): Look into ways to prevent Motopho from not sending any
117 # data until unplugged and replugged into the machine after a reboot. 162 # data until unplugged and replugged into the machine after a reboot.
118 logging.error('Motopho thread timeout, ' 163 logging.error('Motopho thread timeout, '
119 'Motopho may need to be replugged.') 164 'Motopho may need to be replugged.')
120 self.robot_arm.StopAllMovement() 165 self.robot_arm.StopAllMovement()
121 time.sleep(1) 166 time.sleep(1)
122 self._SaveResults(motopho_thread.latencies, motopho_thread.correlations) 167 self._StoreResults(motopho_thread.latencies, motopho_thread.correlations,
168 url)
123 169
124 def _Teardown(self): 170 def _Teardown(self):
125 """Performs any platform-specific teardown.""" 171 """Performs any platform-specific teardown after each test."""
126 raise NotImplementedError( 172 raise NotImplementedError(
127 'Platform-specific setup must be implemented in subclass') 173 'Platform-specific teardown must be implemented in subclass')
174
175 def _OneTimeTeardown(self):
176 """Performs any platform-specific teardown after all tests."""
177 raise NotImplementedError(
178 'Platform-specific teardown must be implemented in subclass')
128 179
129 def _RunCommand(self, cmd): 180 def _RunCommand(self, cmd):
130 """Runs the given cmd list and returns its output. 181 """Runs the given cmd list and returns its output.
131 182
132 Prints the command's output and exits if any error occurs. 183 Prints the command's output and exits if any error occurs.
133 184
134 Returns: 185 Returns:
135 A string containing the stdout and stderr of the command. 186 A string containing the stdout and stderr of the command.
136 """ 187 """
137 try: 188 try:
138 return subprocess.check_output(cmd, stderr=subprocess.STDOUT) 189 return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
139 except subprocess.CalledProcessError as e: 190 except subprocess.CalledProcessError as e:
140 logging.error('Failed command output: %s', e.output) 191 logging.error('Failed command output: %s', e.output)
141 raise e 192 raise e
142 193
143 def _SetChromeCommandLineFlags(self, flags): 194 def _SetChromeCommandLineFlags(self, flags):
144 raise NotImplementedError( 195 raise NotImplementedError(
145 'Command-line flag setting must be implemented in subclass') 196 'Command-line flag setting must be implemented in subclass')
146 197
147 def _SaveResults(self, latencies, correlations): 198 def _StoreResults(self, latencies, correlations, url):
148 """Saves the results to a JSON file. 199 """Temporarily stores the results of a test.
149 200
150 Saved JSON object is compatible with Chrome perf dashboard if 201 Stores the given results in memory to be later retrieved and written to
151 put in as the 'chart_data' value. Also logs the raw data and its 202 a file in _SaveResultsToFile once all tests are done. Also logs the raw
152 average/standard deviation. 203 data and its average/standard deviation.
153 """ 204 """
154 avg_latency = sum(latencies) / len(latencies) 205 avg_latency = sum(latencies) / len(latencies)
155 std_latency = numpy.std(latencies) 206 std_latency = numpy.std(latencies)
156 avg_correlation = sum(correlations) / len(correlations) 207 avg_correlation = sum(correlations) / len(correlations)
157 std_correlation = numpy.std(correlations) 208 std_correlation = numpy.std(correlations)
158 logging.info('Raw latencies: %s\nRaw correlations: %s\n' 209 logging.info('\nURL: %s\n'
210 'Raw latencies: %s\nRaw correlations: %s\n'
159 'Avg latency: %f +/- %f\nAvg correlation: %f +/- %f', 211 'Avg latency: %f +/- %f\nAvg correlation: %f +/- %f',
160 str(latencies), str(correlations), avg_latency, std_latency, 212 url, str(latencies), str(correlations), avg_latency,
161 avg_correlation, std_correlation) 213 std_latency, avg_correlation, std_correlation)
162 214
215 self._test_results[url] = {
216 'correlations': correlations,
217 'std_correlation': std_correlation,
218 'latencies': latencies,
219 'std_latency': std_latency,
220 }
221
222 def _SaveResultsToFile(self):
163 if not (self.args.output_dir and os.path.isdir(self.args.output_dir)): 223 if not (self.args.output_dir and os.path.isdir(self.args.output_dir)):
164 logging.warning('No output directory set, not saving results to file') 224 logging.warning('No output directory set, not saving results to file')
165 return 225 return
166 226
227 correlation_string = self._device_name + '_correlation'
228 latency_string = self._device_name + '_latency'
229 charts = {
230 correlation_string: {
231 'summary': {
232 'improvement_direction': 'up',
233 'name': correlation_string,
234 'std': 0.0,
235 'type': 'list_of_scalar_values',
236 'units': '',
237 'values': [],
238 }
239 },
240 latency_string: {
241 'summary': {
242 'improvement_direction': 'down',
243 'name': latency_string,
244 'std': 0.0,
245 'type': 'list_of_scalar_values',
246 'units': 'ms',
247 'values': [],
248 }
249 }
250 }
251 for url, results in self._test_results.iteritems():
252 charts[correlation_string][url] = {
253 'improvement_direction': 'up',
254 'name': correlation_string,
255 'std': results['std_correlation'],
256 'type': 'list_of_scalar_values',
257 'units': '',
258 'values': results['correlations'],
259 }
260
261 charts[correlation_string]['summary']['values'].extend(
262 results['correlations'])
263
264 charts[latency_string][url] = {
265 'improvement_direction': 'down',
266 'name': latency_string,
267 'std': results['std_latency'],
268 'type': 'list_of_scalar_values',
269 'units': 'ms',
270 'values': results['latencies'],
271 }
272
273 charts[latency_string]['summary']['values'].extend(results['latencies'])
274
167 results = { 275 results = {
168 'format_version': '1.0', 276 'format_version': '1.0',
169 'benchmark_name': 'webvr_latency', 277 'benchmark_name': 'webvr_latency',
170 'benchmark_description': 'Measures the motion-to-photon latency of WebVR', 278 'benchmark_description': 'Measures the motion-to-photon latency of WebVR',
171 'charts': { 279 'charts': charts,
172 'correlation': {
173 'summary': {
174 'improvement_direction': 'up',
175 'name': 'correlation',
176 'std': std_correlation,
177 'type': 'list_of_scalar_values',
178 'units': '',
179 'values': correlations,
180 },
181 },
182 'latency': {
183 'summary': {
184 'improvement_direction': 'down',
185 'name': 'latency',
186 'std': std_latency,
187 'type': 'list_of_scalar_values',
188 'units': 'ms',
189 'values': latencies,
190 },
191 }
192 }
193 } 280 }
194 281
195 with file(os.path.join(self.args.output_dir, 282 with file(os.path.join(self.args.output_dir,
196 self.args.results_file), 'w') as outfile: 283 self.args.results_file), 'w') as outfile:
197 json.dump(results, outfile) 284 json.dump(results, outfile)
OLDNEW
« no previous file with comments | « chrome/test/vr/perf/latency/run_latency_test.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698