Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2017 The Chromium Authors. All rights reserved. | 1 # Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import motopho_thread as mt | 5 import motopho_thread as mt |
| 6 import robot_arm as ra | 6 import robot_arm as ra |
| 7 | 7 |
| 8 import json | 8 import json |
| 9 import glob | 9 import glob |
| 10 import logging | 10 import logging |
| 11 import numpy | 11 import numpy |
| 12 import os | 12 import os |
| 13 import re | 13 import re |
| 14 import subprocess | 14 import subprocess |
| 15 import sys | 15 import sys |
| 16 import time | 16 import time |
| 17 | 17 |
| 18 | 18 |
| 19 MOTOPHO_THREAD_TIMEOUT = 30 | 19 MOTOPHO_THREAD_TIMEOUT = 30 |
| 20 DEFAULT_URLS = [ | |
| 21 # TODO(bsheedy): See about having versioned copies of the flicker app | |
| 22 # instead of using personal github. | |
| 23 # Purely a flicker app - no additional CPU/GPU load | |
| 24 'https://weableandbob.github.io/Motopho/' | |
| 25 'flicker_apps/webvr/webvr-flicker-app-klaus.html?' | |
| 26 'polyfill=0\&canvasClickPresents=1', | |
| 27 # URLs that render 3D scenes in addition to the Motopho patch | |
| 28 # Heavy CPU load, moderate GPU load | |
| 29 'https://webvr.info/samples/test-slow-render.html?' | |
| 30 'latencyPatch=1\&canvasClickPresents=1\&' | |
| 31 'heavyGpu=1\&workTime=20\&cubeCount=8\&cubeScale=0.4', | |
| 32 # Moderate CPU load, light GPU load | |
| 33 'https://webvr.info/samples/test-slow-render.html?' | |
| 34 'latencyPatch=1\&canvasClickPresents=1\&' | |
| 35 'heavyGpu=1\&workTime=12\&cubeCount=8\&cubeScale=0.3', | |
| 36 # Light CPU load, moderate GPU load | |
| 37 'https://webvr.info/samples/test-slow-render.html?' | |
| 38 'latencyPatch=1\&canvasClickPresents=1\&' | |
| 39 'heavyGpu=1\&workTime=5\&cubeCount=8\&cubeScale=0.4', | |
| 40 # Heavy CPU load, very light GPU load | |
| 41 'https://webvr.info/samples/test-slow-render.html?' | |
| 42 'latencyPatch=1\&canvasClickPresents=1\&' | |
| 43 'workTime=20', | |
| 44 ] | |
| 20 | 45 |
| 21 | 46 |
| 22 def GetTtyDevices(tty_pattern, vendor_ids): | 47 def GetTtyDevices(tty_pattern, vendor_ids): |
| 23 """Finds all devices connected to tty that match a pattern and device id. | 48 """Finds all devices connected to tty that match a pattern and device id. |
| 24 | 49 |
| 25 If a serial device is connected to the computer via USB, this function | 50 If a serial device is connected to the computer via USB, this function |
| 26 will check all tty devices that match tty_pattern, and return the ones | 51 will check all tty devices that match tty_pattern, and return the ones |
| 27 that have vendor identification number in the list vendor_ids. | 52 that have vendor identification number in the list vendor_ids. |
| 28 | 53 |
| 29 Args: | 54 Args: |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 62 class WebVrLatencyTest(object): | 87 class WebVrLatencyTest(object): |
| 63 """Base class for all WebVR latency tests. | 88 """Base class for all WebVR latency tests. |
| 64 | 89 |
| 65 This is meant to be subclassed for each platform the test is run on. While | 90 This is meant to be subclassed for each platform the test is run on. While |
| 66 the latency test itself is cross-platform, the setup and teardown for | 91 the latency test itself is cross-platform, the setup and teardown for |
| 67 tests is platform-dependent. | 92 tests is platform-dependent. |
| 68 """ | 93 """ |
| 69 def __init__(self, args): | 94 def __init__(self, args): |
| 70 self.args = args | 95 self.args = args |
| 71 self._num_samples = args.num_samples | 96 self._num_samples = args.num_samples |
| 72 self._flicker_app_url = args.url | 97 self._test_urls = args.urls or DEFAULT_URLS |
| 73 assert (self._num_samples > 0),'Number of samples must be greater than 0' | 98 assert (self._num_samples > 0),'Number of samples must be greater than 0' |
| 74 self._device_name = 'generic_device' | 99 self._device_name = 'generic_device' |
| 100 self._test_results = {} | |
| 75 | 101 |
| 76 # Connect to the Arduino that drives the servos | 102 # Connect to the Arduino that drives the servos |
| 77 devices = GetTtyDevices(r'ttyACM\d+', [0x2a03, 0x2341]) | 103 devices = GetTtyDevices(r'ttyACM\d+', [0x2a03, 0x2341]) |
| 78 assert (len(devices) == 1),'Found %d devices, expected 1' % len(devices) | 104 assert (len(devices) == 1),'Found %d devices, expected 1' % len(devices) |
| 79 self.robot_arm = ra.RobotArm(devices[0]) | 105 self.robot_arm = ra.RobotArm(devices[0]) |
| 80 | 106 |
| 81 def RunTest(self): | 107 def RunTests(self): |
| 82 """Runs the steps to start Chrome, measure/save latency, and clean up.""" | 108 """Runs latency tests on all the URLs provided to the test on creation. |
| 83 self._Setup() | |
| 84 self._Run() | |
| 85 self._Teardown() | |
| 86 | 109 |
| 87 def _Setup(self): | 110 Repeatedly runs the steps to start Chrome, measure/store latency, and |
| 88 """Perform any platform-specific setup.""" | 111 clean up before storing all results to a single file for dashboard |
| 112 uploading. | |
| 113 """ | |
| 114 try: | |
| 115 self._OneTimeSetup() | |
| 116 for url in self._test_urls: | |
| 117 self._Setup(url) | |
| 118 self._Run(url) | |
| 119 self._Teardown() | |
| 120 self._SaveResultsToFile() | |
| 121 finally: | |
| 122 self._OneTimeTeardown() | |
| 123 | |
| 124 def _OneTimeSetup(self): | |
| 125 """Performs any platform-specific setup once before any tests.""" | |
| 89 raise NotImplementedError( | 126 raise NotImplementedError( |
| 90 'Platform-specific setup must be implemented in subclass') | 127 'Platform-specific setup must be implemented in subclass') |
| 91 | 128 |
| 92 def _Run(self): | 129 def _Setup(self, url): |
| 130 """Performs any platform-specific setup before each test.""" | |
|
Lei Lei
2017/05/24 01:04:47
s/Perform/Performs to keep consistent.
bsheedy
2017/05/25 17:46:23
Done.
| |
| 131 raise NotImplementedError( | |
| 132 'Platform-specific setup must be implemented in subclass') | |
| 133 | |
| 134 def _Run(self, url): | |
| 93 """Run the latency test. | 135 """Run the latency test. |
| 94 | 136 |
| 95 Handles the actual latency measurement, which is identical across | 137 Handles the actual latency measurement, which is identical across |
| 96 different platforms, as well as result saving. | 138 different platforms, as well as result storing. |
| 97 """ | 139 """ |
| 98 # Motopho scripts use relative paths, so switch to the Motopho directory | 140 # Motopho scripts use relative paths, so switch to the Motopho directory |
| 99 os.chdir(self.args.motopho_path) | 141 os.chdir(self.args.motopho_path) |
| 100 | 142 |
| 101 # Set up the thread that runs the Motopho script | 143 # Set up the thread that runs the Motopho script |
| 102 motopho_thread = mt.MotophoThread(self._num_samples) | 144 motopho_thread = mt.MotophoThread(self._num_samples) |
| 103 motopho_thread.start() | 145 motopho_thread.start() |
| 104 | 146 |
| 105 # Run multiple times so we can get an average and standard deviation | 147 # Run multiple times so we can get an average and standard deviation |
| 106 for _ in xrange(self._num_samples): | 148 for _ in xrange(self._num_samples): |
| 107 self.robot_arm.ResetPosition() | 149 self.robot_arm.ResetPosition() |
| 108 # Start the Motopho script | 150 # Start the Motopho script |
| 109 motopho_thread.StartIteration() | 151 motopho_thread.StartIteration() |
| 110 # Let the Motopho be stationary so the script can calculate the bias | 152 # Let the Motopho be stationary so the script can calculate the bias |
| 111 time.sleep(3) | 153 time.sleep(3) |
| 112 motopho_thread.BlockNextIteration() | 154 motopho_thread.BlockNextIteration() |
| 113 # Move so we can measure latency | 155 # Move so we can measure latency |
| 114 self.robot_arm.StartMotophoMovement() | 156 self.robot_arm.StartMotophoMovement() |
| 115 if not motopho_thread.WaitForIterationEnd(MOTOPHO_THREAD_TIMEOUT): | 157 if not motopho_thread.WaitForIterationEnd(MOTOPHO_THREAD_TIMEOUT): |
| 116 # TODO(bsheedy): Look into ways to prevent Motopho from not sending any | 158 # TODO(bsheedy): Look into ways to prevent Motopho from not sending any |
| 117 # data until unplugged and replugged into the machine after a reboot. | 159 # data until unplugged and replugged into the machine after a reboot. |
| 118 logging.error('Motopho thread timeout, ' | 160 logging.error('Motopho thread timeout, ' |
| 119 'Motopho may need to be replugged.') | 161 'Motopho may need to be replugged.') |
| 120 self.robot_arm.StopAllMovement() | 162 self.robot_arm.StopAllMovement() |
| 121 time.sleep(1) | 163 time.sleep(1) |
| 122 self._SaveResults(motopho_thread.latencies, motopho_thread.correlations) | 164 self._StoreResults(motopho_thread.latencies, motopho_thread.correlations, |
| 165 url) | |
| 123 | 166 |
| 124 def _Teardown(self): | 167 def _Teardown(self): |
| 125 """Performs any platform-specific teardown.""" | 168 """Performs any platform-specific teardown after each test.""" |
| 126 raise NotImplementedError( | 169 raise NotImplementedError( |
| 127 'Platform-specific setup must be implemented in subclass') | 170 'Platform-specific teardown must be implemented in subclass') |
| 171 | |
| 172 def _OneTimeTeardown(self): | |
| 173 """Performs any platform-specific teardown after all tests.""" | |
| 174 raise NotImplementedError( | |
| 175 'Platform-specific teardown must be implemented in subclass') | |
| 128 | 176 |
| 129 def _RunCommand(self, cmd): | 177 def _RunCommand(self, cmd): |
| 130 """Runs the given cmd list and returns its output. | 178 """Runs the given cmd list and returns its output. |
| 131 | 179 |
| 132 Prints the command's output and exits if any error occurs. | 180 Prints the command's output and exits if any error occurs. |
| 133 | 181 |
| 134 Returns: | 182 Returns: |
| 135 A string containing the stdout and stderr of the command. | 183 A string containing the stdout and stderr of the command. |
| 136 """ | 184 """ |
| 137 try: | 185 try: |
| 138 return subprocess.check_output(cmd, stderr=subprocess.STDOUT) | 186 return subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
| 139 except subprocess.CalledProcessError as e: | 187 except subprocess.CalledProcessError as e: |
| 140 logging.error('Failed command output: %s', e.output) | 188 logging.error('Failed command output: %s', e.output) |
| 141 raise e | 189 raise e |
| 142 | 190 |
| 143 def _SetChromeCommandLineFlags(self, flags): | 191 def _SetChromeCommandLineFlags(self, flags): |
| 144 raise NotImplementedError( | 192 raise NotImplementedError( |
| 145 'Command-line flag setting must be implemented in subclass') | 193 'Command-line flag setting must be implemented in subclass') |
| 146 | 194 |
| 147 def _SaveResults(self, latencies, correlations): | 195 def _StoreResults(self, latencies, correlations, url): |
| 148 """Saves the results to a JSON file. | 196 """Temporarily stores the results of a test. |
| 149 | 197 |
| 150 Saved JSON object is compatible with Chrome perf dashboard if | 198 Stores the given results in memory to be later retrieved and written to |
| 151 put in as the 'chart_data' value. Also logs the raw data and its | 199 a file in _SaveResultsToFile once all tests are done. Also logs the raw |
| 152 average/standard deviation. | 200 data and its average/standard deviation. |
| 153 """ | 201 """ |
| 154 avg_latency = sum(latencies) / len(latencies) | 202 avg_latency = sum(latencies) / len(latencies) |
| 155 std_latency = numpy.std(latencies) | 203 std_latency = numpy.std(latencies) |
| 156 avg_correlation = sum(correlations) / len(correlations) | 204 avg_correlation = sum(correlations) / len(correlations) |
| 157 std_correlation = numpy.std(correlations) | 205 std_correlation = numpy.std(correlations) |
| 158 logging.info('Raw latencies: %s\nRaw correlations: %s\n' | 206 logging.info('\nURL: %s\n' |
| 207 'Raw latencies: %s\nRaw correlations: %s\n' | |
| 159 'Avg latency: %f +/- %f\nAvg correlation: %f +/- %f', | 208 'Avg latency: %f +/- %f\nAvg correlation: %f +/- %f', |
| 160 str(latencies), str(correlations), avg_latency, std_latency, | 209 url, str(latencies), str(correlations), avg_latency, |
| 161 avg_correlation, std_correlation) | 210 std_latency, avg_correlation, std_correlation) |
| 162 | 211 |
| 212 self._test_results[url] = { | |
| 213 'correlations': correlations, | |
| 214 'std_correlation': std_correlation, | |
| 215 'latencies': latencies, | |
| 216 'std_latency': std_latency, | |
| 217 } | |
| 218 | |
| 219 def _SaveResultsToFile(self): | |
| 163 if not (self.args.output_dir and os.path.isdir(self.args.output_dir)): | 220 if not (self.args.output_dir and os.path.isdir(self.args.output_dir)): |
| 164 logging.warning('No output directory set, not saving results to file') | 221 logging.warning('No output directory set, not saving results to file') |
| 165 return | 222 return |
| 166 | 223 |
| 224 correlation_string = self._device_name + '_correlation' | |
| 225 latency_string = self._device_name + '_latency' | |
| 226 charts = { | |
| 227 correlation_string: { | |
| 228 'summary': { | |
| 229 'improvement_direction': 'up', | |
| 230 'name': correlation_string, | |
| 231 'std': 0.0, | |
| 232 'type': 'list_of_scalar_values', | |
| 233 'units': '', | |
| 234 'values': [], | |
| 235 } | |
| 236 }, | |
| 237 latency_string: { | |
| 238 'summary': { | |
| 239 'improvement_direction': 'down', | |
| 240 'name': latency_string, | |
| 241 'std': 0.0, | |
| 242 'type': 'list_of_scalar_values', | |
| 243 'units': 'ms', | |
| 244 'values': [], | |
| 245 } | |
| 246 } | |
| 247 } | |
| 248 for url, results in self._test_results.iteritems(): | |
| 249 charts[correlation_string][url] = { | |
| 250 'improvement_direction': 'up', | |
| 251 'name': correlation_string, | |
| 252 'std': results['std_correlation'], | |
| 253 'type': 'list_of_scalar_values', | |
| 254 'units': '', | |
| 255 'values': results['correlations'], | |
| 256 } | |
| 257 | |
| 258 charts[correlation_string]['summary']['values'].extend( | |
| 259 results['correlations']) | |
| 260 | |
| 261 charts[latency_string][url] = { | |
| 262 'improvement_direction': 'down', | |
| 263 'name': latency_string, | |
| 264 'std': results['std_latency'], | |
| 265 'type': 'list_of_scalar_values', | |
| 266 'units': 'ms', | |
| 267 'values': results['latencies'], | |
| 268 } | |
| 269 | |
| 270 charts[latency_string]['summary']['values'].extend(results['latencies']) | |
| 271 | |
| 167 results = { | 272 results = { |
| 168 'format_version': '1.0', | 273 'format_version': '1.0', |
| 169 'benchmark_name': 'webvr_latency', | 274 'benchmark_name': 'webvr_latency', |
| 170 'benchmark_description': 'Measures the motion-to-photon latency of WebVR', | 275 'benchmark_description': 'Measures the motion-to-photon latency of WebVR', |
| 171 'charts': { | 276 'charts': charts, |
| 172 'correlation': { | |
| 173 'summary': { | |
| 174 'improvement_direction': 'up', | |
| 175 'name': 'correlation', | |
| 176 'std': std_correlation, | |
| 177 'type': 'list_of_scalar_values', | |
| 178 'units': '', | |
| 179 'values': correlations, | |
| 180 }, | |
| 181 }, | |
| 182 'latency': { | |
| 183 'summary': { | |
| 184 'improvement_direction': 'down', | |
| 185 'name': 'latency', | |
| 186 'std': std_latency, | |
| 187 'type': 'list_of_scalar_values', | |
| 188 'units': 'ms', | |
| 189 'values': latencies, | |
| 190 }, | |
| 191 } | |
| 192 } | |
| 193 } | 277 } |
| 194 | 278 |
| 195 with file(os.path.join(self.args.output_dir, | 279 with file(os.path.join(self.args.output_dir, |
| 196 self.args.results_file), 'w') as outfile: | 280 self.args.results_file), 'w') as outfile: |
| 197 json.dump(results, outfile) | 281 json.dump(results, outfile) |
| OLD | NEW |