Chromium Code Reviews

Issue 1414403002: Add CommandLineArgumentParameter and use it for WebView tests (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@fix-cr-526885-read-nested-annotations

| OLD | NEW |
|---|---|
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be | 
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. | 
| 4 | 4 | 
| 5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" | 
| 6 | 6 | 
| 7 import logging | 7 import logging | 
| 8 import os | 8 import os | 
| 9 import re | 9 import re | 
| 10 import sys | 10 import sys | 
| 11 import time | 11 import time | 
| 12 from collections import namedtuple | |

jbudorick
2015/11/04 16:34:34
nit: same module import nit

mnaganov (inactive)
2015/11/04 17:51:03
Done.
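For context, the nit appears to refer to Chromium's Python convention of importing modules rather than individual names (an inference from the thread, not stated in it); a minimal sketch of the two forms:

```python
# Discouraged under a module-import style: pulling the symbol in directly.
from collections import namedtuple
Modifiers = namedtuple('Modifiers', ['add', 'remove'])

# Preferred under that style: import the module and qualify the name.
import collections
Modifiers = collections.namedtuple('Modifiers', ['add', 'remove'])
```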
| 12 | 13 | 
| 13 from devil.android import device_errors | 14 from devil.android import device_errors | 
| 14 from pylib import constants | 15 from pylib import constants | 
| 15 from pylib import flag_changer | 16 from pylib import flag_changer | 
| 16 from pylib import valgrind_tools | 17 from pylib import valgrind_tools | 
| 17 from pylib.base import base_test_result | 18 from pylib.base import base_test_result | 
| 18 from pylib.base import base_test_runner | 19 from pylib.base import base_test_runner | 
| 19 from pylib.instrumentation import instrumentation_test_instance | 20 from pylib.instrumentation import instrumentation_test_instance | 
| 20 from pylib.instrumentation import json_perf_parser | 21 from pylib.instrumentation import json_perf_parser | 
| 21 from pylib.instrumentation import test_result | 22 from pylib.instrumentation import test_result | 
| (...skipping 94 matching lines...) | |
| 116 stripped_flags = (l.strip() for l in device_flags_file) | 117 stripped_flags = (l.strip() for l in device_flags_file) | 
| 117 flags_to_add.extend([flag for flag in stripped_flags if flag]) | 118 flags_to_add.extend([flag for flag in stripped_flags if flag]) | 
| 118 self.flags.AddFlags(flags_to_add) | 119 self.flags.AddFlags(flags_to_add) | 
| 119 | 120 | 
| 120 def TearDown(self): | 121 def TearDown(self): | 
| 121 """Cleans up the test harness and saves outstanding data from test run.""" | 122 """Cleans up the test harness and saves outstanding data from test run.""" | 
| 122 if self.flags: | 123 if self.flags: | 
| 123 self.flags.Restore() | 124 self.flags.Restore() | 
| 124 super(TestRunner, self).TearDown() | 125 super(TestRunner, self).TearDown() | 
| 125 | 126 | 
| 126 def TestSetup(self, test): | 127 def TestSetup(self, test, flag_modifiers): | 
| 127 """Sets up the test harness for running a particular test. | 128 """Sets up the test harness for running a particular test. | 
| 128 | 129 | 
| 129 Args: | 130 Args: | 
| 130 test: The name of the test that will be run. | 131 test: The name of the test that will be run. | 
| 131 """ | 132 """ | 
| 132 self.SetupPerfMonitoringIfNeeded(test) | 133 self.SetupPerfMonitoringIfNeeded(test) | 
| 133 self._SetupIndividualTestTimeoutScale(test) | 134 self._SetupIndividualTestTimeoutScale(test) | 
| 134 self.tool.SetupEnvironment() | 135 self.tool.SetupEnvironment() | 
| 135 | 136 | 
| 136 if self.flags and self._IsFreTest(test): | 137 if self.flags: | 
| 137 self.flags.RemoveFlags(['--disable-fre']) | 138 self.flags.PushFlags(add=flag_modifiers.add, remove=flag_modifiers.remove) | 
| 138 | 139 | 
| 139 # Make sure the forwarder is still running. | 140 # Make sure the forwarder is still running. | 
| 140 self._RestartHttpServerForwarderIfNecessary() | 141 self._RestartHttpServerForwarderIfNecessary() | 
| 141 | 142 | 
| 142 if self.coverage_dir: | 143 if self.coverage_dir: | 
| 143 coverage_basename = '%s.ec' % test | 144 coverage_basename = '%s.ec' % test | 
| 144 self.coverage_device_file = '%s/%s/%s' % ( | 145 self.coverage_device_file = '%s/%s/%s' % ( | 
| 145 self.device.GetExternalStoragePath(), | 146 self.device.GetExternalStoragePath(), | 
| 146 TestRunner._DEVICE_COVERAGE_DIR, coverage_basename) | 147 TestRunner._DEVICE_COVERAGE_DIR, coverage_basename) | 
| 147 self.coverage_host_file = os.path.join( | 148 self.coverage_host_file = os.path.join( | 
| (...skipping 16 matching lines...) | |
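The hunk above switches TestSetup from a one-off FRE special case to a general push/pop of per-test flag modifications, with TestTeardown restoring the previous state. A minimal, runnable sketch of that contract, assuming a FlagChanger-like interface (`SimpleFlagChanger` here is hypothetical, not pylib's actual `flag_changer.FlagChanger`):

```python
# Hypothetical stand-in for pylib's flag_changer.FlagChanger, illustrating
# the contract TestSetup and TestTeardown rely on: PushFlags layers per-test
# additions/removals on top of the current flags, and Restore pops back.
class SimpleFlagChanger(object):
  def __init__(self, base_flags):
    # Stack of flag lists; the top is what the device would currently see.
    self._states = [list(base_flags)]

  @property
  def current(self):
    return self._states[-1]

  def AddFlags(self, flags):
    # Suite-level setup: mutate the current state in place.
    self.current.extend(f for f in flags if f not in self.current)

  def PushFlags(self, add=None, remove=None):
    # Per-test setup: derive a new state from the current one.
    new_state = [f for f in self.current if f not in (remove or [])]
    new_state.extend(f for f in (add or []) if f not in new_state)
    self._states.append(new_state)

  def Restore(self):
    # Per-test teardown: drop back to the previous state.
    if len(self._states) > 1:
      self._states.pop()

changer = SimpleFlagChanger(['--enable-test-intents'])
changer.PushFlags(add=['--foo'], remove=['--disable-fre'])
assert '--foo' in changer.current
changer.Restore()
assert '--foo' not in changer.current
```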
| 164 """Determines whether a test is a performance test. | 165 """Determines whether a test is a performance test. | 
| 165 | 166 | 
| 166 Args: | 167 Args: | 
| 167 test: The name of the test to be checked. | 168 test: The name of the test to be checked. | 
| 168 | 169 | 
| 169 Returns: | 170 Returns: | 
| 170 Whether the test is annotated as a performance test. | 171 Whether the test is annotated as a performance test. | 
| 171 """ | 172 """ | 
| 172 return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test) | 173 return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test) | 
| 173 | 174 | 
| 175 def _GetTestCmdlineParameters(self, test): | |
| 176 """Determines whether the test is parameterized to be run with different | |
| 177 command-line flags. | |
| 178 | |
| 179 Args: | |
| 180 test: The name of the test to be checked. | |
| 181 | |
| 182 Returns: | |
| 183 The list of parameters. | |
| 184 """ | |
| 185 annotations = self.test_pkg.GetTestAnnotations(test) | |
| 186 params = instrumentation_test_instance.ParseCommandLineFlagParameters( | |
| 187 annotations) | |
| 188 if not params: | |
| 189 params = [namedtuple('Dummy', ['add', 'remove'])([], [])] | |
| 190 return params | |
| 191 | |
| 174 def SetupPerfMonitoringIfNeeded(self, test): | 192 def SetupPerfMonitoringIfNeeded(self, test): | 
| 175 """Sets up performance monitoring if the specified test requires it. | 193 """Sets up performance monitoring if the specified test requires it. | 
| 176 | 194 | 
| 177 Args: | 195 Args: | 
| 178 test: The name of the test to be run. | 196 test: The name of the test to be run. | 
| 179 """ | 197 """ | 
| 180 if not self._IsPerfTest(test): | 198 if not self._IsPerfTest(test): | 
| 181 return | 199 return | 
| 182 self.device.RunShellCommand( | 200 self.device.RunShellCommand( | 
| 183 ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX]) | 201 ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX]) | 
| 184 self._logcat_monitor = self.device.GetLogcatMonitor() | 202 self._logcat_monitor = self.device.GetLogcatMonitor() | 
| 185 self._logcat_monitor.Start() | 203 self._logcat_monitor.Start() | 
| 186 | 204 | 
| 187 def TestTeardown(self, test, result): | 205 def TestTeardown(self, test, results): | 
| 188 """Cleans up the test harness after running a particular test. | 206 """Cleans up the test harness after running a particular test. | 
| 189 | 207 | 
| 190 Depending on the options of this TestRunner this might handle performance | 208 Depending on the options of this TestRunner this might handle performance | 
| 191 tracking. This method will only be called if the test passed. | 209 tracking. This method will only be called if the test passed. | 
| 192 | 210 | 
| 193 Args: | 211 Args: | 
| 194 test: The name of the test that was just run. | 212 test: The name of the test that was just run. | 
| 195 result: result for this test. | 213 results: results for this test. | 
| 196 """ | 214 """ | 
| 197 | 215 | 
| 198 self.tool.CleanUpEnvironment() | 216 self.tool.CleanUpEnvironment() | 
| 199 | 217 | 
| 200 if self.flags and self._IsFreTest(test): | 218 if self.flags: | 
| 201 self.flags.Restore() | 219 self.flags.Restore() | 
| 202 | 220 | 
| 203 if not result: | 221 if not results: | 
| 204 return | 222 return | 
| 205 if result.DidRunPass(): | 223 if results.DidRunPass(): | 
| 206 self.TearDownPerfMonitoring(test) | 224 self.TearDownPerfMonitoring(test) | 
| 207 | 225 | 
| 208 if self.coverage_dir: | 226 if self.coverage_dir: | 
| 209 self.device.PullFile( | 227 self.device.PullFile( | 
| 210 self.coverage_device_file, self.coverage_host_file) | 228 self.coverage_device_file, self.coverage_host_file) | 
| 211 self.device.RunShellCommand( | 229 self.device.RunShellCommand( | 
| 212 'rm -f %s' % self.coverage_device_file) | 230 'rm -f %s' % self.coverage_device_file) | 
| 213 elif self.package_info: | 231 elif self.package_info: | 
| 214 self.device.ClearApplicationState(self.package_info.package) | 232 self.device.ClearApplicationState(self.package_info.package) | 
| 215 | 233 | 
| (...skipping 122 matching lines...) | |
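The `_GetTestCmdlineParameters` helper added above leans on two things worth spelling out: `instrumentation_test_instance.ParseCommandLineFlagParameters` is assumed to return either `None` or a list of objects carrying `add`/`remove` flag lists, and the empty `'Dummy'` namedtuple fallback keeps `RunTest`'s loop below uniform for unparameterized tests. (Note the bare `namedtuple` call depends on the import this patchset removed per the nit; presumably a later patchset qualifies it.) A self-contained sketch of the fallback, with `FlagModifiers` and `get_cmdline_parameters` as hypothetical names:

```python
import collections

# .add and .remove mirror the fields assumed on the parsed parameter objects.
FlagModifiers = collections.namedtuple('FlagModifiers', ['add', 'remove'])

def get_cmdline_parameters(parsed_params):
  # parsed_params stands in for the value returned by
  # instrumentation_test_instance.ParseCommandLineFlagParameters().
  if not parsed_params:
    # No annotation: return one empty parameter set so the caller's
    # for-loop still runs the test exactly once.
    return [FlagModifiers(add=[], remove=[])]
  return parsed_params

# An unannotated test still yields one (empty) parameter set:
assert get_cmdline_parameters(None) == [FlagModifiers([], [])]
# An annotated test runs once per flag combination:
params = [FlagModifiers(['--foo'], []), FlagModifiers([], ['--bar'])]
assert get_cmdline_parameters(params) == params
```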
| 338 return test_result.InstrumentationTestResult( | 356 return test_result.InstrumentationTestResult( | 
| 339 test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms) | 357 test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms) | 
| 340 | 358 | 
| 341 #override | 359 #override | 
| 342 def RunTest(self, test): | 360 def RunTest(self, test): | 
| 343 results = base_test_result.TestRunResults() | 361 results = base_test_result.TestRunResults() | 
| 344 timeout = (self._GetIndividualTestTimeoutSecs(test) * | 362 timeout = (self._GetIndividualTestTimeoutSecs(test) * | 
| 345 self._GetIndividualTestTimeoutScale(test) * | 363 self._GetIndividualTestTimeoutScale(test) * | 
| 346 self.tool.GetTimeoutScale()) | 364 self.tool.GetTimeoutScale()) | 
| 347 | 365 | 
| 348 start_ms = 0 | 366 cmdline_parameters = self._GetTestCmdlineParameters(test) | 
| 349 duration_ms = 0 | 367 for flag_modifiers in cmdline_parameters: | 
| 350 try: | 368 start_ms = 0 | 
| 351 self.TestSetup(test) | 369 duration_ms = 0 | 
| 370 try: | |
| 371 if self._IsFreTest(test): | |
| 372 flag_modifiers.remove.append('--disable-fre') | |
| 373 self.TestSetup(test, flag_modifiers) | |
| 352 | 374 | 
| 353 try: | 375 try: | 
| 354 self.device.GoHome() | 376 self.device.GoHome() | 
| 355 except device_errors.CommandTimeoutError: | 377 except device_errors.CommandTimeoutError: | 
| 356 logging.exception('Failed to focus the launcher.') | 378 logging.exception('Failed to focus the launcher.') | 
| 357 | 379 | 
| 358 time_ms = lambda: int(time.time() * 1000) | 380 time_ms = lambda: int(time.time() * 1000) | 
| 359 start_ms = time_ms() | 381 start_ms = time_ms() | 
| 360 raw_output = self._RunTest(test, timeout) | 382 raw_output = self._RunTest(test, timeout) | 
| 361 duration_ms = time_ms() - start_ms | 383 duration_ms = time_ms() - start_ms | 
| 362 | 384 | 
| 363 # Parse the test output | 385 # Parse the test output | 
| 364 result_code, result_bundle, statuses = ( | 386 result_code, result_bundle, statuses = ( | 
| 365 instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output)) | 387 instrumentation_test_instance.ParseAmInstrumentRawOutput( | 
| 366 result = self._GenerateTestResult( | 388 raw_output)) | 
| 367 test, result_code, result_bundle, statuses, start_ms, duration_ms) | 389 result = self._GenerateTestResult( | 
| 368 if local_device_instrumentation_test_run.DidPackageCrashOnDevice( | 390 test, result_code, result_bundle, statuses, start_ms, duration_ms) | 
| 369 self.test_pkg.GetPackageName(), self.device): | 391 if local_device_instrumentation_test_run.DidPackageCrashOnDevice( | 
| 370 result.SetType(base_test_result.ResultType.CRASH) | 392 self.test_pkg.GetPackageName(), self.device): | 
| 393 result.SetType(base_test_result.ResultType.CRASH) | |
| 394 except device_errors.CommandTimeoutError as e: | |
| 395 result = test_result.InstrumentationTestResult( | |
| 396 test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, | |
| 397 log=str(e) or 'No information') | |
| 398 if self.package_info: | |
| 399 self.device.ForceStop(self.package_info.package) | |
| 400 self.device.ForceStop(self.package_info.test_package) | |
| 401 except device_errors.DeviceUnreachableError as e: | |
| 402 result = test_result.InstrumentationTestResult( | |
| 403 test, base_test_result.ResultType.CRASH, start_ms, duration_ms, | |
| 404 log=str(e) or 'No information') | |
| 405 if len(cmdline_parameters) > 1: | |
| 406 # Specify commandline flag modifications used in the test run | |
| 407 result_name = result.GetName() | |
| 408 if flag_modifiers.add: | |
| 409 result_name = '%s with {%s}' % ( | |
| 410 result_name, ' '.join(flag_modifiers.add)) | |
| 411 if flag_modifiers.remove: | |
| 412 result_name = '%s without {%s}' % ( | |
| 413 result_name, ' '.join(flag_modifiers.remove)) | |
| 414 result.SetName(result_name) | |
| 371 results.AddResult(result) | 415 results.AddResult(result) | 
| 372 except device_errors.CommandTimeoutError as e: | 416 | 
| 373 results.AddResult(test_result.InstrumentationTestResult( | 417 self.TestTeardown(test, results) | 
| 374 test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, | 418 | 
| 375 log=str(e) or 'No information')) | |
| 376 if self.package_info: | |
| 377 self.device.ForceStop(self.package_info.package) | |
| 378 self.device.ForceStop(self.package_info.test_package) | |
| 379 except device_errors.DeviceUnreachableError as e: | |
| 380 results.AddResult(test_result.InstrumentationTestResult( | |
| 381 test, base_test_result.ResultType.CRASH, start_ms, duration_ms, | |
| 382 log=str(e) or 'No information')) | |
| 383 self.TestTeardown(test, results) | |
| 384 return (results, None if results.DidRunPass() else test) | 419 return (results, None if results.DidRunPass() else test) | 
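To summarize the new `RunTest` flow: the test body now runs once per flag-modifier set, and when more than one set is in play, each result name records which flags were added or removed. A self-contained sketch of just the naming scheme (names here are illustrative, not pylib's classes):

```python
import collections

FlagModifiers = collections.namedtuple('FlagModifiers', ['add', 'remove'])

def decorate_result_name(test_name, modifiers, num_parameter_sets):
  # Only annotate the name when the test actually ran under more than one
  # flag combination, matching the len(cmdline_parameters) > 1 check above.
  if num_parameter_sets <= 1:
    return test_name
  name = test_name
  if modifiers.add:
    name = '%s with {%s}' % (name, ' '.join(modifiers.add))
  if modifiers.remove:
    name = '%s without {%s}' % (name, ' '.join(modifiers.remove))
  return name

params = [FlagModifiers(['--site-per-process'], []),
          FlagModifiers([], ['--disable-fre'])]
print(decorate_result_name('testFoo', params[0], len(params)))
# -> testFoo with {--site-per-process}
print(decorate_result_name('testFoo', params[1], len(params)))
# -> testFoo without {--disable-fre}
```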