 Chromium Code Reviews
 Chromium Code Reviews Issue 1414403002:
  Add CommandLineArgumentParameter and use it for WebView tests  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@fix-cr-526885-read-nested-annotations
    
  
    Issue 1414403002:
  Add CommandLineArgumentParameter and use it for WebView tests  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@fix-cr-526885-read-nested-annotations| OLD | NEW | 
|---|---|
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be | 
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. | 
| 4 | 4 | 
| 5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" | 
| 6 | 6 | 
| 7 import logging | 7 import logging | 
| 8 import os | 8 import os | 
| 9 import re | 9 import re | 
| 10 import sys | 10 import sys | 
| 11 import time | 11 import time | 
| 12 | 12 | 
| 13 from devil.android import device_errors | 13 from devil.android import device_errors | 
| 14 from pylib import constants | 14 from pylib import constants | 
| 15 from pylib import flag_changer | 15 from pylib import flag_changer | 
| 16 from pylib import valgrind_tools | 16 from pylib import valgrind_tools | 
| 17 from pylib.base import base_test_result | 17 from pylib.base import base_test_result | 
| 18 from pylib.base import base_test_runner | 18 from pylib.base import base_test_runner | 
| 19 from pylib.instrumentation import instrumentation_test_instance | 19 from pylib.instrumentation import instrumentation_test_instance | 
| 20 from pylib.instrumentation import json_perf_parser | 20 from pylib.instrumentation import json_perf_parser | 
| 21 from pylib.instrumentation import test_result | 21 from pylib.instrumentation import test_result | 
| 22 from pylib.local.device import local_device_instrumentation_test_run | 22 from pylib.local.device import local_device_instrumentation_test_run | 
| 23 | 23 | 
| 24 sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib', | 24 sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib', | 
| 25 'common')) | 25 'common')) | 
| 26 import perf_tests_results_helper # pylint: disable=F0401 | 26 import perf_tests_results_helper # pylint: disable=F0401 | 
| 27 | 27 | 
| 28 | 28 | 
| 29 _PERF_TEST_ANNOTATION = 'PerfTest' | 29 _PERF_TEST_ANNOTATION = 'PerfTest' | 
| 30 _PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest' | |
| 31 _PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set' | |
| 32 _COMMAND_LINE_PARAMETER = 'cmdlinearg-parameter' | |
| 30 | 33 | 
| 31 | 34 | 
| 32 class TestRunner(base_test_runner.BaseTestRunner): | 35 class TestRunner(base_test_runner.BaseTestRunner): | 
| 
jbudorick
2015/10/27 16:14:37
beware that I'm actively working on deprecating this test runner
 
mnaganov (inactive)
2015/10/27 23:53:30
No problem, how do you run it?
 
jbudorick
2015/10/28 16:36:35
It's a bit verbose at the moment, but I'm working on it
 | |
| 33 """Responsible for running a series of tests connected to a single device.""" | 36 """Responsible for running a series of tests connected to a single device.""" | 
| 34 | 37 | 
| 35 _DEVICE_COVERAGE_DIR = 'chrome/test/coverage' | 38 _DEVICE_COVERAGE_DIR = 'chrome/test/coverage' | 
| 36 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' | 39 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' | 
| 37 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + | 40 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + | 
| 38 '/chrome-profile*') | 41 '/chrome-profile*') | 
| 39 | 42 | 
| 40 def __init__(self, test_options, device, shard_index, test_pkg, | 43 def __init__(self, test_options, device, shard_index, test_pkg, | 
| 41 additional_flags=None): | 44 additional_flags=None): | 
| 42 """Create a new TestRunner. | 45 """Create a new TestRunner. | 
| (...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 116 stripped_flags = (l.strip() for l in device_flags_file) | 119 stripped_flags = (l.strip() for l in device_flags_file) | 
| 117 flags_to_add.extend([flag for flag in stripped_flags if flag]) | 120 flags_to_add.extend([flag for flag in stripped_flags if flag]) | 
| 118 self.flags.AddFlags(flags_to_add) | 121 self.flags.AddFlags(flags_to_add) | 
| 119 | 122 | 
| 120 def TearDown(self): | 123 def TearDown(self): | 
| 121 """Cleans up the test harness and saves outstanding data from test run.""" | 124 """Cleans up the test harness and saves outstanding data from test run.""" | 
| 122 if self.flags: | 125 if self.flags: | 
| 123 self.flags.Restore() | 126 self.flags.Restore() | 
| 124 super(TestRunner, self).TearDown() | 127 super(TestRunner, self).TearDown() | 
| 125 | 128 | 
| 126 def TestSetup(self, test): | 129 def TestSetup(self, test, flag_modifiers): | 
| 127 """Sets up the test harness for running a particular test. | 130 """Sets up the test harness for running a particular test. | 
| 128 | 131 | 
| 129 Args: | 132 Args: | 
| 130 test: The name of the test that will be run. | 133 test: The name of the test that will be run. | 
| 131 """ | 134 """ | 
| 132 self.SetupPerfMonitoringIfNeeded(test) | 135 self.SetupPerfMonitoringIfNeeded(test) | 
| 133 self._SetupIndividualTestTimeoutScale(test) | 136 self._SetupIndividualTestTimeoutScale(test) | 
| 134 self.tool.SetupEnvironment() | 137 self.tool.SetupEnvironment() | 
| 135 | 138 | 
| 136 if self.flags and self._IsFreTest(test): | 139 if self.flags: | 
| 137 self.flags.RemoveFlags(['--disable-fre']) | 140 self.flags.PushFlags(add=flag_modifiers[0], remove=flag_modifiers[1]) | 
| 138 | 141 | 
| 139 # Make sure the forwarder is still running. | 142 # Make sure the forwarder is still running. | 
| 140 self._RestartHttpServerForwarderIfNecessary() | 143 self._RestartHttpServerForwarderIfNecessary() | 
| 141 | 144 | 
| 142 if self.coverage_dir: | 145 if self.coverage_dir: | 
| 143 coverage_basename = '%s.ec' % test | 146 coverage_basename = '%s.ec' % test | 
| 144 self.coverage_device_file = '%s/%s/%s' % ( | 147 self.coverage_device_file = '%s/%s/%s' % ( | 
| 145 self.device.GetExternalStoragePath(), | 148 self.device.GetExternalStoragePath(), | 
| 146 TestRunner._DEVICE_COVERAGE_DIR, coverage_basename) | 149 TestRunner._DEVICE_COVERAGE_DIR, coverage_basename) | 
| 147 self.coverage_host_file = os.path.join( | 150 self.coverage_host_file = os.path.join( | 
| (...skipping 16 matching lines...) Expand all Loading... | |
| 164 """Determines whether a test is a performance test. | 167 """Determines whether a test is a performance test. | 
| 165 | 168 | 
| 166 Args: | 169 Args: | 
| 167 test: The name of the test to be checked. | 170 test: The name of the test to be checked. | 
| 168 | 171 | 
| 169 Returns: | 172 Returns: | 
| 170 Whether the test is annotated as a performance test. | 173 Whether the test is annotated as a performance test. | 
| 171 """ | 174 """ | 
| 172 return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test) | 175 return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test) | 
| 173 | 176 | 
| 177 def _GetTestCmdlineParameters(self, test): | |
| 178 """Determines whether the test is parameterized to be run with different | |
| 179 command-line flags. | |
| 180 | |
| 181 Args: | |
| 182 test: The name of the test to be checked. | |
| 183 | |
| 184 Returns: | |
| 185 If the test is parameterized, returns a list of add/remove tuples | |
| 186 with lists of flags, e.g.: | |
| 187 | |
| 188 [(["--flag-to-add"], []), ([], ["--flag-to-remove"]), ([], [])] | |
| 
jbudorick
2015/10/27 16:14:36
This would be more readable if it returned a collection
 
mnaganov (inactive)
2015/10/27 23:53:30
Thanks! I'm learning neat Python tricks with you :)
 | |
| 189 | |
| 190 That means, the test must be run three times, the first time with | |
| 191 "--flag-to-add" added to command-line, the second time with | |
| 192 "--flag-to-remove" to be removed from command-line, and the third time | |
| 193 with default command-line args. If the same flag is listed in both | |
| 194 sections, it is left unchanged. | |
| 195 | |
| 196 """ | |
| 197 parameterized_tests = [] | |
| 198 annotations = self.test_pkg.GetTestAnnotations(test) | |
| 199 if _PARAMETERIZED_TEST_ANNOTATION in annotations: | |
| 200 parameterized_tests = [annotations[_PARAMETERIZED_TEST_ANNOTATION]] | |
| 201 elif _PARAMETERIZED_TEST_SET_ANNOTATION in annotations: | |
| 202 if annotations[_PARAMETERIZED_TEST_SET_ANNOTATION]: | |
| 203 parameterized_tests = annotations[ | |
| 204 _PARAMETERIZED_TEST_SET_ANNOTATION].get('tests', []) | |
| 205 else: | |
| 206 return [([], [])] | |
| 207 | |
| 208 result = [] | |
| 209 for pt in parameterized_tests: | |
| 210 if not pt: | |
| 211 continue | |
| 212 for p in pt['parameters']: | |
| 213 if p['tag'] == _COMMAND_LINE_PARAMETER: | |
| 214 to_add = [] | |
| 215 to_remove = [] | |
| 216 if 'arguments' in p: | |
| 217 for a in p['arguments']: | |
| 218 if a['name'] == 'add': | |
| 219 to_add = a['stringArray'] | |
| 220 elif a['name'] == 'remove': | |
| 221 to_remove = a['stringArray'] | |
| 222 result.append((to_add, to_remove)) | |
| 223 return result if result else [([], [])] | |
| 224 | |
| 174 def SetupPerfMonitoringIfNeeded(self, test): | 225 def SetupPerfMonitoringIfNeeded(self, test): | 
| 175 """Sets up performance monitoring if the specified test requires it. | 226 """Sets up performance monitoring if the specified test requires it. | 
| 176 | 227 | 
| 177 Args: | 228 Args: | 
| 178 test: The name of the test to be run. | 229 test: The name of the test to be run. | 
| 179 """ | 230 """ | 
| 180 if not self._IsPerfTest(test): | 231 if not self._IsPerfTest(test): | 
| 181 return | 232 return | 
| 182 self.device.RunShellCommand( | 233 self.device.RunShellCommand( | 
| 183 ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX]) | 234 ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX]) | 
| 184 self._logcat_monitor = self.device.GetLogcatMonitor() | 235 self._logcat_monitor = self.device.GetLogcatMonitor() | 
| 185 self._logcat_monitor.Start() | 236 self._logcat_monitor.Start() | 
| 186 | 237 | 
| 187 def TestTeardown(self, test, result): | 238 def TestTeardown(self, test, results): | 
| 188 """Cleans up the test harness after running a particular test. | 239 """Cleans up the test harness after running a particular test. | 
| 189 | 240 | 
| 190 Depending on the options of this TestRunner this might handle performance | 241 Depending on the options of this TestRunner this might handle performance | 
| 191 tracking. This method will only be called if the test passed. | 242 tracking. This method will only be called if the test passed. | 
| 192 | 243 | 
| 193 Args: | 244 Args: | 
| 194 test: The name of the test that was just run. | 245 test: The name of the test that was just run. | 
| 195 result: result for this test. | 246 results: results for this test. | 
| 196 """ | 247 """ | 
| 197 | 248 | 
| 198 self.tool.CleanUpEnvironment() | 249 self.tool.CleanUpEnvironment() | 
| 199 | 250 | 
| 200 if self.flags and self._IsFreTest(test): | 251 if self.flags: | 
| 201 self.flags.Restore() | 252 self.flags.Restore() | 
| 202 | 253 | 
| 203 if not result: | 254 if not results: | 
| 204 return | 255 return | 
| 205 if result.DidRunPass(): | 256 if results.DidRunPass(): | 
| 206 self.TearDownPerfMonitoring(test) | 257 self.TearDownPerfMonitoring(test) | 
| 207 | 258 | 
| 208 if self.coverage_dir: | 259 if self.coverage_dir: | 
| 209 self.device.PullFile( | 260 self.device.PullFile( | 
| 210 self.coverage_device_file, self.coverage_host_file) | 261 self.coverage_device_file, self.coverage_host_file) | 
| 211 self.device.RunShellCommand( | 262 self.device.RunShellCommand( | 
| 212 'rm -f %s' % self.coverage_device_file) | 263 'rm -f %s' % self.coverage_device_file) | 
| 213 elif self.package_info: | 264 elif self.package_info: | 
| 214 self.device.ClearApplicationState(self.package_info.package) | 265 self.device.ClearApplicationState(self.package_info.package) | 
| 215 | 266 | 
| (...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 338 return test_result.InstrumentationTestResult( | 389 return test_result.InstrumentationTestResult( | 
| 339 test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms) | 390 test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms) | 
| 340 | 391 | 
| 341 #override | 392 #override | 
| 342 def RunTest(self, test): | 393 def RunTest(self, test): | 
| 343 results = base_test_result.TestRunResults() | 394 results = base_test_result.TestRunResults() | 
| 344 timeout = (self._GetIndividualTestTimeoutSecs(test) * | 395 timeout = (self._GetIndividualTestTimeoutSecs(test) * | 
| 345 self._GetIndividualTestTimeoutScale(test) * | 396 self._GetIndividualTestTimeoutScale(test) * | 
| 346 self.tool.GetTimeoutScale()) | 397 self.tool.GetTimeoutScale()) | 
| 347 | 398 | 
| 348 start_ms = 0 | 399 cmdline_parameters = self._GetTestCmdlineParameters(test) | 
| 
jbudorick
2015/10/27 16:14:37
This should be part of test list generation, not the test runner
 
mnaganov (inactive)
2015/10/27 23:53:30
But... doesn't the test list consist of just test names?
 
jbudorick
2015/10/28 16:36:35
Oh right, that's true in this version. In the new
 | |
| 349 duration_ms = 0 | 400 for cmdline_modifiers in cmdline_parameters: | 
| 350 try: | 401 start_ms = 0 | 
| 351 self.TestSetup(test) | 402 duration_ms = 0 | 
| 403 try: | |
| 404 flag_modifiers = cmdline_modifiers[:] | |
| 405 if self._IsFreTest(test): | |
| 
jbudorick
2015/10/27 16:14:37
We should be able to remove this in light of https
 
mnaganov (inactive)
2015/10/27 23:53:30
Oh, cool. I thought that maybe it was again important
 | |
| 406 flag_modifiers[1].append('--disable-fre') | |
| 407 self.TestSetup(test, flag_modifiers) | |
| 352 | 408 | 
| 353 try: | 409 try: | 
| 354 self.device.GoHome() | 410 self.device.GoHome() | 
| 355 except device_errors.CommandTimeoutError: | 411 except device_errors.CommandTimeoutError: | 
| 356 logging.exception('Failed to focus the launcher.') | 412 logging.exception('Failed to focus the launcher.') | 
| 357 | 413 | 
| 358 time_ms = lambda: int(time.time() * 1000) | 414 time_ms = lambda: int(time.time() * 1000) | 
| 359 start_ms = time_ms() | 415 start_ms = time_ms() | 
| 360 raw_output = self._RunTest(test, timeout) | 416 raw_output = self._RunTest(test, timeout) | 
| 361 duration_ms = time_ms() - start_ms | 417 duration_ms = time_ms() - start_ms | 
| 362 | 418 | 
| 363 # Parse the test output | 419 # Parse the test output | 
| 364 result_code, result_bundle, statuses = ( | 420 result_code, result_bundle, statuses = ( | 
| 365 instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output)) | 421 instrumentation_test_instance.ParseAmInstrumentRawOutput( | 
| 366 result = self._GenerateTestResult( | 422 raw_output)) | 
| 367 test, result_code, result_bundle, statuses, start_ms, duration_ms) | 423 result = self._GenerateTestResult( | 
| 368 if local_device_instrumentation_test_run.DidPackageCrashOnDevice( | 424 test, result_code, result_bundle, statuses, start_ms, duration_ms) | 
| 369 self.test_pkg.GetPackageName(), self.device): | 425 if local_device_instrumentation_test_run.DidPackageCrashOnDevice( | 
| 370 result.SetType(base_test_result.ResultType.CRASH) | 426 self.test_pkg.GetPackageName(), self.device): | 
| 371 results.AddResult(result) | 427 result.SetType(base_test_result.ResultType.CRASH) | 
| 372 except device_errors.CommandTimeoutError as e: | 428 if len(cmdline_parameters): | 
| 373 results.AddResult(test_result.InstrumentationTestResult( | 429 result.SetName( | 
| 430 result.GetName() + ' with {' + ' '.join(cmdline_modifiers[0]) + '}') | |
| 
jbudorick
2015/10/27 16:14:37
This name should be generated once, presumably beforehand
 
mnaganov (inactive)
2015/10/27 23:53:30
We can't change the 'test' string, because it is t
 | |
| 431 results.AddResult(result) | |
| 432 except device_errors.CommandTimeoutError as e: | |
| 433 result = test_result.InstrumentationTestResult( | |
| 374 test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, | 434 test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, | 
| 375 log=str(e) or 'No information')) | 435 log=str(e) or 'No information') | 
| 376 if self.package_info: | 436 if len(cmdline_parameters): | 
| 377 self.device.ForceStop(self.package_info.package) | 437 result.SetName( | 
| 378 self.device.ForceStop(self.package_info.test_package) | 438 result.GetName() + ' with {' + ' '.join(cmdline_modifiers[0]) + '}') | 
| 379 except device_errors.DeviceUnreachableError as e: | 439 results.AddResult(result) | 
| 380 results.AddResult(test_result.InstrumentationTestResult( | 440 if self.package_info: | 
| 381 test, base_test_result.ResultType.CRASH, start_ms, duration_ms, | 441 self.device.ForceStop(self.package_info.package) | 
| 382 log=str(e) or 'No information')) | 442 self.device.ForceStop(self.package_info.test_package) | 
| 383 self.TestTeardown(test, results) | 443 except device_errors.DeviceUnreachableError as e: | 
| 444 result = test_result.InstrumentationTestResult( | |
| 445 test, base_test_result.ResultType.CRASH, start_ms, duration_ms, | |
| 446 log=str(e) or 'No information') | |
| 447 if len(cmdline_parameters): | |
| 448 result.SetName( | |
| 449 result.GetName() + ' with {' + ' '.join(cmdline_modifiers[0]) + '}') | |
| 450 results.AddResult(result) | |
| 451 | |
| 452 self.TestTeardown(test, results) | |
| 453 | |
| 384 return (results, None if results.DidRunPass() else test) | 454 return (results, None if results.DidRunPass() else test) | 
| OLD | NEW |