OLD | NEW |
(Empty) | |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. |
| 4 |
| 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" |
| 6 |
| 7 import fnmatch |
| 8 import logging |
| 9 import os |
| 10 import re |
| 11 import shutil |
| 12 import sys |
| 13 import time |
| 14 |
| 15 import android_commands |
| 16 import apk_info |
| 17 from base_test_runner import BaseTestRunner |
| 18 from base_test_sharder import BaseTestSharder, SetTestsContainer |
| 19 import cmd_helper |
| 20 import constants |
| 21 import errors |
| 22 from forwarder import Forwarder |
| 23 from json_perf_parser import GetAverageRunInfoFromJSONString |
| 24 from perf_tests_helper import PrintPerfResult |
| 25 import sharded_tests_queue |
| 26 from test_result import JAVA, SingleTestResult, TestResults |
| 27 |
| 28 |
| 29 _PERF_TEST_ANNOTATION = 'PerfTest' |
| 30 |
| 31 |
| 32 class FatalTestException(Exception): |
| 33 """A fatal test exception.""" |
| 34 pass |
| 35 |
| 36 |
| 37 def _TestNameToExpectation(test_name): |
| 38 # A test name is a Package.Path.Class#testName; convert to what we use in |
| 39 # the expectation file. |
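|   # e.g. 'package.path.FooTest#testBar' -> 'FooTest.testBar'.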
| 40 return '.'.join(test_name.replace('#', '.').split('.')[-2:]) |
| 41 |
| 42 |
| 43 def FilterTests(test_names, pattern_list, inclusive): |
| 44 """Filters |test_names| using a list of patterns. |
| 45 |
| 46 Args: |
| 47 test_names: A list of test names. |
| 48 pattern_list: A list of patterns. |
| 49     inclusive: If True, returns the tests that match any pattern. If False,
| 50       returns the tests that do not match any pattern.
| 51 Returns: |
| 52 A list of test names. |
| 53 """ |
| 54 ret = [] |
| 55 for t in test_names: |
| 56 has_match = False |
| 57 for pattern in pattern_list: |
| 58 has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t), |
| 59 pattern) |
| 60 if has_match == inclusive: |
| 61 ret += [t] |
| 62 return ret |
| 63 |
| 64 |
| 65 class TestRunner(BaseTestRunner): |
| 66 """Responsible for running a series of tests connected to a single device.""" |
| 67 |
| 68 _DEVICE_DATA_DIR = '/data/local/tmp/chrome/test/data' |
| 69 _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''), |
| 70 'external/emma/lib/emma.jar') |
| 71 _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es' |
| 72 _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR') |
| 73 _COVERAGE_FILENAME = 'coverage.ec' |
| 74 _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' + |
| 75 _COVERAGE_FILENAME) |
| 76 _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP', |
| 77 ''), |
| 78 'out/target/common/obj/APPS', |
| 79 'Chrome_intermediates/coverage.em') |
| 80 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' |
| 81 _DEVICE_PERF_OUTPUT_DIR = '/sdcard/Download/' |
| 82 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (_DEVICE_PERF_OUTPUT_DIR + |
| 83 'chrome-profile*') |
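|   # Keyed by device serial; tracks which devices already have the test files.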
| 84 _DEVICE_HAS_TEST_FILES = {} |
| 85 |
| 86 def __init__(self, options, device, tests_iter, coverage, shard_index, apks, |
| 87 ports_to_forward): |
| 88 """Create a new TestRunner. |
| 89 |
| 90 Args: |
| 91 options: An options object with the following required attributes: |
| 92 - install_apk: Re-installs the apk if opted. |
| 93 - save_perf_json: Whether or not to save the JSON file from UI perf |
| 94 tests. |
| 95 - screenshot_failures: Take a screenshot for a test failure |
| 96 - tool: Name of the Valgrind tool. |
| 97 - wait_for_debugger: blocks until the debugger is connected. |
| 98 device: Attached android device. |
| 99 tests_iter: A list of tests to be run. |
| 100 coverage: Collects coverage information if opted. |
| 101 shard_index: shard # for this TestRunner, used to create unique port |
| 102 numbers. |
| 103      apks: A list of ApkInfo objects to be installed. The first element
| 104        should be the test APK; the rest are APKs used by the test.
| 105        The default is ChromeTest.apk.
| 106 ports_to_forward: A list of port numbers for which to set up forwarders. |
| 107 Can be optionally requested by a test case. |
| 108 Raises: |
| 109 FatalTestException: if coverage metadata is not available. |
| 110 """ |
| 111 BaseTestRunner.__init__(self, device, options.tool, shard_index) |
| 112 |
| 113 if not apks: |
| 114 apks = [apk_info.ApkInfo(options.test_apk_path, |
| 115 options.test_apk_jar_path)] |
| 116 |
| 117 self.install_apk = options.install_apk |
| 118 self.save_perf_json = options.save_perf_json |
| 119 self.screenshot_failures = options.screenshot_failures |
| 120 self.wait_for_debugger = options.wait_for_debugger |
| 121 |
| 122 self.tests_iter = tests_iter |
| 123 self.coverage = coverage |
| 124 self.apks = apks |
| 125 self.test_apk = apks[0] |
| 126 self.instrumentation_class_path = self.test_apk.GetPackageName() |
| 127 self.ports_to_forward = ports_to_forward |
| 128 |
| 129 self.test_results = TestResults() |
| 130 # List of forwarders created by this instance of TestRunner. |
| 131 self.forwarders = [] |
| 132 |
| 133 if self.coverage: |
| 134 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): |
| 135 os.remove(TestRunner._COVERAGE_MERGED_FILENAME) |
| 136 if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH): |
| 137 raise FatalTestException('FATAL ERROR in ' + sys.argv[0] + |
| 138 ' : Coverage meta info [' + |
| 139 TestRunner._COVERAGE_META_INFO_PATH + |
| 140 '] does not exist.') |
| 141 if (not TestRunner._COVERAGE_WEB_ROOT_DIR or |
| 142 not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)): |
| 143 raise FatalTestException('FATAL ERROR in ' + sys.argv[0] + |
| 144 ' : Path specified in $EMMA_WEB_ROOTDIR [' + |
| 145 TestRunner._COVERAGE_WEB_ROOT_DIR + |
| 146 '] does not exist.') |
| 147 |
| 148 def _GetTestsIter(self): |
| 149 if not self.tests_iter: |
| 150      # multiprocessing.Queue can't be pickled across processes if it is set
| 151      # as a member in the constructor. Grab the shared container here instead.
| 152      self.tests_iter = BaseTestSharder.tests_container
| 153 assert self.tests_iter |
| 154 return self.tests_iter |
| 155 |
| 156 def CopyTestFilesOnce(self): |
| 157 """Pushes the test data files to the device. Installs the apk if opted.""" |
| 158 if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False): |
| 159 logging.warning('Already copied test files to device %s, skipping.', |
| 160 self.device) |
| 161 return |
| 162 host_test_files_path = (constants.CHROME_DIR + |
| 163 '/chrome/test/data/android/device_files') |
| 164 if os.path.exists(host_test_files_path): |
| 165 self.adb.PushIfNeeded(host_test_files_path, |
| 166 TestRunner._DEVICE_DATA_DIR) |
| 167 if self.install_apk: |
| 168      # 'adb install -r' is not reliable, so uninstall the APK first.
| 169 for apk in self.apks: |
| 170 self.adb.Adb().SendCommand('uninstall ' + apk.GetPackageName()) |
| 171 self.adb.Adb().SendCommand('install ' + apk.GetApkPath()) |
| 172 self.tool.CopyFiles() |
| 173 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True |
| 174 |
| 175 def SaveCoverageData(self, test): |
| 176 """Saves the Emma coverage data before it's overwritten by the next test. |
| 177 |
| 178 Args: |
| 179 test: the test whose coverage data is collected. |
| 180 """ |
| 181 if not self.coverage: |
| 182 return |
| 183 if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH, |
| 184 constants.CHROME_DIR): |
| 185 logging.error('ERROR: Unable to find file ' + |
| 186 TestRunner._COVERAGE_RESULT_PATH + |
| 187 ' on the device for test ' + test) |
| 188 pulled_coverage_file = os.path.join(constants.CHROME_DIR, |
| 189 TestRunner._COVERAGE_FILENAME) |
| 190 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): |
| 191 cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge', |
| 192 '-in', pulled_coverage_file, |
| 193 '-in', TestRunner._COVERAGE_MERGED_FILENAME, |
| 194 '-out', TestRunner._COVERAGE_MERGED_FILENAME] |
| 195 cmd_helper.RunCmd(cmd) |
| 196 else: |
| 197 shutil.copy(pulled_coverage_file, |
| 198 TestRunner._COVERAGE_MERGED_FILENAME) |
| 199 os.remove(pulled_coverage_file) |
| 200 |
| 201 def GenerateCoverageReportIfNeeded(self): |
| 202    """Uses Emma to generate a coverage report and an HTML page."""
| 203 if not self.coverage: |
| 204 return |
| 205 cmd = ['java', '-classpath', TestRunner._EMMA_JAR, |
| 206 'emma', 'report', '-r', 'html', |
| 207 '-in', TestRunner._COVERAGE_MERGED_FILENAME, |
| 208 '-in', TestRunner._COVERAGE_META_INFO_PATH] |
| 209 cmd_helper.RunCmd(cmd) |
| 210 new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR, |
| 211 time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M')) |
| 212 shutil.copytree('coverage', new_dir) |
| 213 |
| 214 latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR, |
| 215 'Latest_Coverage_Run') |
| 216 if os.path.exists(latest_dir): |
| 217 shutil.rmtree(latest_dir) |
| 218 os.mkdir(latest_dir) |
| 219 webserver_new_index = os.path.join(new_dir, 'index.html') |
| 220 webserver_new_files = os.path.join(new_dir, '_files') |
| 221 webserver_latest_index = os.path.join(latest_dir, 'index.html') |
| 222 webserver_latest_files = os.path.join(latest_dir, '_files') |
| 223    # Set up new symlinks pointing to the latest result.
| 224 os.symlink(webserver_new_index, webserver_latest_index) |
| 225 os.symlink(webserver_new_files, webserver_latest_files) |
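|     # Make the generated reports readable so they can be served from the web root.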
| 226 cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir]) |
| 227 |
| 228 def _GetInstrumentationArgs(self): |
| 229 ret = {} |
| 230 if self.coverage: |
| 231 ret['coverage'] = 'true' |
| 232 if self.wait_for_debugger: |
| 233 ret['debug'] = 'true' |
| 234 return ret |
| 235 |
| 236 def _TakeScreenshot(self, test): |
| 237 """Takes a screenshot from the device.""" |
| 238 screenshot_tool = os.path.join(os.getenv('ANDROID_HOST_OUT'), 'bin', |
| 239 'screenshot2') |
| 240 screenshot_path = os.path.join(constants.CHROME_DIR, |
| 241 'out_screenshots') |
| 242 if not os.path.exists(screenshot_path): |
| 243 os.mkdir(screenshot_path) |
| 244 screenshot_name = os.path.join(screenshot_path, test + '.png') |
| 245 logging.info('Taking screenshot named %s', screenshot_name) |
| 246 cmd_helper.RunCmd([screenshot_tool, '-s', self.device, screenshot_name]) |
| 247 |
| 248 def SetUp(self): |
| 249 """Sets up the test harness and device before all tests are run.""" |
| 250 super(TestRunner, self).SetUp() |
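|     # If the Java assert setting changed, a restart is needed for it to take effect.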
| 251 if self.adb.SetJavaAssertsEnabled(enable=True): |
| 252 self.adb.Reboot(full_reboot=False) |
| 253 |
| 254    # We use a different default port for the HTTP server on each shard
| 255    # because there may be a race condition when multiple processes try to
| 256    # launch lighttpd on the same port at the same time.
| 257 # This line *must* come before the forwarding below, as it nukes all |
| 258 # the other forwarders. A more comprehensive fix might be to pull the |
| 259 # forwarder-killing line up to here, but that might violate assumptions |
| 260 # implicit in other places. |
| 261 self.LaunchTestHttpServer(os.path.join(constants.CHROME_DIR), |
| 262 (constants.LIGHTTPD_RANDOM_PORT_FIRST + |
| 263 self.shard_index)) |
| 264 |
| 265 if self.ports_to_forward: |
| 266 for port in self.ports_to_forward: |
| 267 self.forwarders.append( |
| 268 Forwarder(self.adb, [(port, port)], self.tool, '127.0.0.1')) |
| 269 self.CopyTestFilesOnce() |
| 270 self.flags.AddFlags(['--enable-test-intents']) |
| 271 |
| 272 def TearDown(self): |
| 273 """Cleans up the test harness and saves outstanding data from test run.""" |
| 274 if self.forwarders: |
| 275 for forwarder in self.forwarders: |
| 276 forwarder.Close() |
| 277 self.GenerateCoverageReportIfNeeded() |
| 278 super(TestRunner, self).TearDown() |
| 279 |
| 280 def TestSetup(self, test): |
| 281 """Sets up the test harness for running a particular test. |
| 282 |
| 283 Args: |
| 284 test: The name of the test that will be run. |
| 285 """ |
| 286 self.SetupPerfMonitoringIfNeeded(test) |
| 287 self._SetupIndividualTestTimeoutScale(test) |
| 288 self.tool.SetupEnvironment() |
| 289 |
| 290 # Make sure the forwarder is still running. |
| 291 self.RestartHttpServerForwarderIfNecessary() |
| 292 |
| 293 def _IsPerfTest(self, test): |
| 294 """Determines whether a test is a performance test. |
| 295 |
| 296 Args: |
| 297 test: The name of the test to be checked. |
| 298 |
| 299 Returns: |
| 300 Whether the test is annotated as a performance test. |
| 301 """ |
| 302 return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test) |
| 303 |
| 304 def SetupPerfMonitoringIfNeeded(self, test): |
| 305 """Sets up performance monitoring if the specified test requires it. |
| 306 |
| 307 Args: |
| 308 test: The name of the test to be run. |
| 309 """ |
| 310 if not self._IsPerfTest(test): |
| 311 return |
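|     # Remove any leftover perf output files from previous runs.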
| 312 self.adb.Adb().SendCommand('shell rm ' + |
| 313 TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX) |
| 314 self.adb.StartMonitoringLogcat() |
| 315 |
| 316 def TestTeardown(self, test, test_result): |
| 317 """Cleans up the test harness after running a particular test. |
| 318 |
| 319    Depending on the options of this TestRunner, this might handle coverage
| 320 tracking or performance tracking. This method will only be called if the |
| 321 test passed. |
| 322 |
| 323 Args: |
| 324 test: The name of the test that was just run. |
| 325 test_result: result for this test. |
| 326 """ |
| 327 |
| 328 self.tool.CleanUpEnvironment() |
| 329 |
| 330 # The logic below relies on the test passing. |
| 331 if not test_result or test_result.GetStatusCode(): |
| 332 return |
| 333 |
| 334 self.TearDownPerfMonitoring(test) |
| 335 self.SaveCoverageData(test) |
| 336 |
| 337 def TearDownPerfMonitoring(self, test): |
| 338 """Cleans up performance monitoring if the specified test required it. |
| 339 |
| 340 Args: |
| 341 test: The name of the test that was just run. |
| 342 Raises: |
| 343 FatalTestException: if there's anything wrong with the perf data. |
| 344 """ |
| 345 if not self._IsPerfTest(test): |
| 346 return |
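|     # Tests are named 'package.path.Class#testMethod'; keep only the method name.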
| 347 raw_test_name = test.split('#')[1] |
| 348 |
| 349 # Wait and grab annotation data so we can figure out which traces to parse |
| 350    regex = self.adb.WaitForLogMatch(re.compile(r'\*\*PERFANNOTATION\(' +
| 351                                                raw_test_name +
| 352                                                r'\)\:(.*)'), None)
| 353 |
| 354    # If the test is set to run only on a specific device type (e.g. only
| 355    # tablet or only phone) and it is being run on the wrong device, the test
| 356    # simply quits without doing anything. The Java test harness will still
| 357    # print the appropriate annotation, but adds --NORUN-- so we know to
| 358    # ignore the results.
| 359    # The --NORUN-- tag is managed by MainActivityTestBase.java.
| 360 if regex.group(1) != '--NORUN--': |
| 361 |
| 362 # Obtain the relevant perf data. The data is dumped to a |
| 363 # JSON formatted file. |
| 364 json_string = self.adb.GetFileContents( |
| 365 '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt') |
| 366 |
| 367 if json_string: |
| 368 json_string = '\n'.join(json_string) |
| 369 else: |
| 370 raise FatalTestException('Perf file does not exist or is empty') |
| 371 |
| 372 if self.save_perf_json: |
| 373 json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name |
| 374 with open(json_local_file, 'w') as f: |
| 375 f.write(json_string) |
| 376 logging.info('Saving Perf UI JSON from test ' + |
| 377 test + ' to ' + json_local_file) |
| 378 |
| 379 raw_perf_data = regex.group(1).split(';') |
| 380 |
| 381 for raw_perf_set in raw_perf_data: |
| 382 if raw_perf_set: |
| 383 perf_set = raw_perf_set.split(',') |
| 384 if len(perf_set) != 3: |
| 385 raise FatalTestException('Unexpected number of tokens in ' |
| 386 'perf annotation string: ' + raw_perf_set) |
| 387 |
| 388 # Process the performance data |
| 389 result = GetAverageRunInfoFromJSONString(json_string, perf_set[0]) |
| 390 |
| 391 PrintPerfResult(perf_set[1], perf_set[2], |
| 392 [result['average']], result['units']) |
| 393 |
| 394 def _SetupIndividualTestTimeoutScale(self, test): |
| 395 timeout_scale = self._GetIndividualTestTimeoutScale(test) |
| 396 if timeout_scale == 1: |
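|       # An empty value effectively resets the property to its default.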
| 397 value = '""' |
| 398 else: |
| 399 value = '%f' % timeout_scale |
| 400 self.adb.RunShellCommand('setprop chrome.timeout_scale %s' % value) |
| 401 |
| 402 def _GetIndividualTestTimeoutScale(self, test): |
| 403 """Returns the timeout scale for the given |test|.""" |
| 404 annotations = self.apks[0].GetTestAnnotations(test) |
| 405 timeout_scale = 1 |
| 406 if 'TimeoutScale' in annotations: |
| 407 for annotation in annotations: |
| 408 scale_match = re.match('TimeoutScale:([0-9]+)', annotation) |
| 409 if scale_match: |
| 410 timeout_scale = int(scale_match.group(1)) |
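|     # Greatly scale up the timeout when waiting for a debugger so it does not fire.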
| 411 if self.wait_for_debugger: |
| 412 timeout_scale *= 100 |
| 413 return timeout_scale |
| 414 |
| 415 def _GetIndividualTestTimeoutSecs(self, test): |
| 416 """Returns the timeout in seconds for the given |test|.""" |
| 417 annotations = self.apks[0].GetTestAnnotations(test) |
| 418 if 'Manual' in annotations: |
| 419 return 600 * 60 |
| 420 if 'External' in annotations: |
| 421 return 10 * 60 |
| 422 if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations: |
| 423 return 5 * 60 |
| 424 if 'MediumTest' in annotations: |
| 425 return 3 * 60 |
| 426 return 1 * 60 |
| 427 |
| 428 def RunTests(self): |
| 429    """Runs the tests, generating coverage data if needed.
| 430 |
| 431 Returns: |
| 432 A TestResults object. |
| 433 """ |
| 434 instrumentation_path = (self.instrumentation_class_path + |
| 435 '/android.test.InstrumentationTestRunner') |
| 436 instrumentation_args = self._GetInstrumentationArgs() |
| 437 for test in self._GetTestsIter(): |
| 438 test_result = None |
| 439 start_date_ms = None |
| 440 try: |
| 441 self.TestSetup(test) |
| 442 start_date_ms = int(time.time()) * 1000 |
| 443 args_with_filter = dict(instrumentation_args) |
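|         # The 'class' argument limits the instrumentation run to this one test.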
| 444 args_with_filter['class'] = test |
| 445 # |test_results| is a list that should contain |
| 446 # a single TestResult object. |
| 447 logging.warn(args_with_filter) |
| 448 (test_results, _) = self.adb.Adb().StartInstrumentation( |
| 449 instrumentation_path=instrumentation_path, |
| 450 instrumentation_args=args_with_filter, |
| 451 timeout_time=(self._GetIndividualTestTimeoutSecs(test) * |
| 452 self._GetIndividualTestTimeoutScale(test) * |
| 453 self.tool.GetTimeoutScale())) |
| 454 duration_ms = int(time.time()) * 1000 - start_date_ms |
| 455 assert len(test_results) == 1 |
| 456 test_result = test_results[0] |
| 457 status_code = test_result.GetStatusCode() |
| 458 if status_code: |
| 459 log = test_result.GetFailureReason() |
| 460 if not log: |
| 461 log = 'No information.' |
| 462 if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0: |
| 463 self._TakeScreenshot(test) |
| 464 result = (log.split('\n')[0], log) |
| 465 self.test_results.failed += [SingleTestResult(test, start_date_ms, |
| 466 duration_ms, JAVA, log, |
| 467 result)] |
| 468 else: |
| 469 result = [SingleTestResult(test, start_date_ms, duration_ms, JAVA)] |
| 470 self.test_results.ok += result |
| 471 # Catch exceptions thrown by StartInstrumentation(). |
| 472 # See ../../third_party/android/testrunner/adb_interface.py |
| 473 except (errors.WaitForResponseTimedOutError, |
| 474 errors.DeviceUnresponsiveError, |
| 475 errors.InstrumentationError), e: |
| 476 if start_date_ms: |
| 477 duration_ms = int(time.time()) * 1000 - start_date_ms |
| 478 else: |
| 479 start_date_ms = int(time.time()) * 1000 |
| 480 duration_ms = 0 |
| 481 message = str(e) |
| 482 if not message: |
| 483 message = 'No information.' |
| 484 self.test_results.crashed += [SingleTestResult(test, start_date_ms, |
| 485 duration_ms, |
| 486 JAVA, message, |
| 487 (message, message))] |
| 488 test_result = None |
| 489 self.TestTeardown(test, test_result) |
| 490 return self.test_results |
| 491 |
| 492 |
| 493 class TestSharder(BaseTestSharder): |
| 494 """Responsible for sharding the tests on the connected devices.""" |
| 495 |
| 496 def __init__(self, attached_devices, options, tests, apks): |
| 497 BaseTestSharder.__init__(self, attached_devices) |
| 498 self.options = options |
| 499 self.tests = tests |
| 500 self.apks = apks |
| 501 |
| 502 def SetupSharding(self, tests): |
| 503 """Called before starting the shards.""" |
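|     # Tests go into a shared queue that each shard pulls from; see _GetTestsIter.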
| 504 SetTestsContainer(sharded_tests_queue.ShardedTestsQueue( |
| 505 len(self.attached_devices), tests)) |
| 506 |
| 507 def CreateShardedTestRunner(self, device, index): |
| 508 """Creates a sharded test runner. |
| 509 |
| 510 Args: |
| 511 device: Device serial where this shard will run. |
| 512 index: Index of this device in the pool. |
| 513 |
| 514 Returns: |
| 515 A TestRunner object. |
| 516 """ |
| 517 return TestRunner(self.options, device, None, False, index, self.apks, []) |
| 518 |
| 519 |
| 520 def DispatchJavaTests(options, apks): |
| 521 """Dispatches Java tests onto connected device(s). |
| 522 |
| 523    If possible, this method attempts to shard the tests across all connected
| 524    devices. Otherwise, it dispatches and runs the tests on one device.
| 525 |
| 526 Args: |
| 527 options: Command line options. |
| 528 apks: list of APKs to use. |
| 529 |
| 530 Returns: |
| 531 A TestResults object holding the results of the Java tests. |
| 532 |
| 533 Raises: |
| 534      FatalTestException: when there are no attached devices.
| 535 """ |
| 536 test_apk = apks[0] |
| 537 if options.annotation: |
| 538 available_tests = test_apk.GetAnnotatedTests(options.annotation) |
| 539 if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest': |
| 540 tests_without_annotation = [ |
| 541 m for m in |
| 542 test_apk.GetTestMethods() |
| 543 if not test_apk.GetTestAnnotations(m) and |
| 544 not apk_info.ApkInfo.IsPythonDrivenTest(m)] |
| 545 if tests_without_annotation: |
| 546 tests_without_annotation.sort() |
| 547 logging.warning('The following tests do not contain any annotation. ' |
| 548 'Assuming "SmallTest":\n%s', |
| 549 '\n'.join(tests_without_annotation)) |
| 550 available_tests += tests_without_annotation |
| 551 else: |
| 552 available_tests = [m for m in test_apk.GetTestMethods() |
| 553 if not apk_info.ApkInfo.IsPythonDrivenTest(m)] |
| 554 coverage = os.environ.get('EMMA_INSTRUMENT') == 'true' |
| 555 |
| 556 tests = [] |
| 557 if options.test_filter: |
| 558 # |available_tests| are in adb instrument format: package.path.class#test. |
| 559 filter_without_hash = options.test_filter.replace('#', '.') |
| 560 tests = [t for t in available_tests |
| 561 if filter_without_hash in t.replace('#', '.')] |
| 562 else: |
| 563 tests = available_tests |
| 564 |
| 565 if not tests: |
| 566 logging.warning('No Java tests to run with current args.') |
| 567 return TestResults() |
| 568 |
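|   # Repeat the full list of tests once per requested run.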
| 569 tests *= options.number_of_runs |
| 570 |
| 571 attached_devices = android_commands.GetAttachedDevices() |
| 572 test_results = TestResults() |
| 573 |
| 574 if not attached_devices: |
| 575 raise FatalTestException('You have no devices attached or visible!') |
| 576 if options.device: |
| 577 attached_devices = [options.device] |
| 578 |
| 579 logging.info('Will run: %s', str(tests)) |
| 580 |
| 581 if (len(attached_devices) > 1 and |
| 582 not coverage and |
| 583 not options.wait_for_debugger): |
| 584 sharder = TestSharder(attached_devices, options, tests, apks) |
| 585 test_results = sharder.RunShardedTests() |
| 586 else: |
| 587 runner = TestRunner(options, attached_devices[0], tests, coverage, 0, apks, |
| 588 []) |
| 589 test_results = runner.Run() |
| 590 return test_results |