# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" | |

import fnmatch
import logging
import os
import re
import shutil
import sys
import time

import android_commands
import apk_info
from base_test_runner import BaseTestRunner
from base_test_sharder import BaseTestSharder, SetTestsContainer
import cmd_helper
import constants
import errors
from forwarder import Forwarder
from json_perf_parser import GetAverageRunInfoFromJSONString
from perf_tests_helper import PrintPerfResult
import sharded_tests_queue
from test_result import SingleTestResult, TestResults
import valgrind_tools

_PERF_TEST_ANNOTATION = 'PerfTest'


class FatalTestException(Exception):
  """A fatal test exception."""
  pass


def _TestNameToExpectation(test_name):
  # A test name is a Package.Path.Class#testName; convert to what we use in
  # the expectation file.
  return '.'.join(test_name.replace('#', '.').split('.')[-2:])


def FilterTests(test_names, pattern_list, inclusive):
  """Filters |test_names| using a list of patterns.

  Args:
    test_names: A list of test names.
    pattern_list: A list of patterns.
    inclusive: If True, returns the tests that match any pattern. If False,
      returns the tests that do not match any pattern.
  Returns:
    A list of test names.
  """
  ret = []
  for t in test_names:
    has_match = any(fnmatch.fnmatch(_TestNameToExpectation(t), pattern)
                    for pattern in pattern_list)
    if has_match == inclusive:
      ret += [t]
  return ret


class TestRunner(BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
                           'external/emma/lib/emma.jar')
  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
  _COVERAGE_FILENAME = 'coverage.ec'
  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
                           _COVERAGE_FILENAME)
  _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
                                                         ''),
                                          'out/target/common/obj/APPS',
                                          'Chrome_intermediates/coverage.em')
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
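  # Map of device serial to a flag noting whether the test files have already
  # been pushed; shared by all TestRunner instances in this process.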
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
               ports_to_forward):
    """Create a new TestRunner.

    Args:
      options: An options object with the following required attributes:
        - build_type: 'Release' or 'Debug'.
        - install_apk: Re-installs the apk if opted.
        - save_perf_json: Whether or not to save the JSON file from UI perf
          tests.
        - screenshot_failures: Take a screenshot for a test failure.
        - tool: Name of the Valgrind tool.
        - wait_for_debugger: Blocks until the debugger is connected.
        - disable_assertions: Whether to disable java assertions on the device.
      device: Attached android device.
      tests_iter: A list of tests to be run.
      coverage: Collects coverage information if opted.
      shard_index: Shard index for this TestRunner, used to create unique port
        numbers.
      apks: A list of ApkInfo objects to be installed. The first element
        should be the tests apk; the rest are the APKs used by the test.
        The default is ChromeTest.apk.
      ports_to_forward: A list of port numbers for which to set up forwarders.
        Can be optionally requested by a test case.
    Raises:
      FatalTestException: if coverage metadata is not available.
    """
    BaseTestRunner.__init__(
        self, device, options.tool, shard_index, options.build_type)

    if not apks:
      apks = [apk_info.ApkInfo(options.test_apk_path,
                               options.test_apk_jar_path)]

    self.build_type = options.build_type
    self.install_apk = options.install_apk
    self.test_data = options.test_data
    self.save_perf_json = options.save_perf_json
    self.screenshot_failures = options.screenshot_failures
    self.wait_for_debugger = options.wait_for_debugger
    self.disable_assertions = options.disable_assertions

    self.tests_iter = tests_iter
    self.coverage = coverage
    self.apks = apks
    self.test_apk = apks[0]
    self.instrumentation_class_path = self.test_apk.GetPackageName()
    self.ports_to_forward = ports_to_forward

    self.test_results = TestResults()
    self.forwarder = None

    if self.coverage:
      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Coverage meta info [' +
                                 TestRunner._COVERAGE_META_INFO_PATH +
                                 '] does not exist.')
      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
                                 '] does not exist.')

  def _GetTestsIter(self):
    if not self.tests_iter:
      # multiprocessing.Queue can't be pickled across processes if we have it
      # as a member set in the constructor. Grab one here instead.
      self.tests_iter = BaseTestSharder.tests_container
    assert self.tests_iter
    return self.tests_iter

  def CopyTestFilesOnce(self):
    """Pushes the test data files to the device. Installs the apk if opted."""
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return
    for dest_host_pair in self.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = constants.CHROME_DIR + '/' + host_src
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path,
                              self.adb.GetExternalStorage() + '/' +
                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
    if self.install_apk:
      for apk in self.apks:
        self.adb.ManagedInstall(apk.GetApkPath(),
                                package_name=apk.GetPackageName())
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True

  def SaveCoverageData(self, test):
    """Saves the Emma coverage data before it's overwritten by the next test.

    Args:
      test: the test whose coverage data is collected.
    """
    if not self.coverage:
      return
    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
                               constants.CHROME_DIR):
      logging.error('ERROR: Unable to find file ' +
                    TestRunner._COVERAGE_RESULT_PATH +
                    ' on the device for test ' + test)
    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
                                        TestRunner._COVERAGE_FILENAME)
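    # Accumulate coverage across tests: merge this test's data into the
    # running merged file if one exists, otherwise seed it with a copy.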
    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
             '-in', pulled_coverage_file,
             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
      cmd_helper.RunCmd(cmd)
    else:
      shutil.copy(pulled_coverage_file,
                  TestRunner._COVERAGE_MERGED_FILENAME)
    os.remove(pulled_coverage_file)

  def GenerateCoverageReportIfNeeded(self):
    """Uses Emma to generate a coverage report and an HTML page."""
    if not self.coverage:
      return
    cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
           'emma', 'report', '-r', 'html',
           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
           '-in', TestRunner._COVERAGE_META_INFO_PATH]
    cmd_helper.RunCmd(cmd)
    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
    shutil.copytree('coverage', new_dir)

    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                              'Latest_Coverage_Run')
    if os.path.exists(latest_dir):
      shutil.rmtree(latest_dir)
    os.mkdir(latest_dir)
    webserver_new_index = os.path.join(new_dir, 'index.html')
    webserver_new_files = os.path.join(new_dir, '_files')
    webserver_latest_index = os.path.join(latest_dir, 'index.html')
    webserver_latest_files = os.path.join(latest_dir, '_files')
    # Set up new symlinks to the latest result.
    os.symlink(webserver_new_index, webserver_latest_index)
    os.symlink(webserver_new_files, webserver_latest_files)
    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.coverage:
      ret['coverage'] = 'true'
    if self.wait_for_debugger:
      ret['debug'] = 'true'
    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png')
    logging.info('Taking screenshot named %s', screenshot_name)
    self.adb.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s: '
                      'the device is not rooted.', self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(enable=not self.disable_assertions):
        self.adb.Reboot(full_reboot=False)

    # Pick a different default port for the HTTP server based on the shard
    # index; otherwise multiple shards may race to launch lighttpd on the
    # same port at the same time.
    http_server_ports = self.LaunchTestHttpServer(
        os.path.join(constants.CHROME_DIR),
        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
    if self.ports_to_forward:
      port_pairs = [(port, port) for port in self.ports_to_forward]
      # We need to remember which ports the HTTP server is using, since the
      # forwarder will stomp on them otherwise.
      port_pairs.append(http_server_ports)
      self.forwarder = Forwarder(self.adb, self.build_type)
      self.forwarder.Run(port_pairs, self.tool, '127.0.0.1')
    self.CopyTestFilesOnce()
    self.flags.AddFlags(['--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.forwarder:
      self.forwarder.Close()
    self.GenerateCoverageReportIfNeeded()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self.RestartHttpServerForwarderIfNecessary()

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
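    # Clear out any stale profile output from a previous run so only this
    # test's files are found later.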
    self.adb.Adb().SendCommand('shell rm ' +
                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, test_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle coverage
    tracking or performance tracking. This method will only be called if the
    test passed.

    Args:
      test: The name of the test that was just run.
      test_result: result for this test.
    """
    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not test_result or test_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)
    self.SaveCoverageData(test)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      FatalTestException: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait and grab annotation data so we can figure out which traces to
    # parse.
    regex = self.adb.WaitForLogMatch(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'), None)

    # If the test is set to run on a specific device type only (i.e. only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- so we
    # know to ignore the results. The --NORUN-- tag is managed by
    # MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':
      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.adb.GetFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

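      # GetFileContents returns the file as a list of lines; rejoin them so we
      # have a single JSON string to parse.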
      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise FatalTestException('Perf file does not exist or is empty')

      if self.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

      raw_perf_data = regex.group(1).split(';')

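      # Each annotation entry is expected to be a comma-separated triple: the
      # key to look up in the JSON dump, then the measurement and trace names
      # passed to PrintPerfResult below.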
      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise FatalTestException('Unexpected number of tokens in '
                                     'perf annotation string: ' + raw_perf_set)

          # Process the performance data.
          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])

          PrintPerfResult(perf_set[1], perf_set[2],
                          [result['average']], result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match(r'TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
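    # A test paused under a debugger can take arbitrarily long, so scale the
    # timeout way up rather than killing it.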
    if self.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

  def RunTests(self):
    """Runs the tests, generating the coverage if needed.

    Returns:
      A TestResults object.
    """
    instrumentation_path = (self.instrumentation_class_path +
                            '/android.test.InstrumentationTestRunner')
    instrumentation_args = self._GetInstrumentationArgs()
    for test in self._GetTestsIter():
      test_result = None
      start_date_ms = None
      try:
        self.TestSetup(test)
        start_date_ms = int(time.time() * 1000)
        args_with_filter = dict(instrumentation_args)
        args_with_filter['class'] = test
        logging.info('Instrumentation args: %s', args_with_filter)
        # |test_results| is a list that should contain
        # a single TestResult object.
        (test_results, _) = self.adb.Adb().StartInstrumentation(
            instrumentation_path=instrumentation_path,
            instrumentation_args=args_with_filter,
            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
                          self._GetIndividualTestTimeoutScale(test) *
                          self.tool.GetTimeoutScale()))
        duration_ms = int(time.time() * 1000) - start_date_ms
        assert len(test_results) == 1
        test_result = test_results[0]
        status_code = test_result.GetStatusCode()
        if status_code:
          log = test_result.GetFailureReason()
          if not log:
            log = 'No information.'
          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
            self._TakeScreenshot(test)
          self.test_results.failed += [SingleTestResult(test, start_date_ms,
                                                        duration_ms, log)]
        else:
          result = [SingleTestResult(test, start_date_ms, duration_ms)]
          self.test_results.ok += result
      # Catch exceptions thrown by StartInstrumentation().
      # See ../../third_party/android/testrunner/adb_interface.py
      except (errors.WaitForResponseTimedOutError,
              errors.DeviceUnresponsiveError,
              errors.InstrumentationError) as e:
        if start_date_ms:
          duration_ms = int(time.time() * 1000) - start_date_ms
        else:
          start_date_ms = int(time.time() * 1000)
          duration_ms = 0
        message = str(e)
        if not message:
          message = 'No information.'
        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
                                                       duration_ms,
                                                       message)]
        test_result = None
      self.TestTeardown(test, test_result)
    return self.test_results


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, options, tests, apks):
    BaseTestSharder.__init__(self, attached_devices, options.build_type)
    self.options = options
    self.tests = tests
    self.apks = apks

  def SetupSharding(self, tests):
    """Called before starting the shards."""
    SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
        len(self.attached_devices), tests))

  def CreateShardedTestRunner(self, device, index):
    """Creates a sharded test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A TestRunner object.
    """
    return TestRunner(self.options, device, None, False, index, self.apks, [])


def DispatchJavaTests(options, apks):
  """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests to
  all connected devices. Otherwise, dispatch and run tests on one device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
  test_apk = apks[0]
  # The default annotation for tests which do not have any size annotation.
  default_size_annotation = 'SmallTest'

  def _GetTestsMissingAnnotation(test_apk):
    test_size_annotations = frozenset(['Smoke', 'SmallTest', 'MediumTest',
                                       'LargeTest', 'EnormousTest', 'FlakyTest',
                                       'DisabledTest', 'Manual', 'PerfTest'])
    tests_missing_annotations = []
    for test_method in test_apk.GetTestMethods():
      annotations = frozenset(test_apk.GetTestAnnotations(test_method))
      if (annotations.isdisjoint(test_size_annotations) and
          not apk_info.ApkInfo.IsPythonDrivenTest(test_method)):
        tests_missing_annotations.append(test_method)
    return sorted(tests_missing_annotations)

  if options.annotation:
    available_tests = test_apk.GetAnnotatedTests(options.annotation)
    if default_size_annotation in options.annotation:
      tests_missing_annotations = _GetTestsMissingAnnotation(test_apk)
      if tests_missing_annotations:
        logging.warning('The following tests do not contain any size '
                        'annotation. Assuming "%s":\n%s',
                        default_size_annotation,
                        '\n'.join(tests_missing_annotations))
        available_tests += tests_missing_annotations
  else:
    available_tests = [m for m in test_apk.GetTestMethods()
                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

  tests = []
  if options.test_filter:
    # |available_tests| are in adb instrument format: package.path.class#test.
    filter_without_hash = options.test_filter.replace('#', '.')
    tests = [t for t in available_tests
             if filter_without_hash in t.replace('#', '.')]
  else:
    tests = available_tests

  if not tests:
    logging.warning('No Java tests to run with current args.')
    return TestResults()

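  # Repeat the test list so each test runs |number_of_runs| times.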
  tests *= options.number_of_runs

  attached_devices = android_commands.GetAttachedDevices()
  test_results = TestResults()

  if not attached_devices:
    raise FatalTestException('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  logging.info('Will run: %s', str(tests))

  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    logging.warning('Coverage / debugger cannot be sharded; '
                    'using the first available device.')
    attached_devices = attached_devices[:1]
  sharder = TestSharder(attached_devices, options, tests, apks)
  test_results = sharder.RunShardedTests()
  return test_results