# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs the Java tests. See run_instrumentation_tests.py for more information."""

import fnmatch
import logging
import os
import re
import shutil
import sys
import time

from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import valgrind_tools
from pylib.android_commands import errors
from pylib.base import sharded_tests_queue
from pylib.base.base_test_runner import BaseTestRunner
from pylib.base.base_test_sharder import BaseTestSharder, SetTestsContainer
from pylib.base.test_result import SingleTestResult, TestResults
from pylib.forwarder import Forwarder
from pylib.json_perf_parser import GetAverageRunInfoFromJSONString
from pylib.perf_tests_helper import PrintPerfResult

import apk_info


_PERF_TEST_ANNOTATION = 'PerfTest'


class FatalTestException(Exception):
  """A fatal test exception."""
  pass


def _TestNameToExpectation(test_name):
  # A test name is a Package.Path.Class#testName; convert to what we use in
  # the expectation file.
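  # For example, 'com.foo.bar.FooTest#testBar' becomes 'FooTest.testBar'.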
  return '.'.join(test_name.replace('#', '.').split('.')[-2:])


def FilterTests(test_names, pattern_list, inclusive):
  """Filters |test_names| using a list of patterns.

  Args:
    test_names: A list of test names.
    pattern_list: A list of patterns.
    inclusive: If True, returns the tests that match any pattern. If False,
               returns the tests that do not match any pattern.
  Returns:
    A list of test names.
  """
  ret = []
  for t in test_names:
    has_match = False
    for pattern in pattern_list:
      has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
                                               pattern)
    if has_match == inclusive:
      ret += [t]
  return ret


class TestRunner(BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
                           'external/emma/lib/emma.jar')
  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
  _COVERAGE_FILENAME = 'coverage.ec'
  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
                           _COVERAGE_FILENAME)
  _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
                                                         ''),
                                          'out/target/common/obj/APPS',
                                          'Chrome_intermediates/coverage.em')
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
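  # Maps device serial -> True once the test data files have been pushed to it.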
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
               ports_to_forward):
89 """Create a new TestRunner. | |
90 | |
91 Args: | |
92 options: An options object with the following required attributes: | |
93 - build_type: 'Release' or 'Debug'. | |
94 - install_apk: Re-installs the apk if opted. | |
95 - save_perf_json: Whether or not to save the JSON file from UI perf | |
96 tests. | |
97 - screenshot_failures: Take a screenshot for a test failure | |
98 - tool: Name of the Valgrind tool. | |
99 - wait_for_debugger: blocks until the debugger is connected. | |
100 - disable_assertions: Whether to disable java assertions on the device. | |
101 device: Attached android device. | |
102 tests_iter: A list of tests to be run. | |
103 coverage: Collects coverage information if opted. | |
104 shard_index: shard # for this TestRunner, used to create unique port | |
105 numbers. | |
106 apks: A list of ApkInfo objects need to be installed. The first element | |
107 should be the tests apk, the rests could be the apks used in test. | |
108 The default is ChromeTest.apk. | |
109 ports_to_forward: A list of port numbers for which to set up forwarders. | |
110 Can be optionally requested by a test case. | |
111 Raises: | |
112 FatalTestException: if coverage metadata is not available. | |
113 """ | |
    BaseTestRunner.__init__(
        self, device, options.tool, shard_index, options.build_type)

    if not apks:
      apks = [apk_info.ApkInfo(options.test_apk_path,
                               options.test_apk_jar_path)]

    self.build_type = options.build_type
    self.install_apk = options.install_apk
    self.test_data = options.test_data
    self.save_perf_json = options.save_perf_json
    self.screenshot_failures = options.screenshot_failures
    self.wait_for_debugger = options.wait_for_debugger
    self.disable_assertions = options.disable_assertions

    self.tests_iter = tests_iter
    self.coverage = coverage
    self.apks = apks
    self.test_apk = apks[0]
    self.instrumentation_class_path = self.test_apk.GetPackageName()
    self.ports_to_forward = ports_to_forward

    self.test_results = TestResults()
    self.forwarder = None

    if self.coverage:
      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Coverage meta info [' +
                                 TestRunner._COVERAGE_META_INFO_PATH +
                                 '] does not exist.')
      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
                                 '] does not exist.')

  def _GetTestsIter(self):
    if not self.tests_iter:
      # multiprocessing.Queue can't be pickled across processes if it is set
      # as a member in the constructor, so grab it here instead.
      self.tests_iter = BaseTestSharder.tests_container
    assert self.tests_iter
    return self.tests_iter

  def CopyTestFilesOnce(self):
    """Pushes the test data files to the device. Installs the apk if opted."""
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return
    for dest_host_pair in self.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = constants.CHROME_DIR + '/' + host_src
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path,
                              self.adb.GetExternalStorage() + '/' +
                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
    if self.install_apk:
      for apk in self.apks:
        self.adb.ManagedInstall(apk.GetApkPath(),
                                package_name=apk.GetPackageName())
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True

  def SaveCoverageData(self, test):
    """Saves the Emma coverage data before it's overwritten by the next test.

    Args:
      test: the test whose coverage data is collected.
    """
    if not self.coverage:
      return
    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
                               constants.CHROME_DIR):
      logging.error('ERROR: Unable to find file ' +
                    TestRunner._COVERAGE_RESULT_PATH +
                    ' on the device for test ' + test)
    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
                                        TestRunner._COVERAGE_FILENAME)
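    # Merge the newly pulled coverage file into the accumulated results if
    # they exist; otherwise the pulled file becomes the initial merged file.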
    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
             '-in', pulled_coverage_file,
             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
      cmd_helper.RunCmd(cmd)
    else:
      shutil.copy(pulled_coverage_file,
                  TestRunner._COVERAGE_MERGED_FILENAME)
    os.remove(pulled_coverage_file)

  def GenerateCoverageReportIfNeeded(self):
211 """Uses the Emma to generate a coverage report and a html page.""" | |
    if not self.coverage:
      return
    cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
           'emma', 'report', '-r', 'html',
           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
           '-in', TestRunner._COVERAGE_META_INFO_PATH]
    cmd_helper.RunCmd(cmd)
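    # Archive the generated HTML report under a timestamped directory in the
    # Emma web root.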
    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
    shutil.copytree('coverage', new_dir)

    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                              'Latest_Coverage_Run')
    if os.path.exists(latest_dir):
      shutil.rmtree(latest_dir)
    os.mkdir(latest_dir)
    webserver_new_index = os.path.join(new_dir, 'index.html')
    webserver_new_files = os.path.join(new_dir, '_files')
    webserver_latest_index = os.path.join(latest_dir, 'index.html')
    webserver_latest_files = os.path.join(latest_dir, '_files')
    # Set up new symlinks to the latest result.
    os.symlink(webserver_new_index, webserver_latest_index)
    os.symlink(webserver_new_files, webserver_latest_files)
    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])

  def _GetInstrumentationArgs(self):
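    """Returns extra instrumentation arguments based on the runner options."""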
    ret = {}
    if self.coverage:
      ret['coverage'] = 'true'
    if self.wait_for_debugger:
      ret['debug'] = 'true'
    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png')
    logging.info('Taking screenshot named %s', screenshot_name)
    self.adb.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s, non-rooted device',
                      self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(enable=not self.disable_assertions):
        self.adb.Reboot(full_reboot=False)

    # Give the HTTP server a different default port on each shard; otherwise
    # multiple processes may race to launch lighttpd on the same port at the
    # same time.
    http_server_ports = self.LaunchTestHttpServer(
        os.path.join(constants.CHROME_DIR),
        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
    if self.ports_to_forward:
      port_pairs = [(port, port) for port in self.ports_to_forward]
      # We need to remember which ports the HTTP server is using, since the
      # forwarder will stomp on them otherwise.
      port_pairs.append(http_server_ports)
      self.forwarder = Forwarder(self.adb, self.build_type)
      self.forwarder.Run(port_pairs, self.tool, '127.0.0.1')
    self.CopyTestFilesOnce()
    self.flags.AddFlags(['--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.forwarder:
      self.forwarder.Close()
    self.GenerateCoverageReportIfNeeded()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self.RestartHttpServerForwarderIfNecessary()

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
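    # Remove any stale perf output files left over from previous runs before
    # monitoring logcat for new results.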
    self.adb.Adb().SendCommand('shell rm ' +
                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, test_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle coverage
    tracking or performance tracking. This method will only be called if the
    test passed.

    Args:
      test: The name of the test that was just run.
      test_result: result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not test_result or test_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)
    self.SaveCoverageData(test)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      FatalTestException: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait and grab annotation data so we can figure out which traces to parse.
    regex = self.adb.WaitForLogMatch(re.compile(r'\*\*PERFANNOTATION\(' +
                                                raw_test_name +
                                                r'\):(.*)'), None)

    # If the test is set to run on a specific device type only (i.e. only
    # tablet or only phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- so we
    # know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.adb.GetProtectedFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise FatalTestException('Perf file does not exist or is empty')

      if self.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)

      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise FatalTestException('Unexpected number of tokens in '
                                     'perf annotation string: ' + raw_perf_set)

          # Process the performance data
          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])

          PrintPerfResult(perf_set[1], perf_set[2],
                          [result['average']], result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
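    """Sets the Chrome timeout scale on the device for the given |test|."""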
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
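    # Allow much more time when the test waits for a debugger to attach.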
    if self.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
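    # Timeouts are tiered by test size annotation, from 'Manual' (longest)
    # down to the one-minute default.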
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

  def RunTests(self):
    """Runs the tests, generating the coverage if needed.

    Returns:
      A TestResults object.
    """
    instrumentation_path = (self.instrumentation_class_path +
                            '/android.test.InstrumentationTestRunner')
    instrumentation_args = self._GetInstrumentationArgs()
    for test in self._GetTestsIter():
      test_result = None
      start_date_ms = None
      try:
        self.TestSetup(test)
        start_date_ms = int(time.time()) * 1000
        args_with_filter = dict(instrumentation_args)
        args_with_filter['class'] = test
        # |test_results| is a list that should contain
        # a single TestResult object.
        logging.warn(args_with_filter)
        (test_results, _) = self.adb.Adb().StartInstrumentation(
            instrumentation_path=instrumentation_path,
            instrumentation_args=args_with_filter,
            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
                          self._GetIndividualTestTimeoutScale(test) *
                          self.tool.GetTimeoutScale()))
        duration_ms = int(time.time()) * 1000 - start_date_ms
        assert len(test_results) == 1
        test_result = test_results[0]
        status_code = test_result.GetStatusCode()
        if status_code:
          log = test_result.GetFailureReason()
          if not log:
            log = 'No information.'
          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
            self._TakeScreenshot(test)
          self.test_results.failed += [SingleTestResult(test, start_date_ms,
                                                        duration_ms, log)]
        else:
          result = [SingleTestResult(test, start_date_ms, duration_ms)]
          self.test_results.ok += result
      # Catch exceptions thrown by StartInstrumentation().
      # See ../../third_party/android/testrunner/adb_interface.py
      except (errors.WaitForResponseTimedOutError,
              errors.DeviceUnresponsiveError,
              errors.InstrumentationError), e:
        if start_date_ms:
          duration_ms = int(time.time()) * 1000 - start_date_ms
        else:
          start_date_ms = int(time.time()) * 1000
          duration_ms = 0
        message = str(e)
        if not message:
          message = 'No information.'
        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
                                                       duration_ms,
                                                       message)]
        test_result = None
      self.TestTeardown(test, test_result)
    return self.test_results


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, options, tests, apks):
    BaseTestSharder.__init__(self, attached_devices, options.build_type)
    self.options = options
    self.tests = tests
    self.apks = apks

  def SetupSharding(self, tests):
    """Called before starting the shards."""
    SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
        len(self.attached_devices), tests))

  def CreateShardedTestRunner(self, device, index):
    """Creates a sharded test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A TestRunner object.
    """
    return TestRunner(self.options, device, None, False, index, self.apks, [])


def DispatchJavaTests(options, apks):
  """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests to
  all connected devices. Otherwise, dispatch and run tests on one device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
  test_apk = apks[0]
  # The default annotation for tests which do not have any size annotation.
  default_size_annotation = 'SmallTest'

  def _GetTestsMissingAnnotation(test_apk):
    test_size_annotations = frozenset(['Smoke', 'SmallTest', 'MediumTest',
                                       'LargeTest', 'EnormousTest', 'FlakyTest',
                                       'DisabledTest', 'Manual', 'PerfTest'])
    tests_missing_annotations = []
    for test_method in test_apk.GetTestMethods():
      annotations = frozenset(test_apk.GetTestAnnotations(test_method))
      if (annotations.isdisjoint(test_size_annotations) and
          not apk_info.ApkInfo.IsPythonDrivenTest(test_method)):
        tests_missing_annotations.append(test_method)
    return sorted(tests_missing_annotations)

  if options.annotation:
    available_tests = test_apk.GetAnnotatedTests(options.annotation)
    if options.annotation.count(default_size_annotation) > 0:
      tests_missing_annotations = _GetTestsMissingAnnotation(test_apk)
      if tests_missing_annotations:
        logging.warning('The following tests do not contain any annotation. '
                        'Assuming "%s":\n%s',
                        default_size_annotation,
                        '\n'.join(tests_missing_annotations))
        available_tests += tests_missing_annotations
  else:
    available_tests = [m for m in test_apk.GetTestMethods()
                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
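  # Coverage is only collected when the build was instrumented with Emma.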
  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

  tests = []
  if options.test_filter:
    # |available_tests| are in adb instrument format: package.path.class#test.
    filter_without_hash = options.test_filter.replace('#', '.')
    tests = [t for t in available_tests
             if filter_without_hash in t.replace('#', '.')]
  else:
    tests = available_tests

  if not tests:
    logging.warning('No Java tests to run with current args.')
    return TestResults()

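  # Run each test |options.number_of_runs| times.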
  tests *= options.number_of_runs

  attached_devices = android_commands.GetAttachedDevices()
  test_results = TestResults()

  if not attached_devices:
    raise FatalTestException('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  logging.info('Will run: %s', str(tests))

  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    logging.warning('Coverage / debugger cannot be sharded; '
                    'using the first available device.')
    attached_devices = attached_devices[:1]
  sharder = TestSharder(attached_devices, options, tests, apks)
  test_results = sharder.RunShardedTests()
  return test_results