OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" |
6 | 6 |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import re | 9 import re |
10 import sys | 10 import sys |
11 import time | 11 import time |
12 | 12 |
13 from pylib import android_commands | |
14 from pylib import constants | 13 from pylib import constants |
15 from pylib import flag_changer | 14 from pylib import flag_changer |
16 from pylib import valgrind_tools | 15 from pylib import valgrind_tools |
17 from pylib.base import base_test_result | 16 from pylib.base import base_test_result |
18 from pylib.base import base_test_runner | 17 from pylib.base import base_test_runner |
19 from pylib.device import device_errors | 18 from pylib.device import device_errors |
20 from pylib.instrumentation import json_perf_parser | 19 from pylib.instrumentation import json_perf_parser |
21 from pylib.instrumentation import test_result | 20 from pylib.instrumentation import test_result |
22 | 21 |
23 sys.path.append(os.path.join(sys.path[0], | 22 sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib', |
24 os.pardir, os.pardir, 'build', 'util', 'lib', | |
25 'common')) | 23 'common')) |
26 import perf_tests_results_helper # pylint: disable=F0401 | 24 import perf_tests_results_helper # pylint: disable=F0401 |
27 | 25 |
28 | 26 |
29 _PERF_TEST_ANNOTATION = 'PerfTest' | 27 _PERF_TEST_ANNOTATION = 'PerfTest' |
30 | 28 |
31 | 29 |
32 def _GetDataFilesForTestSuite(suite_basename): | 30 def _GetDataFilesForTestSuite(suite_basename): |
33 """Returns a list of data files/dirs needed by the test suite. | 31 """Returns a list of data files/dirs needed by the test suite. |
34 | 32 |
(...skipping 177 matching lines...) |
212 | 210 |
213 Args: | 211 Args: |
214 test: The name of the test to be run. | 212 test: The name of the test to be run. |
215 """ | 213 """ |
216 if not self._IsPerfTest(test): | 214 if not self._IsPerfTest(test): |
217 return | 215 return |
218 self.device.old_interface.Adb().SendCommand( | 216 self.device.old_interface.Adb().SendCommand( |
219 'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX) | 217 'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX) |
220 self.device.old_interface.StartMonitoringLogcat() | 218 self.device.old_interface.StartMonitoringLogcat() |
221 | 219 |
222 def TestTeardown(self, test, raw_result): | 220 def TestTeardown(self, test, result): |
223 """Cleans up the test harness after running a particular test. | 221 """Cleans up the test harness after running a particular test. |
224 | 222 |
225 Depending on the options of this TestRunner this might handle performance | 223 Depending on the options of this TestRunner this might handle performance |
226 tracking. This method will only be called if the test passed. | 224 tracking. This method will only be called if the test passed. |
227 | 225 |
228 Args: | 226 Args: |
229 test: The name of the test that was just run. | 227 test: The name of the test that was just run. |
230 raw_result: result for this test. | 228 result: result for this test. |
231 """ | 229 """ |
232 | 230 |
233 self.tool.CleanUpEnvironment() | 231 self.tool.CleanUpEnvironment() |
234 | 232 |
235 # The logic below relies on the test passing. | 233 # The logic below relies on the test passing. |
236 if not raw_result or raw_result.GetStatusCode(): | 234 if not result or not result.DidRunPass(): |
237 return | 235 return |
238 | 236 |
239 self.TearDownPerfMonitoring(test) | 237 self.TearDownPerfMonitoring(test) |
240 | 238 |
241 if self.coverage_dir: | 239 if self.coverage_dir: |
242 self.device.PullFile( | 240 self.device.PullFile( |
243 self.coverage_device_file, self.coverage_host_file) | 241 self.coverage_device_file, self.coverage_host_file) |
244 self.device.RunShellCommand( | 242 self.device.RunShellCommand( |
245 'rm -f %s' % self.coverage_device_file) | 243 'rm -f %s' % self.coverage_device_file) |
246 | 244 |
(...skipping 97 matching lines...) |
344 def RunInstrumentationTest(self, test, test_package, instr_args, timeout): | 342 def RunInstrumentationTest(self, test, test_package, instr_args, timeout): |
345 """Runs a single instrumentation test. | 343 """Runs a single instrumentation test. |
346 | 344 |
347 Args: | 345 Args: |
348 test: Test class/method. | 346 test: Test class/method. |
349 test_package: Package name of test apk. | 347 test_package: Package name of test apk. |
350 instr_args: Extra key/value to pass to am instrument. | 348 instr_args: Extra key/value to pass to am instrument. |
351 timeout: Timeout time in seconds. | 349 timeout: Timeout time in seconds. |
352 | 350 |
353 Returns: | 351 Returns: |
354 An instance of am_instrument_parser.TestResult object. | 352 An instance of InstrumentationTestResult. |
355 """ | 353 """ |
| 354 # Build the 'am instrument' command |
356 instrumentation_path = ( | 355 instrumentation_path = ( |
357 '%s/%s' % (test_package, self.options.test_runner)) | 356 '%s/%s' % (test_package, self.options.test_runner)) |
358 args_with_filter = dict(instr_args) | |
359 args_with_filter['class'] = test | |
360 logging.info(args_with_filter) | |
361 (raw_results, _) = self.device.old_interface.Adb().StartInstrumentation( | |
362 instrumentation_path=instrumentation_path, | |
363 instrumentation_args=args_with_filter, | |
364 timeout_time=timeout) | |
365 assert len(raw_results) == 1 | |
366 return raw_results[0] | |
367 | 357 |
| 358 cmd = ['am', 'instrument', '-r'] |
| 359 for k, v in instr_args.iteritems(): |
| 360 cmd.extend(['-e', k, "'%s'" % v]) |
| 361 cmd.extend(['-e', 'class', "'%s'" % test]) |
| 362 cmd.extend(['-w', instrumentation_path]) |
368 | 363 |
369 def _RunTest(self, test, timeout): | 364 time_ms = lambda: int(time.time() * 1000) |
| 365 |
| 366 # Run the test. |
| 367 start_ms = time_ms() |
370 try: | 368 try: |
371 return self.RunInstrumentationTest( | 369 instr_output = self.device.RunShellCommand( |
372 test, self.test_pkg.GetPackageName(), | 370 cmd, timeout=timeout, retries=0) |
373 self._GetInstrumentationArgs(), timeout) | 371 except device_errors.CommandTimeoutError: |
374 except (device_errors.CommandTimeoutError, | 372 return test_result.InstrumentationTestResult( |
375 # TODO(jbudorick) Remove this once the underlying implementations | 373 test, base_test_result.ResultType.TIMEOUT, start_ms, |
376 # for the above are switched or wrapped. | 374 time_ms() - start_ms) |
377 android_commands.errors.WaitForResponseTimedOutError): | 375 duration_ms = time_ms() - start_ms |
378 logging.info('Ran the test with timeout of %ds.' % timeout) | |
379 raise | |
380 | 376 |
381 #override | 377 # Parse the test output |
382 def RunTest(self, test): | 378 _, _, statuses = self._ParseAmInstrumentRawOutput(instr_output) |
383 raw_result = None | 379 return self._GenerateTestResult(test, statuses, start_ms, duration_ms) |
384 start_date_ms = None | 380 |
385 results = base_test_result.TestRunResults() | 381 @staticmethod |
386 timeout = (self._GetIndividualTestTimeoutSecs(test) * | 382 def _ParseAmInstrumentRawOutput(raw_output): |
387 self._GetIndividualTestTimeoutScale(test) * | 383 """Parses the output of an |am instrument -r| call. |
388 self.tool.GetTimeoutScale()) | 384 |
389 try: | 385 Args: |
390 self.TestSetup(test) | 386 raw_output: the output of an |am instrument -r| call as a list of lines |
391 start_date_ms = int(time.time()) * 1000 | 387 Returns: |
392 raw_result = self._RunTest(test, timeout) | 388 A 3-tuple containing: |
393 duration_ms = int(time.time()) * 1000 - start_date_ms | 389 - the instrumentation code as an integer |
394 status_code = raw_result.GetStatusCode() | 390 - the instrumentation result as a list of lines |
395 if status_code: | 391 - the instrumentation statuses received as a list of 2-tuples |
396 if self.options.screenshot_failures: | 392 containing: |
397 self._TakeScreenshot(test) | 393 - the status code as an integer |
398 log = raw_result.GetFailureReason() | 394 - the bundle dump as a dict mapping string keys to a list of |
399 if not log: | 395 strings, one for each line. |
400 log = 'No information.' | 396 """ |
| 397 INSTR_STATUS = 'INSTRUMENTATION_STATUS: ' |
| 398 INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: ' |
| 399 INSTR_RESULT = 'INSTRUMENTATION_RESULT: ' |
| 400 INSTR_CODE = 'INSTRUMENTATION_CODE: ' |
| 401 |
| 402 last = None |
| 403 instr_code = None |
| 404 instr_result = [] |
| 405 instr_statuses = [] |
| 406 bundle = {} |
| 407 for line in raw_output: |
| 408 if line.startswith(INSTR_STATUS): |
| 409 instr_var = line[len(INSTR_STATUS):] |
| 410 if '=' in instr_var: |
| 411 k, v = instr_var.split('=', 1) |
| 412 bundle[k] = [v] |
| 413 last = INSTR_STATUS |
| 414 last_key = k |
| 415 else: |
| 416 logging.debug('Unknown "%s" line: %s' % (INSTR_STATUS, line)) |
| 417 |
| 418 elif line.startswith(INSTR_STATUS_CODE): |
| 419 instr_status = line[len(INSTR_STATUS_CODE):] |
| 420 instr_statuses.append((int(instr_status), bundle)) |
| 421 bundle = {} |
| 422 last = INSTR_STATUS_CODE |
| 423 |
| 424 elif line.startswith(INSTR_RESULT): |
| 425 instr_result.append(line[len(INSTR_RESULT):]) |
| 426 last = INSTR_RESULT |
| 427 |
| 428 elif line.startswith(INSTR_CODE): |
| 429 instr_code = int(line[len(INSTR_CODE):]) |
| 430 last = INSTR_CODE |
| 431 |
| 432 elif last == INSTR_STATUS: |
| 433 bundle[last_key].append(line) |
| 434 |
| 435 elif last == INSTR_RESULT: |
| 436 instr_result.append(line) |
| 437 |
| 438 return (instr_code, instr_result, instr_statuses) |
| 439 |
| 440 def _GenerateTestResult(self, test, instr_statuses, start_ms, duration_ms): |
| 441 """Generate the result of |test| from |instr_statuses|. |
| 442 |
| 443 Args: |
| 444 instr_statuses: A list of 2-tuples containing: |
| 445 - the status code as an integer |
| 446 - the bundle dump as a dict mapping string keys to a list of strings |
| 447 Note that this is the same as the third item in the 3-tuple returned by |
| 448 |_ParseAmInstrumentRawOutput|. |
| 449 start_ms: The start time of the test in milliseconds. |
| 450 duration_ms: The duration of the test in milliseconds. |
| 451 Returns: |
| 452 An InstrumentationTestResult object. |
| 453 """ |
| 454 INSTR_STATUS_CODE_START = 1 |
| 455 INSTR_STATUS_CODE_OK = 0 |
| 456 INSTR_STATUS_CODE_ERROR = -1 |
| 457 INSTR_STATUS_CODE_FAIL = -2 |
| 458 |
| 459 log = '' |
| 460 result_type = base_test_result.ResultType.UNKNOWN |
| 461 |
| 462 for status_code, bundle in instr_statuses: |
| 463 if status_code == INSTR_STATUS_CODE_START: |
| 464 pass |
| 465 elif status_code == INSTR_STATUS_CODE_OK: |
| 466 bundle_test = '%s#%s' % ( |
| 467 ''.join(bundle.get('class', [''])), |
| 468 ''.join(bundle.get('test', ['']))) |
| 469 skipped = ''.join(bundle.get('test_skipped', [''])) |
| 470 |
| 471 if (test == bundle_test and |
| 472 result_type == base_test_result.ResultType.UNKNOWN): |
| 473 result_type = base_test_result.ResultType.PASS |
| 474 elif skipped.lower() in ('true', '1', 'yes'): |
| 475 result_type = base_test_result.ResultType.SKIP |
| 476 logging.info('Skipped ' + test) |
| 477 else: |
| 478 if status_code not in (INSTR_STATUS_CODE_ERROR, |
| 479 INSTR_STATUS_CODE_FAIL): |
| 480 logging.info('Unrecognized status code %d. Handling as an error.', |
| 481 status_code) |
401 result_type = base_test_result.ResultType.FAIL | 482 result_type = base_test_result.ResultType.FAIL |
| 483 if 'stack' in bundle: |
| 484 log = '\n'.join(bundle['stack']) |
402 # Dismiss any error dialogs. Limit the number in case we have an error | 485 # Dismiss any error dialogs. Limit the number in case we have an error |
403 # loop or we are failing to dismiss. | 486 # loop or we are failing to dismiss. |
404 for _ in xrange(10): | 487 for _ in xrange(10): |
405 package = self.device.old_interface.DismissCrashDialogIfNeeded() | 488 package = self.device.old_interface.DismissCrashDialogIfNeeded() |
406 if not package: | 489 if not package: |
407 break | 490 break |
408 # Assume test package convention of ".test" suffix | 491 # Assume test package convention of ".test" suffix |
409 if package in self.test_pkg.GetPackageName(): | 492 if package in self.test_pkg.GetPackageName(): |
410 result_type = base_test_result.ResultType.CRASH | 493 result_type = base_test_result.ResultType.CRASH |
411 break | 494 break |
412 result = test_result.InstrumentationTestResult( | 495 |
413 test, result_type, start_date_ms, duration_ms, log=log) | 496 return test_result.InstrumentationTestResult( |
414 else: | 497 test, result_type, start_ms, duration_ms, log=log) |
415 result = test_result.InstrumentationTestResult( | 498 |
416 test, base_test_result.ResultType.PASS, start_date_ms, duration_ms) | 499 #override |
| 500 def RunTest(self, test): |
| 501 results = base_test_result.TestRunResults() |
| 502 timeout = (self._GetIndividualTestTimeoutSecs(test) * |
| 503 self._GetIndividualTestTimeoutScale(test) * |
| 504 self.tool.GetTimeoutScale()) |
| 505 try: |
| 506 self.TestSetup(test) |
| 507 result = self.RunInstrumentationTest( |
| 508 test, self.test_pkg.GetPackageName(), self._GetInstrumentationArgs(), |
| 509 timeout) |
417 results.AddResult(result) | 510 results.AddResult(result) |
418 # Catch exceptions thrown by StartInstrumentation(). | |
419 # See ../../third_party/android/testrunner/adb_interface.py | |
420 except (device_errors.CommandTimeoutError, | 511 except (device_errors.CommandTimeoutError, |
421 device_errors.DeviceUnreachableError, | 512 device_errors.DeviceUnreachableError) as e: |
422 # TODO(jbudorick) Remove these once the underlying implementations | |
423 # for the above are switched or wrapped. | |
424 android_commands.errors.WaitForResponseTimedOutError, | |
425 android_commands.errors.DeviceUnresponsiveError, | |
426 android_commands.errors.InstrumentationError), e: | |
427 if start_date_ms: | |
428 duration_ms = int(time.time()) * 1000 - start_date_ms | |
429 else: | |
430 start_date_ms = int(time.time()) * 1000 | |
431 duration_ms = 0 | |
432 message = str(e) | 513 message = str(e) |
433 if not message: | 514 if not message: |
434 message = 'No information.' | 515 message = 'No information.' |
435 results.AddResult(test_result.InstrumentationTestResult( | 516 results.AddResult(test_result.InstrumentationTestResult( |
436 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, | 517 test, base_test_result.ResultType.CRASH, int(time.time() * 1000), 0, |
437 log=message)) | 518 log=message)) |
438 raw_result = None | 519 self.TestTeardown(test, results) |
439 self.TestTeardown(test, raw_result) | |
440 return (results, None if results.DidRunPass() else test) | 520 return (results, None if results.DidRunPass() else test) |
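Illustration (not part of the patch): a minimal, standalone Python 2 sketch of the parsing approach that _ParseAmInstrumentRawOutput introduces, run against invented 'am instrument -r' output for one passing test. The class, test, and package names below are made up; the sketch only shows the shape of the (instrumentation code, result lines, statuses) 3-tuple that the new RunInstrumentationTest code consumes.

# Standalone sketch of the parser added in this patch (Python 2).
# All sample output lines and names below are invented for illustration.
def parse_am_instrument_raw(raw_output):
  INSTR_STATUS = 'INSTRUMENTATION_STATUS: '
  INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
  INSTR_RESULT = 'INSTRUMENTATION_RESULT: '
  INSTR_CODE = 'INSTRUMENTATION_CODE: '

  last = None
  last_key = None
  instr_code = None
  instr_result = []
  instr_statuses = []
  bundle = {}
  for line in raw_output:
    if line.startswith(INSTR_STATUS):
      instr_var = line[len(INSTR_STATUS):]
      if '=' in instr_var:
        k, v = instr_var.split('=', 1)
        bundle[k] = [v]
        last = INSTR_STATUS
        last_key = k
    elif line.startswith(INSTR_STATUS_CODE):
      # A status code closes the bundle accumulated since the last one.
      instr_statuses.append((int(line[len(INSTR_STATUS_CODE):]), bundle))
      bundle = {}
      last = INSTR_STATUS_CODE
    elif line.startswith(INSTR_RESULT):
      instr_result.append(line[len(INSTR_RESULT):])
      last = INSTR_RESULT
    elif line.startswith(INSTR_CODE):
      instr_code = int(line[len(INSTR_CODE):])
      last = INSTR_CODE
    elif last == INSTR_STATUS:
      # Continuation of a multi-line bundle value (e.g. a stack trace).
      bundle[last_key].append(line)
    elif last == INSTR_RESULT:
      instr_result.append(line)
  return instr_code, instr_result, instr_statuses


if __name__ == '__main__':
  # Invented output for a single passing test method.
  sample = [
      'INSTRUMENTATION_STATUS: class=org.example.FooTest',
      'INSTRUMENTATION_STATUS: test=testBar',
      'INSTRUMENTATION_STATUS_CODE: 1',   # test started
      'INSTRUMENTATION_STATUS: class=org.example.FooTest',
      'INSTRUMENTATION_STATUS: test=testBar',
      'INSTRUMENTATION_STATUS_CODE: 0',   # test passed
      'INSTRUMENTATION_RESULT: stream=OK (1 test)',
      'INSTRUMENTATION_CODE: -1',
  ]
  code, result, statuses = parse_am_instrument_raw(sample)
  print code      # -1
  print statuses  # [(1, {'class': [...], 'test': [...]}), (0, {...})]

Given output like this, _GenerateTestResult would see a start status (code 1) followed by an OK status (code 0) whose bundle names the expected test, and would report ResultType.PASS.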