| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" | 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" |
| 6 | 6 |
| 7 import logging | 7 import logging |
| 8 import os | 8 import os |
| 9 import re | 9 import re |
| 10 import shutil | 10 import shutil |
| 11 import sys | 11 import sys |
| 12 import time | 12 import time |
| 13 | 13 |
| 14 from pylib import android_commands | 14 from pylib import android_commands |
| 15 from pylib import cmd_helper | 15 from pylib import cmd_helper |
| 16 from pylib import constants | 16 from pylib import constants |
| 17 from pylib import forwarder | 17 from pylib import forwarder |
| 18 from pylib import json_perf_parser | 18 from pylib import json_perf_parser |
| 19 from pylib import perf_tests_helper | 19 from pylib import perf_tests_helper |
| 20 from pylib import valgrind_tools | 20 from pylib import valgrind_tools |
| | 21 from pylib.base import base_test_result |
| 21 from pylib.base import base_test_runner | 22 from pylib.base import base_test_runner |
| 22 from pylib.base import test_result | 23 |
| | 24 import test_result |
| 23 | 25 |
| 24 | 26 |
| 25 _PERF_TEST_ANNOTATION = 'PerfTest' | 27 _PERF_TEST_ANNOTATION = 'PerfTest' |
| 26 | 28 |
| 27 | 29 |
| 28 class TestRunner(base_test_runner.BaseTestRunner): | 30 class TestRunner(base_test_runner.BaseTestRunner): |
| 29 """Responsible for running a series of tests connected to a single device.""" | 31 """Responsible for running a series of tests connected to a single device.""" |
| 30 | 32 |
| 31 _DEVICE_DATA_DIR = 'chrome/test/data' | 33 _DEVICE_DATA_DIR = 'chrome/test/data' |
| 32 _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''), | 34 _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''), |
| (...skipping 331 matching lines...) |
| 364 if 'Manual' in annotations: | 366 if 'Manual' in annotations: |
| 365 return 600 * 60 | 367 return 600 * 60 |
| 366 if 'External' in annotations: | 368 if 'External' in annotations: |
| 367 return 10 * 60 | 369 return 10 * 60 |
| 368 if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations: | 370 if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations: |
| 369 return 5 * 60 | 371 return 5 * 60 |
| 370 if 'MediumTest' in annotations: | 372 if 'MediumTest' in annotations: |
| 371 return 3 * 60 | 373 return 3 * 60 |
| 372 return 1 * 60 | 374 return 1 * 60 |
| 373 | 375 |
| | 376 #override. |
| 374 def RunTest(self, test): | 377 def RunTest(self, test): |
| 375 """Runs the test, generating the coverage if needed. | |
| 376 | |
| 377 Returns: | |
| 378 A test_result.TestResults object. | |
| 379 """ | |
| 380 raw_result = None | 378 raw_result = None |
| 381 start_date_ms = None | 379 start_date_ms = None |
| 382 test_results = test_result.TestResults() | 380 results = base_test_result.TestRunResults() |
| 383 timeout=(self._GetIndividualTestTimeoutSecs(test) * | 381 timeout=(self._GetIndividualTestTimeoutSecs(test) * |
| 384 self._GetIndividualTestTimeoutScale(test) * | 382 self._GetIndividualTestTimeoutScale(test) * |
| 385 self.tool.GetTimeoutScale()) | 383 self.tool.GetTimeoutScale()) |
| 386 try: | 384 try: |
| 387 self.TestSetup(test) | 385 self.TestSetup(test) |
| 388 start_date_ms = int(time.time()) * 1000 | 386 start_date_ms = int(time.time()) * 1000 |
| 389 | 387 |
| 390 if self.is_uiautomator_test: | 388 if self.is_uiautomator_test: |
| 391 self.adb.ClearApplicationState(self.package_name) | 389 self.adb.ClearApplicationState(self.package_name) |
| 392 # TODO(frankf): Stop-gap solution. Should use annotations. | 390 # TODO(frankf): Stop-gap solution. Should use annotations. |
| 393 if 'FirstRun' in test: | 391 if 'FirstRun' in test: |
| 394 self.flags.RemoveFlags(['--disable-fre']) | 392 self.flags.RemoveFlags(['--disable-fre']) |
| 395 else: | 393 else: |
| 396 self.flags.AddFlags(['--disable-fre']) | 394 self.flags.AddFlags(['--disable-fre']) |
| 397 raw_result = self.adb.RunUIAutomatorTest( | 395 raw_result = self.adb.RunUIAutomatorTest( |
| 398 test, self.test_pkg.GetPackageName(), timeout) | 396 test, self.test_pkg.GetPackageName(), timeout) |
| 399 else: | 397 else: |
| 400 raw_result = self.adb.RunInstrumentationTest( | 398 raw_result = self.adb.RunInstrumentationTest( |
| 401 test, self.test_pkg.GetPackageName(), | 399 test, self.test_pkg.GetPackageName(), |
| 402 self._GetInstrumentationArgs(), timeout) | 400 self._GetInstrumentationArgs(), timeout) |
| 403 | 401 |
| 404 duration_ms = int(time.time()) * 1000 - start_date_ms | 402 duration_ms = int(time.time()) * 1000 - start_date_ms |
| 405 status_code = raw_result.GetStatusCode() | 403 status_code = raw_result.GetStatusCode() |
| 406 if status_code: | 404 if status_code: |
| 407 log = raw_result.GetFailureReason() | 405 log = raw_result.GetFailureReason() |
| 408 if not log: | 406 if not log: |
| 409 log = 'No information.' | 407 log = 'No information.' |
| 410 if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0: | 408 if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0: |
| 411 self._TakeScreenshot(test) | 409 self._TakeScreenshot(test) |
| 412 test_results.failed = [test_result.SingleTestResult( | 410 result = test_result.InstrumentationTestResult( |
| 413 test, start_date_ms, duration_ms, log)] | 411 test, base_test_result.ResultType.FAIL, start_date_ms, duration_ms, |
| | 412 log=log) |
| 414 else: | 413 else: |
| 415 test_results.ok = [test_result.SingleTestResult(test, start_date_ms, | 414 result = test_result.InstrumentationTestResult( |
| 416 duration_ms)] | 415 test, base_test_result.ResultType.PASS, start_date_ms, duration_ms) |
| | 416 results.AddResult(result) |
| 417 # Catch exceptions thrown by StartInstrumentation(). | 417 # Catch exceptions thrown by StartInstrumentation(). |
| 418 # See ../../third_party/android/testrunner/adb_interface.py | 418 # See ../../third_party/android/testrunner/adb_interface.py |
| 419 except (android_commands.errors.WaitForResponseTimedOutError, | 419 except (android_commands.errors.WaitForResponseTimedOutError, |
| 420 android_commands.errors.DeviceUnresponsiveError, | 420 android_commands.errors.DeviceUnresponsiveError, |
| 421 android_commands.errors.InstrumentationError), e: | 421 android_commands.errors.InstrumentationError), e: |
| 422 if start_date_ms: | 422 if start_date_ms: |
| 423 duration_ms = int(time.time()) * 1000 - start_date_ms | 423 duration_ms = int(time.time()) * 1000 - start_date_ms |
| 424 else: | 424 else: |
| 425 start_date_ms = int(time.time()) * 1000 | 425 start_date_ms = int(time.time()) * 1000 |
| 426 duration_ms = 0 | 426 duration_ms = 0 |
| 427 message = str(e) | 427 message = str(e) |
| 428 if not message: | 428 if not message: |
| 429 message = 'No information.' | 429 message = 'No information.' |
| 430 test_results.crashed = [test_result.SingleTestResult( | 430 results.AddResult(test_result.InstrumentationTestResult( |
| 431 test, start_date_ms, duration_ms, message)] | 431 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, |
| | 432 log=message)) |
| 432 raw_result = None | 433 raw_result = None |
| 433 self.TestTeardown(test, raw_result) | 434 self.TestTeardown(test, raw_result) |
| 434 return (test_results, None if test_results.ok else test) | 435 return (results, None if results.DidRunPass() else test) |
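
Two sketches follow for context. First, the annotation-driven timeout hunk that this patch leaves unchanged: a minimal, standalone rendering of `_GetIndividualTestTimeoutSecs`. The timeout mapping comes straight from the diff; the scale values in the usage line are illustrative assumptions.

```python
# A minimal sketch of the annotation-driven timeout table from
# _GetIndividualTestTimeoutSecs above. The mapping and the final
# multiplication by per-test and tool scale factors come from the diff;
# the scale values in the usage line are illustrative assumptions.

_PERF_TEST_ANNOTATION = 'PerfTest'


def GetIndividualTestTimeoutSecs(annotations):
  """Returns a base timeout in seconds chosen by test-size annotation."""
  if 'Manual' in annotations:
    return 600 * 60  # Manual tests may run for up to ten hours.
  if 'External' in annotations:
    return 10 * 60
  if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
    return 5 * 60
  if 'MediumTest' in annotations:
    return 3 * 60
  return 1 * 60  # Default for SmallTest and unannotated tests.


# RunTest scales the base value: base * per-test scale * tool scale
# (e.g. a Valgrind-style tool reports a scale > 1 to stretch timeouts).
timeout = GetIndividualTestTimeoutSecs(['MediumTest']) * 1 * 1.0
print(timeout)  # 180.0
```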
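Second, the substance of the patch: RunTest migrates from `test_result.TestResults`, which tracked separate `ok`/`failed`/`crashed` lists, to `base_test_result.TestRunResults`, which aggregates typed results and answers a single `DidRunPass()` query. The real `base_test_result` module lives in Chromium's pylib and is not shown in this diff; the sketch below mirrors only the calls the patch makes (`ResultType`, `AddResult`, `DidRunPass`), and its class bodies are assumptions.

```python
# A hedged reconstruction of the result API this patch migrates to. Only
# the names used by the patch (ResultType, AddResult, DidRunPass) are
# taken from the diff; the simplified bodies below are assumptions.


class ResultType(object):
  PASS = 'PASS'
  FAIL = 'FAIL'
  CRASH = 'CRASH'


class BaseTestResult(object):
  """One typed result; the patch's InstrumentationTestResult adds timing."""

  def __init__(self, name, result_type, log=''):
    self.name = name
    self.result_type = result_type
    self.log = log


class TestRunResults(object):
  """Aggregates typed results instead of separate ok/failed/crashed lists."""

  def __init__(self):
    self._results = []

  def AddResult(self, result):
    self._results.append(result)

  def DidRunPass(self):
    # The run passed only if every recorded result is a PASS.
    return all(r.result_type == ResultType.PASS for r in self._results)


results = TestRunResults()
results.AddResult(BaseTestResult('FooTest#testBar', ResultType.PASS))
print(results.DidRunPass())  # True
```

This single boolean is what lets the new RunTest return `(results, None if results.DidRunPass() else test)`: the retry machinery only needs to know whether the test should be rerun, not which of the old per-bucket lists it landed in.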