| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 | 5 |
| 6 import logging | 6 import logging |
| 7 import re | 7 import re |
| 8 import os | 8 import os |
| 9 | 9 |
| 10 import constants | 10 import constants |
| 11 from perf_tests_helper import PrintPerfResult | 11 from perf_tests_helper import PrintPerfResult |
| 12 from pylib import pexpect | 12 from pylib import pexpect |
| 13 from test_result import BaseTestResult, TestResults | 13 from test_result import BaseTestResult, TestResults |
| 14 | 14 |
| 15 from android_commands import errors | 15 from android_commands import errors |
| 16 | 16 |
| 17 | 17 |
| 18 class TestPackage(object): | 18 class TestPackage(object): |
| 19 """A helper base class for both APK and stand-alone executables. | 19 """A helper base class for both APK and stand-alone executables. |
| 20 | 20 |
| 21 Args: | 21 Args: |
| 22 adb: ADB interface the tests are using. | 22 adb: ADB interface the tests are using. |
| 23 device: Device to run the tests. | 23 device: Device to run the tests. |
| 24 test_suite: A specific test suite to run, empty to run all. | 24 test_suite: A specific test suite to run, empty to run all. |
| 25 timeout: Timeout for each test. | 25 timeout: Timeout for each test. |
| 26 performance_test: Whether or not this is a performance test. | |
| 27 cleanup_test_files: Whether or not to cleanup test files on device. | 26 cleanup_test_files: Whether or not to cleanup test files on device. |
| 28 tool: Name of the Valgrind tool. | 27 tool: Name of the Valgrind tool. |
| 29 dump_debug_info: A debug_info object. | 28 dump_debug_info: A debug_info object. |
| 30 """ | 29 """ |
| 31 | 30 |
| 32 def __init__(self, adb, device, test_suite, timeout, | 31 def __init__(self, adb, device, test_suite, timeout, |
| 33 performance_test, cleanup_test_files, tool, dump_debug_info): | 32 cleanup_test_files, tool, dump_debug_info): |
| 34 self.adb = adb | 33 self.adb = adb |
| 35 self.device = device | 34 self.device = device |
| 36 self.test_suite_full = test_suite | 35 self.test_suite_full = test_suite |
| 37 self.test_suite = os.path.splitext(test_suite)[0] | 36 self.test_suite = os.path.splitext(test_suite)[0] |
| 38 self.test_suite_basename = self._GetTestSuiteBaseName() | 37 self.test_suite_basename = self._GetTestSuiteBaseName() |
| 39 self.test_suite_dirname = os.path.dirname( | 38 self.test_suite_dirname = os.path.dirname( |
| 40 self.test_suite.split(self.test_suite_basename)[0]) | 39 self.test_suite.split(self.test_suite_basename)[0]) |
| 41 self.performance_test = performance_test | |
| 42 self.cleanup_test_files = cleanup_test_files | 40 self.cleanup_test_files = cleanup_test_files |
| 43 self.tool = tool | 41 self.tool = tool |
| 44 if timeout == 0: | 42 if timeout == 0: |
| 45 timeout = 60 | 43 timeout = 60 |
| 46 # On a VM (e.g. chromium buildbots), this timeout is way too small. | 44 # On a VM (e.g. chromium buildbots), this timeout is way too small. |
| 47 if os.environ.get('BUILDBOT_SLAVENAME'): | 45 if os.environ.get('BUILDBOT_SLAVENAME'): |
| 48 timeout = timeout * 2 | 46 timeout = timeout * 2 |
| 49 self.timeout = timeout * self.tool.GetTimeoutScale() | 47 self.timeout = timeout * self.tool.GetTimeoutScale() |
| 50 self.dump_debug_info = dump_debug_info | 48 self.dump_debug_info = dump_debug_info |
| 51 | 49 |
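A note on the constructor: it folds three timeout rules into a few lines. A zero value falls back to a 60-second default, the budget doubles on buildbot VMs, and the Valgrind tool's scale factor multiplies the result. A minimal standalone sketch of that arithmetic (the function name and the example scale factor are hypothetical; the default, the VM doubling, and the tool scaling come from the code above):

import os

def ComputeTimeout(timeout, tool_scale):
  # Zero means "unspecified": fall back to 60 seconds per test.
  if timeout == 0:
    timeout = 60
  # Buildbot VMs run slowly, so double the budget there.
  if os.environ.get('BUILDBOT_SLAVENAME'):
    timeout *= 2
  # Valgrind-style tools slow execution further; scale accordingly.
  return timeout * tool_scale

# E.g. with a hypothetical scale of 20 (heavyweight tool), an unspecified
# timeout becomes 60 * 20 = 1200s locally, or 60 * 2 * 20 = 2400s on a bot.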
| 52 def _BeginGetIOStats(self): | |
| 53 """Gets I/O statistics before running test. | |
| 54 | |
| 55 Returns: | |
| 56 I/O stats object. The object may be None if this is not a | |
| 57 performance test. | |
| 58 """ | |
| 59 initial_io_stats = None | |
| 60 # Try to get the disk I/O statistics for all performance tests. | |
| 61 if self.performance_test: | |
| 62 initial_io_stats = self.adb.GetIoStats() | |
| 63 return initial_io_stats | |
| 64 | |
| 65 def _EndGetIOStats(self, initial_io_stats): | |
| 66 """Gets I/O statistics after running test and calcuate the I/O delta. | |
| 67 | |
| 68 Args: | |
| 69 initial_io_stats: I/O stats object obtained from _BeginGetIOStats. | |
| 70 | |
| 71 Returns: | |
| 72 String of formatted disk I/O statistics. | |
| 73 """ | |
| 74 disk_io = '' | |
| 75 if self.performance_test and initial_io_stats: | |
| 76 final_io_stats = self.adb.GetIoStats() | |
| 77 for stat in final_io_stats: | |
| 78 disk_io += '\n' + PrintPerfResult(stat, stat, | |
| 79 [final_io_stats[stat] - | |
| 80 initial_io_stats[stat]], | |
| 81 stat.split('_')[1], | |
| 82 print_to_stdout=False) | |
| 83 logging.info(disk_io) | |
| 84 return disk_io | |
| 85 | |
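The two deleted methods bracket a test run with adb.GetIoStats() snapshots and report the per-counter difference through PrintPerfResult. A sketch of the delta step, assuming, consistent with stat.split('_')[1] above, that the snapshots are dicts of cumulative counters keyed by names like 'num_reads' (so the unit is the part after the underscore; the helper name is hypothetical):

def DiskIoDelta(initial_io_stats, final_io_stats):
  # Maps each stat name to (delta, unit); e.g. 'num_reads' -> (42, 'reads').
  deltas = {}
  for stat, final_value in final_io_stats.items():
    delta = final_value - initial_io_stats.get(stat, 0)
    deltas[stat] = (delta, stat.split('_')[1])
  return deltas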
| 86 def GetDisabledPrefixes(self): | 50 def GetDisabledPrefixes(self): |
| 87 return ['DISABLED_', 'FLAKY_', 'FAILS_'] | 51 return ['DISABLED_', 'FLAKY_', 'FAILS_'] |
| 88 | 52 |
| 89 def _ParseGTestListTests(self, all_tests): | 53 def _ParseGTestListTests(self, all_tests): |
| 90 """Parses and filters the raw test lists. | 54 """Parses and filters the raw test lists. |
| 91 | 55 |
| 92 Args: | 56 Args: |
| 93 all_tests: The raw test listing with the following format: | 57 all_tests: The raw test listing with the following format: |
| 94 | 58 |
| 95 IPCChannelTest. | 59 IPCChannelTest. |
| (...skipping 66 matching lines...) |
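The raw listing is the conventional --gtest_list_tests shape: an unindented 'SuiteName.' line, as in the IPCChannelTest. example above, followed by indented test names. A hedged sketch of the parse-and-filter step (the listing details beyond the visible example are assumptions, and the helper name is hypothetical):

def ParseAndFilterTests(raw_listing, disabled_prefixes):
  tests = []
  current_suite = None
  for line in raw_listing.splitlines():
    if not line.strip():
      continue
    if not line.startswith(' '):
      current_suite = line.strip()  # e.g. 'IPCChannelTest.'
    elif current_suite is not None:
      test = line.strip()
      # Drop tests carrying any disabled-style prefix.
      if not any(test.startswith(p) for p in disabled_prefixes):
        tests.append(current_suite + test)
  return tests

# With GetDisabledPrefixes() above, a hypothetical listing entry
# '  DISABLED_SendBadMessage' under 'IPCChannelTest.' is filtered out.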
| 162 overall_fail = False | 126 overall_fail = False |
| 163 re_run = re.compile('\[ RUN \] ?(.*)\r\n') | 127 re_run = re.compile('\[ RUN \] ?(.*)\r\n') |
| 164 # APK tests rely on the PASSED tag. | 128 # APK tests rely on the PASSED tag. |
| 165 re_passed = re.compile('\[ PASSED \] ?(.*)\r\n') | 129 re_passed = re.compile('\[ PASSED \] ?(.*)\r\n') |
| 166 # Signal handlers are installed before starting tests | 130 # Signal handlers are installed before starting tests |
| 167 # to output the CRASHED marker when a crash happens. | 131 # to output the CRASHED marker when a crash happens. |
| 168 re_crash = re.compile('\[ CRASHED \](.*)\r\n') | 132 re_crash = re.compile('\[ CRASHED \](.*)\r\n') |
| 169 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') | 133 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') |
| 170 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') | 134 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') |
| 171 re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n') | 135 re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n') |
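These six patterns follow the gtest log protocol: every test opens with a RUN line and closes with OK or FAILED, the pre-installed signal handlers inject CRASHED on a crash, and PASSED or RUNNER_FAILED terminates the whole run. A sample of the log shape the patterns are written against (test names are hypothetical, marker spacing follows the patterns as written above, and the \r\n endings come from the device console):

SAMPLE_OUTPUT = (
    '[ RUN ] IPCChannelTest.ChannelTest\r\n'
    '[ OK ] IPCChannelTest.ChannelTest (42 ms)\r\n'
    '[ RUN ] IPCChannelTest.SendBadMessage\r\n'
    '[ FAILED ] IPCChannelTest.SendBadMessage (7 ms)\r\n'
    '[ PASSED ] 1 test.\r\n'
)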
| 172 io_stats_before = self._BeginGetIOStats() | |
| 173 try: | 136 try: |
| 174 while True: | 137 while True: |
| 175 found = p.expect([re_run, re_passed, re_runner_fail], | 138 found = p.expect([re_run, re_passed, re_runner_fail], |
| 176 timeout=self.timeout) | 139 timeout=self.timeout) |
| 177 if found == 1: # matched PASSED. | 140 if found == 1: # matched PASSED. |
| 178 break | 141 break |
| 179 if found == 2: # RUNNER_FAILED | 142 if found == 2: # RUNNER_FAILED |
| 180 logging.error('RUNNER_FAILED') | 143 logging.error('RUNNER_FAILED') |
| 181 overall_fail = True | 144 overall_fail = True |
| 182 break | 145 break |
| (...skipping 14 matching lines...) |
| 197 except pexpect.EOF: | 160 except pexpect.EOF: |
| 198 logging.error('Test terminated - EOF') | 161 logging.error('Test terminated - EOF') |
| 199 raise errors.DeviceUnresponsiveError('Device may be offline') | 162 raise errors.DeviceUnresponsiveError('Device may be offline') |
| 200 except pexpect.TIMEOUT: | 163 except pexpect.TIMEOUT: |
| 201 logging.error('Test terminated after %d second timeout.', | 164 logging.error('Test terminated after %d second timeout.', |
| 202 self.timeout) | 165 self.timeout) |
| 203 timed_out = True | 166 timed_out = True |
| 204 finally: | 167 finally: |
| 205 p.close() | 168 p.close() |
| 206 | 169 |
| 207 ok_tests += self._EndGetIOStats(io_stats_before) | |
| 208 ret_code = self._GetGTestReturnCode() | 170 ret_code = self._GetGTestReturnCode() |
| 209 if ret_code: | 171 if ret_code: |
| 210 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, | 172 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, |
| 211 'pexpect.before: %s' | 173 'pexpect.before: %s' |
| 212 '\npexpect.after: %s' | 174 '\npexpect.after: %s' |
| 213 % (p.before, | 175 % (p.before, |
| 214 p.after))] | 176 p.after))] |
| 215 # Create TestResults and return | 177 # Create TestResults and return |
| 216 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, | 178 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, |
| 217 crashed=crashed_tests, timed_out=timed_out, | 179 crashed=crashed_tests, timed_out=timed_out, |
| 218 overall_fail=overall_fail) | 180 overall_fail=overall_fail) |
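A final note on the return path: a nonzero gtest exit code appends a synthetic failure record even when every individual test printed OK, so FromRun sees it alongside the parsed results. A hedged sketch of a caller, assuming (hypothetically) that TestResults exposes lists mirroring the keyword arguments; only the FromRun signature is confirmed by the code above:

results = TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                              crashed=crashed_tests, timed_out=timed_out,
                              overall_fail=overall_fail)
# Attribute names below are assumed to mirror the FromRun kwargs.
if results.failed or results.crashed or results.timed_out:
  logging.error('%d failed, %d crashed', len(results.failed),
                len(results.crashed))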