OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 import logging | 6 import logging |
7 import re | 7 import re |
8 import os | 8 import os |
9 | 9 |
10 import constants | 10 import constants |
11 from perf_tests_helper import PrintPerfResult | 11 from perf_tests_helper import PrintPerfResult |
12 from pylib import pexpect | 12 from pylib import pexpect |
13 from test_result import BaseTestResult, TestResults | 13 from test_result import BaseTestResult, TestResults |
14 | 14 |
15 from android_commands import errors | 15 from android_commands import errors |
16 | 16 |
17 # TODO(bulach): TestPackage, TestPackageExecutable and | 17 |
18 # TestPackageApk are a work in progress related to making the native tests | |
19 # run as a NDK-app from an APK rather than a stand-alone executable. | |
20 class TestPackage(object): | 18 class TestPackage(object): |
21 """A helper base class for both APK and stand-alone executables. | 19 """A helper base class for both APK and stand-alone executables. |
22 | 20 |
23 Args: | 21 Args: |
24 adb: ADB interface the tests are using. | 22 adb: ADB interface the tests are using. |
25 device: Device to run the tests. | 23 device: Device to run the tests. |
26 test_suite: A specific test suite to run, empty to run all. | 24 test_suite: A specific test suite to run, empty to run all. |
27 timeout: Timeout for each test. | 25 timeout: Timeout for each test. |
28 rebaseline: Whether or not to run tests in isolation and update the filter. | |
29 performance_test: Whether or not this is a performance test. | 26 performance_test: Whether or not this is a performance test. |
30 cleanup_test_files: Whether or not to cleanup test files on device. | 27 cleanup_test_files: Whether or not to cleanup test files on device. |
31 tool: Name of the Valgrind tool. | 28 tool: Name of the Valgrind tool. |
32 dump_debug_info: A debug_info object. | 29 dump_debug_info: A debug_info object. |
33 """ | 30 """ |
34 | 31 |
35 def __init__(self, adb, device, test_suite, timeout, rebaseline, | 32 def __init__(self, adb, device, test_suite, timeout, |
36 performance_test, cleanup_test_files, tool, dump_debug_info): | 33 performance_test, cleanup_test_files, tool, dump_debug_info): |
37 self.adb = adb | 34 self.adb = adb |
38 self.device = device | 35 self.device = device |
39 self.test_suite_full = test_suite | 36 self.test_suite_full = test_suite |
40 self.test_suite = os.path.splitext(test_suite)[0] | 37 self.test_suite = os.path.splitext(test_suite)[0] |
41 self.test_suite_basename = self._GetTestSuiteBaseName() | 38 self.test_suite_basename = self._GetTestSuiteBaseName() |
42 self.test_suite_dirname = os.path.dirname( | 39 self.test_suite_dirname = os.path.dirname( |
43 self.test_suite.split(self.test_suite_basename)[0]) | 40 self.test_suite.split(self.test_suite_basename)[0]) |
44 self.rebaseline = rebaseline | |
45 self.performance_test = performance_test | 41 self.performance_test = performance_test |
46 self.cleanup_test_files = cleanup_test_files | 42 self.cleanup_test_files = cleanup_test_files |
47 self.tool = tool | 43 self.tool = tool |
48 if timeout == 0: | 44 if timeout == 0: |
49 timeout = 60 | 45 timeout = 60 |
50 # On a VM (e.g. chromium buildbots), this timeout is way too small. | 46 # On a VM (e.g. chromium buildbots), this timeout is way too small. |
51 if os.environ.get('BUILDBOT_SLAVENAME'): | 47 if os.environ.get('BUILDBOT_SLAVENAME'): |
52 timeout = timeout * 2 | 48 timeout = timeout * 2 |
53 self.timeout = timeout * self.tool.GetTimeoutScale() | 49 self.timeout = timeout * self.tool.GetTimeoutScale() |
54 self.dump_debug_info = dump_debug_info | 50 self.dump_debug_info = dump_debug_info |
55 | 51 |
56 def _BeginGetIOStats(self): | 52 def _BeginGetIOStats(self): |
57 """Gets I/O statistics before running test. | 53 """Gets I/O statistics before running test. |
58 | 54 |
59 Return: | 55 Return: |
60 I/O stats object. The I/O stats object may be None if the test is not | 56 I/O stats object. The I/O stats object may be None if the test is not |
61 performance test. | 57 performance test. |
62 """ | 58 """ |
63 initial_io_stats = None | 59 initial_io_stats = None |
64 # Try to get the disk I/O statistics for all performance tests. | 60 # Try to get the disk I/O statistics for all performance tests. |
65 if self.performance_test and not self.rebaseline: | 61 if self.performance_test: |
66 initial_io_stats = self.adb.GetIoStats() | 62 initial_io_stats = self.adb.GetIoStats() |
67 return initial_io_stats | 63 return initial_io_stats |
68 | 64 |
69 def _EndGetIOStats(self, initial_io_stats): | 65 def _EndGetIOStats(self, initial_io_stats): |
70 """Gets I/O statistics after running test and calculate the I/O delta. | 66 """Gets I/O statistics after running test and calculate the I/O delta. |
71 | 67 |
72 Args: | 68 Args: |
73 initial_io_stats: I/O stats object got from _BeginGetIOStats. | 69 initial_io_stats: I/O stats object got from _BeginGetIOStats. |
74 | 70 |
75 Return: | 71 Return: |
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
200 failed_tests += [BaseTestResult(full_test_name, p.before)] | 196 failed_tests += [BaseTestResult(full_test_name, p.before)] |
201 except pexpect.EOF: | 197 except pexpect.EOF: |
202 logging.error('Test terminated - EOF') | 198 logging.error('Test terminated - EOF') |
203 raise errors.DeviceUnresponsiveError('Device may be offline') | 199 raise errors.DeviceUnresponsiveError('Device may be offline') |
204 except pexpect.TIMEOUT: | 200 except pexpect.TIMEOUT: |
205 logging.error('Test terminated after %d second timeout.', | 201 logging.error('Test terminated after %d second timeout.', |
206 self.timeout) | 202 self.timeout) |
207 timed_out = True | 203 timed_out = True |
208 finally: | 204 finally: |
209 p.close() | 205 p.close() |
210 if not self.rebaseline: | 206 |
211 ok_tests += self._EndGetIOStats(io_stats_before) | 207 ok_tests += self._EndGetIOStats(io_stats_before) |
212 ret_code = self._GetGTestReturnCode() | 208 ret_code = self._GetGTestReturnCode() |
213 if ret_code: | 209 if ret_code: |
214 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, | 210 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, |
215 'pexpect.before: %s' | 211 'pexpect.before: %s' |
216 '\npexpect.after: %s' | 212 '\npexpect.after: %s' |
217 % (p.before, | 213 % (p.before, |
218 p.after))] | 214 p.after))] |
219 # Create TestResults and return | 215 # Create TestResults and return |
220 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, | 216 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, |
221 crashed=crashed_tests, timed_out=timed_out, | 217 crashed=crashed_tests, timed_out=timed_out, |
222 overall_fail=overall_fail) | 218 overall_fail=overall_fail) |
OLD | NEW |