OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" |
6 | 6 |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import re | 9 import re |
10 import sys | 10 import sys |
11 import time | 11 import time |
12 | 12 |
13 from pylib import constants | 13 from pylib import constants |
14 from pylib import flag_changer | 14 from pylib import flag_changer |
15 from pylib import valgrind_tools | 15 from pylib import valgrind_tools |
16 from pylib.base import base_test_result | 16 from pylib.base import base_test_result |
17 from pylib.base import base_test_runner | 17 from pylib.base import base_test_runner |
18 from pylib.device import device_errors | 18 from pylib.device import device_errors |
19 from pylib.instrumentation import json_perf_parser | 19 from pylib.instrumentation import json_perf_parser |
20 from pylib.instrumentation import test_result | 20 from pylib.instrumentation import test_result |
21 | 21 |
22 sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib', | 22 sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib', |
23 'common')) | 23 'common')) |
24 import perf_tests_results_helper # pylint: disable=F0401 | 24 import perf_tests_results_helper # pylint: disable=F0401 |
25 | 25 |
26 | 26 |
27 _PERF_TEST_ANNOTATION = 'PerfTest' | 27 _PERF_TEST_ANNOTATION = 'PerfTest' |
28 | 28 |
29 | 29 |
30 def _GetDataFilesForTestSuite(suite_basename): | |
31 """Returns a list of data files/dirs needed by the test suite. | |
32 | |
33 Args: | |
34 suite_basename: The test suite basename for which to return file paths. | |
35 | |
36 Returns: | |
37 A list of test file and directory paths. | |
38 """ | |
39 test_files = [] | |
40 if suite_basename in ['ChromeTest', 'ContentShellTest']: | |
41 test_files += [ | |
42 'net/data/ssl/certificates/', | |
43 ] | |
44 return test_files | |
45 | |
46 | |
47 class TestRunner(base_test_runner.BaseTestRunner): | 30 class TestRunner(base_test_runner.BaseTestRunner): |
48 """Responsible for running a series of tests connected to a single device.""" | 31 """Responsible for running a series of tests connected to a single device.""" |
49 | 32 |
50 _DEVICE_DATA_DIR = 'chrome/test/data' | |
51 _DEVICE_COVERAGE_DIR = 'chrome/test/coverage' | 33 _DEVICE_COVERAGE_DIR = 'chrome/test/coverage' |
52 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' | 34 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' |
53 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + | 35 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + |
54 '/chrome-profile*') | 36 '/chrome-profile*') |
55 _DEVICE_HAS_TEST_FILES = {} | |
56 | 37 |
57 def __init__(self, test_options, device, shard_index, test_pkg, | 38 def __init__(self, test_options, device, shard_index, test_pkg, |
58 additional_flags=None): | 39 additional_flags=None): |
59 """Create a new TestRunner. | 40 """Create a new TestRunner. |
60 | 41 |
61 Args: | 42 Args: |
62 test_options: An InstrumentationOptions object. | 43 test_options: An InstrumentationOptions object. |
63 device: Attached android device. | 44 device: Attached android device. |
64 shard_index: Shard index. | 45 shard_index: Shard index. |
65 test_pkg: A TestPackage object. | 46 test_pkg: A TestPackage object. |
(...skipping 16 matching lines...) |
82 self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0]) | 63 self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0]) |
83 if additional_flags: | 64 if additional_flags: |
84 self.flags.AddFlags(additional_flags) | 65 self.flags.AddFlags(additional_flags) |
85 else: | 66 else: |
86 self.flags = None | 67 self.flags = None |
87 | 68 |
88 #override | 69 #override |
89 def InstallTestPackage(self): | 70 def InstallTestPackage(self): |
90 self.test_pkg.Install(self.device) | 71 self.test_pkg.Install(self.device) |
91 | 72 |
92 #override | |
93 def PushDataDeps(self): | |
94 # TODO(frankf): Implement a general approach for copying/installing | |
95 # once across test runners. | |
96 if TestRunner._DEVICE_HAS_TEST_FILES.get(str(self.device), False): | |
97 logging.warning('Already copied test files to device %s, skipping.', | |
98 str(self.device)) | |
99 return | |
100 | |
101 host_device_file_tuples = [] | |
102 test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName()) | |
103 if test_data: | |
104 # Make sure SD card is ready. | |
105 self.device.WaitUntilFullyBooted(timeout=20) | |
106 host_device_file_tuples += [ | |
107 (os.path.join(constants.DIR_SOURCE_ROOT, p), | |
108 os.path.join(self.device.GetExternalStoragePath(), p)) | |
109 for p in test_data] | |
110 | |
111 # TODO(frankf): Specify test data in this file as opposed to passing | |
112 # it on the command line. | |
113 for dest_host_pair in self.options.test_data: | |
114 dst_src = dest_host_pair.split(':', 1) | |
115 dst_layer = dst_src[0] | |
116 host_src = dst_src[1] | |
117 host_test_files_path = os.path.join(constants.DIR_SOURCE_ROOT, | |
118 host_src) | |
119 if os.path.exists(host_test_files_path): | |
120 host_device_file_tuples += [( | |
121 host_test_files_path, | |
122 '%s/%s/%s' % ( | |
123 self.device.GetExternalStoragePath(), | |
124 TestRunner._DEVICE_DATA_DIR, | |
125 dst_layer))] | |
126 if host_device_file_tuples: | |
127 self.device.PushChangedFiles(host_device_file_tuples) | |
128 self.tool.CopyFiles(self.device) | |
129 TestRunner._DEVICE_HAS_TEST_FILES[str(self.device)] = True | |
130 | |
131 def _GetInstrumentationArgs(self): | 73 def _GetInstrumentationArgs(self): |
132 ret = {} | 74 ret = {} |
133 if self.options.wait_for_debugger: | 75 if self.options.wait_for_debugger: |
134 ret['debug'] = 'true' | 76 ret['debug'] = 'true' |
135 if self.coverage_dir: | 77 if self.coverage_dir: |
136 ret['coverage'] = 'true' | 78 ret['coverage'] = 'true' |
137 ret['coverageFile'] = self.coverage_device_file | 79 ret['coverageFile'] = self.coverage_device_file |
138 | 80 |
139 return ret | 81 return ret |
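
The extras dict built by _GetInstrumentationArgs has the usual shape for Android instrumentation arguments, which reach the on-device runner as "-e key value" pairs on an "am instrument" command line. A minimal sketch of that flattening, assuming hypothetical package and runner names (none of these identifiers appear in this CL):

# Sketch only: flatten instrumentation extras into an `am instrument`
# command. `pkg` and `runner` are hypothetical placeholders.
def _build_instrument_cmd(pkg, runner, extras):
  cmd = ['am', 'instrument', '-w']
  for key, value in sorted(extras.items()):
    cmd += ['-e', key, value]  # each extra becomes one -e pair
  cmd.append('%s/%s' % (pkg, runner))
  return cmd

# _build_instrument_cmd('org.example.tests', '.Runner', {'debug': 'true'})
# => ['am', 'instrument', '-w', '-e', 'debug', 'true',
#     'org.example.tests/.Runner']
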
140 | 82 |
(...skipping 61 matching lines...) |
202 def _IsFreTest(self, test): | 144 def _IsFreTest(self, test): |
203 """Determines whether a test is a first run experience test. | 145 """Determines whether a test is a first run experience test. |
204 | 146 |
205 Args: | 147 Args: |
206 test: The name of the test to be checked. | 148 test: The name of the test to be checked. |
207 | 149 |
208 Returns: | 150 Returns: |
209 Whether the feature being tested is FirstRunExperience. | 151 Whether the feature being tested is FirstRunExperience. |
210 """ | 152 """ |
211 annotations = self.test_pkg.GetTestAnnotations(test) | 153 annotations = self.test_pkg.GetTestAnnotations(test) |
212 return ('FirstRunExperience' == annotations.get('Feature', None)) | 154 return 'FirstRunExperience' == annotations.get('Feature', None) |
213 | 155 |
214 def _IsPerfTest(self, test): | 156 def _IsPerfTest(self, test): |
215 """Determines whether a test is a performance test. | 157 """Determines whether a test is a performance test. |
216 | 158 |
217 Args: | 159 Args: |
218 test: The name of the test to be checked. | 160 test: The name of the test to be checked. |
219 | 161 |
220 Returns: | 162 Returns: |
221 Whether the test is annotated as a performance test. | 163 Whether the test is annotated as a performance test. |
222 """ | 164 """ |
(...skipping 46 matching lines...) |
269 test: The name of the test that was just run. | 211 test: The name of the test that was just run. |
270 Raises: | 212 Raises: |
271 Exception: if there's anything wrong with the perf data. | 213 Exception: if there's anything wrong with the perf data. |
272 """ | 214 """ |
273 if not self._IsPerfTest(test): | 215 if not self._IsPerfTest(test): |
274 return | 216 return |
275 raw_test_name = test.split('#')[1] | 217 raw_test_name = test.split('#')[1] |
276 | 218 |
277 # Wait and grab annotation data so we can figure out which traces to parse. | 219 # Wait and grab annotation data so we can figure out which traces to parse. |
278 regex = self.device.old_interface.WaitForLogMatch( | 220 regex = self.device.old_interface.WaitForLogMatch( |
279 re.compile('\*\*PERFANNOTATION\(' + raw_test_name + '\)\:(.*)'), None) | 221 re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'), |
| 222 None) |
280 | 223 |
281 # If the test is set to run on a specific device type only (i.e., only | 224 # If the test is set to run on a specific device type only (i.e., only |
282 # tablet or phone) and it is being run on the wrong device, the test | 225 # tablet or phone) and it is being run on the wrong device, the test |
283 # just quits and does not do anything. The Java test harness will still | 226 # just quits and does not do anything. The Java test harness will still |
284 # print the appropriate annotation for us, but will add --NORUN-- for | 227 # print the appropriate annotation for us, but will add --NORUN-- for |
285 # us so we know to ignore the results. | 228 # us so we know to ignore the results. |
286 # The --NORUN-- tag is managed by MainActivityTestBase.java. | 229 # The --NORUN-- tag is managed by MainActivityTestBase.java. |
287 if regex.group(1) != '--NORUN--': | 230 if regex.group(1) != '--NORUN--': |
288 | 231 |
289 # Obtain the relevant perf data. The data is dumped to a | 232 # Obtain the relevant perf data. The data is dumped to a |
(...skipping 231 matching lines...) |
521 except device_errors.CommandTimeoutError as e: | 464 except device_errors.CommandTimeoutError as e: |
522 results.AddResult(test_result.InstrumentationTestResult( | 465 results.AddResult(test_result.InstrumentationTestResult( |
523 test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, | 466 test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, |
524 log=str(e) or 'No information')) | 467 log=str(e) or 'No information')) |
525 except device_errors.DeviceUnreachableError as e: | 468 except device_errors.DeviceUnreachableError as e: |
526 results.AddResult(test_result.InstrumentationTestResult( | 469 results.AddResult(test_result.InstrumentationTestResult( |
527 test, base_test_result.ResultType.CRASH, start_ms, duration_ms, | 470 test, base_test_result.ResultType.CRASH, start_ms, duration_ms, |
528 log=str(e) or 'No information')) | 471 log=str(e) or 'No information')) |
529 self.TestTeardown(test, results) | 472 self.TestTeardown(test, results) |
530 return (results, None if results.DidRunPass() else test) | 473 return (results, None if results.DidRunPass() else test) |
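
The tail of the test-running method above returns a (results, retry) pair: a TestRunResults collection plus the failing test's name, or None if everything passed, so the caller knows what to rerun. A minimal caller sketch, assuming the method is exposed as RunTest per the BaseTestRunner interface, and that `runner` and `test_name` already exist; the single-retry policy is illustrative, not taken from this CL:

# Sketch only: run one test and retry once if it did not pass.
# `runner` is an initialized TestRunner; `test_name` is e.g.
# 'org.chromium.FooTest#testBar'.
results, failed = runner.RunTest(test_name)
if failed is not None:
  results, failed = runner.RunTest(failed)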