OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import glob | 5 import glob |
6 import logging | 6 import logging |
7 import os | 7 import os |
8 import sys | 8 import sys |
9 | 9 |
| 10 import android_commands |
| 11 from android_commands import errors |
10 from base_test_runner import BaseTestRunner | 12 from base_test_runner import BaseTestRunner |
11 import android_commands | 13 import constants |
12 import debug_info | 14 import debug_info |
13 import constants | |
14 import perf_tests_helper | 15 import perf_tests_helper |
15 import run_tests_helper | 16 import run_tests_helper |
16 from android_commands import errors | |
17 from test_package_apk import TestPackageApk | 17 from test_package_apk import TestPackageApk |
18 from test_package_executable import TestPackageExecutable | 18 from test_package_executable import TestPackageExecutable |
19 from test_result import BaseTestResult, TestResults | 19 from test_result import BaseTestResult, TestResults |
20 | 20 |
21 | 21 |
22 class SingleTestRunner(BaseTestRunner): | 22 class SingleTestRunner(BaseTestRunner): |
23 """Single test suite attached to a single device. | 23 """Single test suite attached to a single device. |
24 | 24 |
25 Args: | 25 Args: |
26 device: Device to run the tests. | 26 device: Device to run the tests. |
27 test_suite: A specific test suite to run, empty to run all. | 27 test_suite: A specific test suite to run, empty to run all. |
28 gtest_filter: A gtest_filter flag. | 28 gtest_filter: A gtest_filter flag. |
29 test_arguments: Additional arguments to pass to the test binary. | 29 test_arguments: Additional arguments to pass to the test binary. |
30 timeout: Timeout for each test. | 30 timeout: Timeout for each test. |
31 rebaseline: Whether or not to run tests in isolation and update the filter. | |
32 performance_test: Whether or not performance test(s). | 31 performance_test: Whether or not performance test(s). |
33 cleanup_test_files: Whether or not to cleanup test files on device. | 32 cleanup_test_files: Whether or not to cleanup test files on device. |
34 tool: Name of the Valgrind tool. | 33 tool: Name of the Valgrind tool. |
35 shard_index: index number of the shard on which the test suite will run. | 34 shard_index: index number of the shard on which the test suite will run. |
36 dump_debug_info: Whether or not to dump debug information. | 35 dump_debug_info: Whether or not to dump debug information. |
37 build_type: 'Release' or 'Debug'. | 36 build_type: 'Release' or 'Debug'. |
38 in_webkit_checkout: Whether the suite is being run from a WebKit checkout. | 37 in_webkit_checkout: Whether the suite is being run from a WebKit checkout. |
39 """ | 38 """ |
40 | 39 |
41 def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout, | 40 def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout, |
42 rebaseline, performance_test, cleanup_test_files, tool_name, | 41 performance_test, cleanup_test_files, tool_name, |
43 shard_index, dump_debug_info, fast_and_loose, build_type, | 42 shard_index, dump_debug_info, fast_and_loose, build_type, |
44 in_webkit_checkout): | 43 in_webkit_checkout): |
45 BaseTestRunner.__init__(self, device, tool_name, shard_index, build_type) | 44 BaseTestRunner.__init__(self, device, tool_name, shard_index, build_type) |
46 self._running_on_emulator = self.device.startswith('emulator') | 45 self._running_on_emulator = self.device.startswith('emulator') |
47 self._gtest_filter = gtest_filter | 46 self._gtest_filter = gtest_filter |
48 self._test_arguments = test_arguments | 47 self._test_arguments = test_arguments |
49 self.test_results = TestResults() | 48 self.test_results = TestResults() |
50 if dump_debug_info: | 49 if dump_debug_info: |
51 self.dump_debug_info = debug_info.GTestDebugInfo(self.adb, device, | 50 self.dump_debug_info = debug_info.GTestDebugInfo( |
52 os.path.basename(test_suite), gtest_filter) | 51 self.adb, device, |
| 52 os.path.basename(test_suite), gtest_filter) |
53 else: | 53 else: |
54 self.dump_debug_info = None | 54 self.dump_debug_info = None |
55 self.fast_and_loose = fast_and_loose | 55 self.fast_and_loose = fast_and_loose |
56 self.in_webkit_checkout = in_webkit_checkout | 56 self.in_webkit_checkout = in_webkit_checkout |
57 | 57 |
58 logging.warning('Test suite: ' + test_suite) | 58 logging.warning('Test suite: ' + test_suite) |
59 if os.path.splitext(test_suite)[1] == '.apk': | 59 if os.path.splitext(test_suite)[1] == '.apk': |
60 self.test_package = TestPackageApk(self.adb, device, | 60 self.test_package = TestPackageApk( |
61 test_suite, timeout, rebaseline, performance_test, cleanup_test_files, | 61 self.adb, |
62 self.tool, self.dump_debug_info) | 62 device, |
| 63 test_suite, |
| 64 timeout, |
| 65 performance_test, |
| 66 cleanup_test_files, |
| 67 self.tool, |
| 68 self.dump_debug_info) |
63 else: | 69 else: |
64 # Put a copy into the android out/target directory, to allow stack trace | 70 # Put a copy into the android out/target directory, to allow stack trace |
65 # generation. | 71 # generation. |
66 symbols_dir = os.path.join(constants.CHROME_DIR, 'out', build_type, | 72 symbols_dir = os.path.join(constants.CHROME_DIR, 'out', build_type, |
67 'lib.target') | 73 'lib.target') |
68 self.test_package = TestPackageExecutable( | 74 self.test_package = TestPackageExecutable( |
69 self.adb, device, | 75 self.adb, |
70 test_suite, timeout, rebaseline, performance_test, cleanup_test_files, | 76 device, |
71 self.tool, self.dump_debug_info, symbols_dir) | 77 test_suite, timeout, |
| 78 performance_test, |
| 79 cleanup_test_files, |
| 80 self.tool, |
| 81 self.dump_debug_info, |
| 82 symbols_dir) |
72 self._performance_test_setup = None | 83 self._performance_test_setup = None |
73 if performance_test: | 84 if performance_test: |
74 self._performance_test_setup = perf_tests_helper.PerfTestSetup(self.adb) | 85 self._performance_test_setup = perf_tests_helper.PerfTestSetup(self.adb) |
75 | 86 |
76 def _TestSuiteRequiresMockTestServer(self): | 87 def _TestSuiteRequiresMockTestServer(self): |
77 """Returns True if the test suite requires mock test server.""" | 88 """Returns True if the test suite requires mock test server.""" |
78 tests_require_net_test_server = ['unit_tests', 'net_unittests', | 89 tests_require_net_test_server = ['unit_tests', 'net_unittests', |
79 'content_unittests'] | 90 'content_unittests'] |
80 return (self.test_package.test_suite_basename in | 91 return (self.test_package.test_suite_basename in |
81 tests_require_net_test_server) | 92 tests_require_net_test_server) |
82 | 93 |
83 def _GetFilterFileName(self): | 94 def _GetFilterFileName(self): |
84 """Returns the filename of gtest filter.""" | 95 """Returns the filename of gtest filter.""" |
85 return os.path.join(sys.path[0], 'gtest_filter', | 96 return os.path.join( |
| 97 sys.path[0], 'gtest_filter', |
86 self.test_package.test_suite_basename + '_disabled') | 98 self.test_package.test_suite_basename + '_disabled') |
87 | 99 |
88 def _GetAdditionalEmulatorFilterName(self): | 100 def _GetAdditionalEmulatorFilterName(self): |
89 """Returns the filename of additional gtest filter for emulator.""" | 101 """Returns the filename of additional gtest filter for emulator.""" |
90 return os.path.join(sys.path[0], 'gtest_filter', | 102 return os.path.join( |
| 103 sys.path[0], 'gtest_filter', |
91 self.test_package.test_suite_basename + | 104 self.test_package.test_suite_basename + |
92 '_emulator_additional_disabled') | 105 '_emulator_additional_disabled') |
93 | 106 |
94 def GetDisabledTests(self): | 107 def GetDisabledTests(self): |
95 """Returns a list of disabled tests. | 108 """Returns a list of disabled tests. |
96 | 109 |
97 Returns: | 110 Returns: |
98 A list of disabled tests obtained from gtest_filter/test_suite_disabled. | 111 A list of disabled tests obtained from gtest_filter/test_suite_disabled. |
99 """ | 112 """ |
100 disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName()) | 113 disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName()) |
101 if self._running_on_emulator: | 114 if self._running_on_emulator: |
102 # Append emulator's filter file. | 115 # Append emulator's filter file. |
103 disabled_tests.extend(run_tests_helper.GetExpectations( | 116 disabled_tests.extend(run_tests_helper.GetExpectations( |
104 self._GetAdditionalEmulatorFilterName())) | 117 self._GetAdditionalEmulatorFilterName())) |
105 return disabled_tests | 118 return disabled_tests |
106 | 119 |
107 def UpdateFilter(self, failed_tests): | |
108 """Updates test_suite_disabled file with the new filter (deletes if empty). | |
109 | |
110 If running in Emulator, only the failed tests which are not in the normal | |
111 filter returned by _GetFilterFileName() are written to emulator's | |
112 additional filter file. | |
113 | |
114 Args: | |
115 failed_tests: A sorted list of failed tests. | |
116 """ | |
117 disabled_tests = [] | |
118 if not self._running_on_emulator: | |
119 filter_file_name = self._GetFilterFileName() | |
120 else: | |
121 filter_file_name = self._GetAdditionalEmulatorFilterName() | |
122 disabled_tests.extend( | |
123 run_tests_helper.GetExpectations(self._GetFilterFileName())) | |
124 logging.info('About to update emulator\'s additional filter (%s).' | |
125 % filter_file_name) | |
126 | |
127 new_failed_tests = [] | |
128 if failed_tests: | |
129 for test in failed_tests: | |
130 if test.name not in disabled_tests: | |
131 new_failed_tests.append(test.name) | |
132 | |
133 if not new_failed_tests: | |
134 if os.path.exists(filter_file_name): | |
135 os.unlink(filter_file_name) | |
136 return | |
137 | |
138 filter_file = file(filter_file_name, 'w') | |
139 if self._running_on_emulator: | |
140 filter_file.write('# Addtional list of suppressions from emulator\n') | |
141 else: | |
142 filter_file.write('# List of suppressions\n') | |
143 filter_file.write('# This file was automatically generated by %s\n' | |
144 % sys.argv[0]) | |
145 filter_file.write('\n'.join(sorted(new_failed_tests))) | |
146 filter_file.write('\n') | |
147 filter_file.close() | |
148 | |
149 def GetDataFilesForTestSuite(self): | 120 def GetDataFilesForTestSuite(self): |
150 """Returns a list of data files/dirs needed by the test suite.""" | 121 """Returns a list of data files/dirs needed by the test suite.""" |
151 # Ideally, we'd just push all test data. However, it has >100MB, and a lot | 122 # Ideally, we'd just push all test data. However, it has >100MB, and a lot |
152 # of the files are not relevant (some are used for browser_tests, others for | 123 # of the files are not relevant (some are used for browser_tests, others for |
153 # features not supported, etc..). | 124 # features not supported, etc..). |
154 if self.test_package.test_suite_basename in ['base_unittests', | 125 if self.test_package.test_suite_basename in ['base_unittests', |
155 'sql_unittests', | 126 'sql_unittests', |
156 'unit_tests']: | 127 'unit_tests']: |
157 test_files = [ | 128 test_files = [ |
158 'base/data/file_util_unittest', | 129 'base/data/file_util_unittest', |
(...skipping 29 matching lines...) |
188 'chrome/test/data/web_app_info/', | 159 'chrome/test/data/web_app_info/', |
189 'chrome/test/data/web_database', | 160 'chrome/test/data/web_database', |
190 'chrome/test/data/webui/', | 161 'chrome/test/data/webui/', |
191 'chrome/test/data/zip', | 162 'chrome/test/data/zip', |
192 'chrome/third_party/mock4js/', | 163 'chrome/third_party/mock4js/', |
193 'content/browser/gpu/software_rendering_list.json', | 164 'content/browser/gpu/software_rendering_list.json', |
194 'net/data/cache_tests/insert_load1', | 165 'net/data/cache_tests/insert_load1', |
195 'net/data/cache_tests/dirty_entry5', | 166 'net/data/cache_tests/dirty_entry5', |
196 'net/data/ssl/certificates/', | 167 'net/data/ssl/certificates/', |
197 'ui/base/test/data/data_pack_unittest', | 168 'ui/base/test/data/data_pack_unittest', |
198 ] | 169 ] |
199 if self.test_package.test_suite_basename == 'unit_tests': | 170 if self.test_package.test_suite_basename == 'unit_tests': |
200 test_files += ['chrome/test/data/simple_open_search.xml'] | 171 test_files += ['chrome/test/data/simple_open_search.xml'] |
201 # The following are spell check data. Now only list the data under | 172 # The following are spell check data. Now only list the data under |
202 # third_party/hunspell_dictionaries which are used by unit tests. | 173 # third_party/hunspell_dictionaries which are used by unit tests. |
203 old_cwd = os.getcwd() | 174 old_cwd = os.getcwd() |
204 os.chdir(constants.CHROME_DIR) | 175 os.chdir(constants.CHROME_DIR) |
205 test_files += glob.glob('third_party/hunspell_dictionaries/*.bdic') | 176 test_files += glob.glob('third_party/hunspell_dictionaries/*.bdic') |
206 os.chdir(old_cwd) | 177 os.chdir(old_cwd) |
207 return test_files | 178 return test_files |
208 elif self.test_package.test_suite_basename == 'media_unittests': | 179 elif self.test_package.test_suite_basename == 'media_unittests': |
(...skipping 61 matching lines...) |
270 | 241 |
271 The path of this directory is different when the suite is being run as | 242 The path of this directory is different when the suite is being run as |
272 part of a WebKit check-out. | 243 part of a WebKit check-out. |
273 """ | 244 """ |
274 webkit_src = os.path.join(constants.CHROME_DIR, 'third_party', 'WebKit') | 245 webkit_src = os.path.join(constants.CHROME_DIR, 'third_party', 'WebKit') |
275 if self.in_webkit_checkout: | 246 if self.in_webkit_checkout: |
276 webkit_src = os.path.join(constants.CHROME_DIR, '..', '..', '..') | 247 webkit_src = os.path.join(constants.CHROME_DIR, '..', '..', '..') |
277 | 248 |
278 self.adb.PushIfNeeded( | 249 self.adb.PushIfNeeded( |
279 os.path.join(webkit_src, 'Source/WebKit/chromium/tests/data'), | 250 os.path.join(webkit_src, 'Source/WebKit/chromium/tests/data'), |
280 os.path.join(self.adb.GetExternalStorage(), | 251 os.path.join( |
| 252 self.adb.GetExternalStorage(), |
281 'third_party/WebKit/Source/WebKit/chromium/tests/data')) | 253 'third_party/WebKit/Source/WebKit/chromium/tests/data')) |
282 | 254 |
283 def RunTestsWithFilter(self): | |
284 """Runs a tests via a small, temporary shell script.""" | |
285 self.test_package.CreateTestRunnerScript(self._gtest_filter, | |
286 self._test_arguments) | |
287 self.test_results = self.test_package.RunTestsAndListResults() | |
288 | |
289 def RebaselineTests(self): | |
290 """Runs all available tests, restarting in case of failures.""" | |
291 if self._gtest_filter: | |
292 all_tests = set(self._gtest_filter.split(':')) | |
293 else: | |
294 all_tests = set(self.test_package.GetAllTests()) | |
295 failed_results = set() | |
296 executed_results = set() | |
297 while True: | |
298 executed_names = set([f.name for f in executed_results]) | |
299 self._gtest_filter = ':'.join(all_tests - executed_names) | |
300 self.RunTestsWithFilter() | |
301 failed_results.update(self.test_results.crashed, | |
302 self.test_results.failed) | |
303 executed_results.update(self.test_results.crashed, | |
304 self.test_results.failed, | |
305 self.test_results.ok) | |
306 executed_names = set([f.name for f in executed_results]) | |
307 logging.info('*' * 80) | |
308 logging.info(self.device) | |
309 logging.info('Executed: ' + str(len(executed_names)) + ' of ' + | |
310 str(len(all_tests))) | |
311 logging.info('Failed so far: ' + str(len(failed_results)) + ' ' + | |
312 str([f.name for f in failed_results])) | |
313 logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' + | |
314 str(all_tests - executed_names)) | |
315 logging.info('*' * 80) | |
316 if executed_names == all_tests: | |
317 break | |
318 self.test_results = TestResults.FromRun( | |
319 ok=list(executed_results - failed_results), | |
320 failed=list(failed_results)) | |
321 | |
322 def RunTests(self): | 255 def RunTests(self): |
323 """Runs all tests (in rebaseline mode, runs each test in isolation). | 256 """Runs tests on a single device. |
324 | 257 |
325 Returns: | 258 Returns: |
326 A TestResults object. | 259 A TestResults object. |
327 """ | 260 """ |
328 try: | 261 try: |
329 if self.test_package.rebaseline: | 262 self.test_package.CreateTestRunnerScript(self._gtest_filter, |
330 self.RebaselineTests() | 263 self._test_arguments) |
331 else: | 264 self.test_results = self.test_package.RunTestsAndListResults() |
332 self.RunTestsWithFilter() | |
333 except errors.DeviceUnresponsiveError as e: | 265 except errors.DeviceUnresponsiveError as e: |
334 # Make sure this device is not attached | 266 # Make sure this device is not attached |
335 if android_commands.IsDeviceAttached(self.device): | 267 if android_commands.IsDeviceAttached(self.device): |
336 raise e | 268 raise e |
337 | 269 |
| 270 # TODO(frankf): We should report these as "skipped" not "failures". |
338 # Wrap the results | 271 # Wrap the results |
339 logging.warning(e) | 272 logging.warning(e) |
340 failed_tests = [] | 273 failed_tests = [] |
341 for t in self._gtest_filter.split(':'): | 274 for t in self._gtest_filter.split(':'): |
342 failed_tests += [BaseTestResult(t, '')] | 275 failed_tests += [BaseTestResult(t, '')] |
343 self.test_results = TestResults.FromRun( | 276 self.test_results = TestResults.FromRun( |
344 failed=failed_tests, device_exception=self.device) | 277 failed=failed_tests, device_exception=self.device) |
345 | 278 |
346 return self.test_results | 279 return self.test_results |
347 | 280 |
(...skipping 14 matching lines...) |
362 self.tool.CleanUpEnvironment() | 295 self.tool.CleanUpEnvironment() |
363 if self.test_package.cleanup_test_files: | 296 if self.test_package.cleanup_test_files: |
364 self.adb.RemovePushedFiles() | 297 self.adb.RemovePushedFiles() |
365 if self.dump_debug_info: | 298 if self.dump_debug_info: |
366 self.dump_debug_info.StopRecordingLog() | 299 self.dump_debug_info.StopRecordingLog() |
367 if self._performance_test_setup: | 300 if self._performance_test_setup: |
368 self._performance_test_setup.TearDown() | 301 self._performance_test_setup.TearDown() |
369 if self.dump_debug_info: | 302 if self.dump_debug_info: |
370 self.dump_debug_info.ArchiveNewCrashFiles() | 303 self.dump_debug_info.ArchiveNewCrashFiles() |
371 super(SingleTestRunner, self).TearDown() | 304 super(SingleTestRunner, self).TearDown() |
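Reviewer note on the new control flow: with rebaseline support removed, SingleTestRunner no longer takes a rebaseline argument, and RunTests() always goes through the single CreateTestRunnerScript() / RunTestsAndListResults() path. The sketch below is a hypothetical call site against the updated signature; it is not part of this CL, the argument values (device serial, suite path, timeout, tool name) are placeholders, and it assumes the module is importable as single_test_runner with the usual BaseTestRunner setup (pushing test data, starting servers, etc.) handled elsewhere.

    # Hypothetical call site; all values are illustrative only.
    from single_test_runner import SingleTestRunner

    runner = SingleTestRunner(
        'emulator-5554',               # device
        'out/Release/base_unittests',  # test_suite (or an .apk path)
        '',                            # gtest_filter ('' means no extra filtering)
        '',                            # test_arguments
        60,                            # timeout per test (placeholder)
        False,                         # performance_test
        True,                          # cleanup_test_files
        '',                            # tool_name (no Valgrind tool)
        0,                             # shard_index
        False,                         # dump_debug_info
        False,                         # fast_and_loose
        'Release',                     # build_type
        False)                         # in_webkit_checkout
    results = runner.RunTests()        # returns a TestResults object

If the device becomes unresponsive during the run and is no longer attached, RunTests() now swallows the DeviceUnresponsiveError and reports every test in the active gtest filter as failed (see the TODO in the diff about reporting these as skipped instead); if the device is still attached, the exception is re-raised as before.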