# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import sys

from base_test_runner import BaseTestRunner
import debug_info
import run_tests_helper
from test_package_apk import TestPackageApk
from test_package_executable import TestPackageExecutable
from test_result import TestResults


class SingleTestRunner(BaseTestRunner):
18 """Single test suite attached to a single device. | |
19 | |
20 Args: | |
21 device: Device to run the tests. | |
22 test_suite: A specific test suite to run, empty to run all. | |
23 gtest_filter: A gtest_filter flag. | |
24 test_arguments: Additional arguments to pass to the test binary. | |
25 timeout: Timeout for each test. | |
26 rebaseline: Whether or not to run tests in isolation and update the filter. | |
27 performance_test: Whether or not performance test(s). | |
28 cleanup_test_files: Whether or not to cleanup test files on device. | |
29 tool: Name of the Valgrind tool. | |
30 shard_index: index number of the shard on which the test suite will run. | |
31 dump_debug_info: Whether or not to dump debug information. | |
32 """ | |
33 | |
  def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
               rebaseline, performance_test, cleanup_test_files, tool,
               shard_index, dump_debug_info=False,
               fast_and_loose=False):
    BaseTestRunner.__init__(self, device, shard_index)
    self._running_on_emulator = self.device.startswith('emulator')
    self._gtest_filter = gtest_filter
    self._test_arguments = test_arguments
    self.test_results = TestResults()
    if dump_debug_info:
      self.dump_debug_info = debug_info.GTestDebugInfo(self.adb, device,
          os.path.basename(test_suite), gtest_filter)
    else:
      self.dump_debug_info = None
    self.fast_and_loose = fast_and_loose

    if os.path.splitext(test_suite)[1] == '.apk':
      self.test_package = TestPackageApk(
          self.adb, device,
          test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
          tool, self.dump_debug_info)
    else:
      self.test_package = TestPackageExecutable(
          self.adb, device,
          test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
          tool, self.dump_debug_info)

  def _GetHttpServerDocumentRootForTestSuite(self):
    """Returns the document root needed by the test suite."""
    if self.test_package.test_suite_basename == 'page_cycler_tests':
      return os.path.join(run_tests_helper.CHROME_DIR, 'data', 'page_cycler')
    return None

  def _TestSuiteRequiresMockTestServer(self):
    """Returns True if the test suite requires a mock test server."""
    return False
    # TODO(yfriedman): Disabled because of flakiness.
    # (self.test_package.test_suite_basename == 'unit_tests' or
    #  self.test_package.test_suite_basename == 'net_unittests' or
    #  False)

  def _GetFilterFileName(self):
    """Returns the filename of the gtest filter."""
    return os.path.join(sys.path[0], 'gtest_filter',
                        self.test_package.test_suite_basename + '_disabled')

  def _GetAdditionalEmulatorFilterName(self):
    """Returns the filename of the additional gtest filter for the emulator."""
    return os.path.join(sys.path[0], 'gtest_filter',
                        self.test_package.test_suite_basename +
                        '_emulator_additional_disabled')

  def GetDisabledTests(self):
    """Returns a list of disabled tests.

    Returns:
      A list of disabled tests obtained from gtest_filter/test_suite_disabled.
    """
    disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName())
    if self._running_on_emulator:
      # Append the emulator's filter file.
      disabled_tests.extend(run_tests_helper.GetExpectations(
          self._GetAdditionalEmulatorFilterName()))
    return disabled_tests

  def UpdateFilter(self, failed_tests):
    """Updates test_suite_disabled file with the new filter (deletes if empty).

    If running on the emulator, only the failed tests which are not already in
    the normal filter returned by _GetFilterFileName() are written to the
    emulator's additional filter file.

    Args:
      failed_tests: A sorted list of failed tests.
    """
    disabled_tests = []
    if not self._running_on_emulator:
      filter_file_name = self._GetFilterFileName()
    else:
      filter_file_name = self._GetAdditionalEmulatorFilterName()
      disabled_tests.extend(
          run_tests_helper.GetExpectations(self._GetFilterFileName()))
      logging.info('About to update emulator\'s additional filter (%s).'
                   % filter_file_name)

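    # Collect the failures that still need to be written out; on the emulator,
    # failures already covered by the normal filter are skipped.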
    new_failed_tests = []
    if failed_tests:
      for test in failed_tests:
        if test.name not in disabled_tests:
          new_failed_tests.append(test.name)

    if not new_failed_tests:
      if os.path.exists(filter_file_name):
        os.unlink(filter_file_name)
      return

    filter_file = open(filter_file_name, 'w')
    if self._running_on_emulator:
      filter_file.write('# Additional list of suppressions from emulator\n')
    else:
      filter_file.write('# List of suppressions\n')
    filter_file.write('# This file was automatically generated by %s\n'
                      % sys.argv[0])
    filter_file.write('\n'.join(sorted(new_failed_tests)))
    filter_file.write('\n')
    filter_file.close()

  def GetDataFilesForTestSuite(self):
    """Returns a list of data files/dirs needed by the test suite."""
    # Ideally, we'd just push all test data. However, it is over 100MB, and a
    # lot of the files are not relevant (some are only used by browser_tests,
    # others are for features that are not supported, etc.).
    if self.test_package.test_suite_basename in ['base_unittests',
                                                 'sql_unittests',
                                                 'unit_tests']:
      return [
        'base/data/json/bom_feff.json',
        'net/data/cache_tests/insert_load1',
        'net/data/cache_tests/dirty_entry5',
        'ui/base/test/data/data_pack_unittest',
        'chrome/test/data/bookmarks/History_with_empty_starred',
        'chrome/test/data/bookmarks/History_with_starred',
        'chrome/test/data/extensions/json_schema_test.js',
        'chrome/test/data/History/',
        'chrome/test/data/json_schema_validator/',
        'chrome/test/data/serializer_nested_test.js',
        'chrome/test/data/serializer_test.js',
        'chrome/test/data/serializer_test_nowhitespace.js',
        'chrome/test/data/top_sites/',
        'chrome/test/data/web_database',
        'chrome/test/data/zip',
      ]
    elif self.test_package.test_suite_basename == 'net_unittests':
      return [
        'net/data/cache_tests',
        'net/data/filter_unittests',
        'net/data/ftp',
        'net/data/proxy_resolver_v8_unittest',
        'net/data/ssl/certificates',
      ]
    elif self.test_package.test_suite_basename == 'ui_tests':
      return [
        'chrome/test/data/dromaeo',
        'chrome/test/data/json2.js',
        'chrome/test/data/sunspider',
        'chrome/test/data/v8_benchmark',
        'chrome/test/ui/sunspider_uitest.js',
        'chrome/test/ui/v8_benchmark_uitest.js',
      ]
    elif self.test_package.test_suite_basename == 'page_cycler_tests':
      data = [
        'tools/page_cycler',
        'data/page_cycler',
      ]
      for d in data:
        if not os.path.exists(d):
          raise Exception('Page cycler data not found.')
      return data
    elif self.test_package.test_suite_basename == 'webkit_unit_tests':
      return [
        'third_party/WebKit/Source/WebKit/chromium/tests/data',
      ]
    elif self.test_package.test_suite_basename == 'content_unittests':
      return [
        'webkit/data/dom_storage/webcore_test_database.localstorage',
      ]
    return []

  def LaunchHelperToolsForTestSuite(self):
    """Launches helper tools for the test suite.

    Some test suites need helper tools (such as an HTTP server) running before
    their tests can complete successfully.
    """
    document_root = self._GetHttpServerDocumentRootForTestSuite()
    if document_root:
      self.LaunchTestHttpServer(document_root)
    if self._TestSuiteRequiresMockTestServer():
      self.LaunchChromeTestServerSpawner()

  def StripAndCopyFiles(self):
    """Strips and copies the required data files for the test suite."""
    self.test_package.StripAndCopyExecutable()
    self.test_package.PushDataAndPakFiles()
    self.test_package.tool.CopyFiles()
    test_data = self.GetDataFilesForTestSuite()
    if test_data and not self.fast_and_loose:
      if self.test_package.test_suite_basename == 'page_cycler_tests':
        # The page cycler test data is huge (around 200MB), so store it on the
        # sdcard and create symbolic links that map it into /data/local/tmp/
        # below.
        self.CopyTestData(test_data, '/sdcard/')
        for p in [os.path.dirname(d) for d in test_data if os.path.isdir(d)]:
          mapped_device_path = '/data/local/tmp/' + p
          # Unlink mapped_device_path first in case it was mapped to a wrong
          # path. Use '-r' because the old path could be a directory.
          self.adb.RunShellCommand('rm -r %s' % mapped_device_path)
          self.adb.RunShellCommand(
              'ln -s /sdcard/%s %s' % (p, mapped_device_path))
      else:
        self.CopyTestData(test_data, '/data/local/tmp/')

  def RunTestsWithFilter(self):
    """Runs the tests via a small, temporary shell script."""
    self.test_package.CreateTestRunnerScript(self._gtest_filter,
                                             self._test_arguments)
    self.test_results = self.test_package.RunTestsAndListResults()

  def RebaselineTests(self):
    """Runs all available tests, restarting in case of failures."""
    if self._gtest_filter:
      all_tests = set(self._gtest_filter.split(':'))
    else:
      all_tests = set(self.test_package.GetAllTests())
    failed_results = set()
    executed_results = set()
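    # Keep running until every test in all_tests has been executed. A crash
    # ends the current run early, so the next iteration re-runs whatever has
    # not been executed yet.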
    while True:
      executed_names = set([f.name for f in executed_results])
      self._gtest_filter = ':'.join(all_tests - executed_names)
      self.RunTestsWithFilter()
      failed_results.update(self.test_results.crashed,
                            self.test_results.failed)
      executed_results.update(self.test_results.crashed,
                              self.test_results.failed,
                              self.test_results.ok)
      executed_names = set([f.name for f in executed_results])
      logging.info('*' * 80)
      logging.info(self.device)
      logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
                   str(len(all_tests)))
      logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
                   str([f.name for f in failed_results]))
      logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' +
                   str(all_tests - executed_names))
      logging.info('*' * 80)
      if executed_names == all_tests:
        break
    self.test_results = TestResults.FromRun(
        ok=list(executed_results - failed_results),
        failed=list(failed_results))

  def RunTests(self):
    """Runs all tests (in rebaseline mode, runs each test in isolation).

    Returns:
      A TestResults object.
    """
    if self.test_package.rebaseline:
      self.RebaselineTests()
    else:
      if not self._gtest_filter:
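        # No explicit filter was given, so build a negative gtest filter that
        # excludes the known-disabled tests and any test matching a disabled
        # name prefix.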
        self._gtest_filter = ('-' + ':'.join(self.GetDisabledTests()) + ':' +
            ':'.join(['*.' + x + '*' for x in
                      self.test_package.GetDisabledPrefixes()]))
      self.RunTestsWithFilter()
    return self.test_results

  def SetUp(self):
    """Sets up the necessary test environment for the test suite."""
    super(SingleTestRunner, self).SetUp()
    if self.test_package.performance_test:
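      # On the buildbots, performance tests run with Java asserts disabled and
      # after a soft reboot of the device.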
      if run_tests_helper.IsRunningAsBuildbot():
        self.adb.SetJavaAssertsEnabled(enable=False)
        self.adb.Reboot(full_reboot=False)
      self.adb.SetupPerformanceTest()
    if self.dump_debug_info:
      self.dump_debug_info.StartRecordingLog(True)
    self.StripAndCopyFiles()
    self.LaunchHelperToolsForTestSuite()
    self.test_package.tool.SetupEnvironment()

  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    self.test_package.tool.CleanUpEnvironment()
    if self.test_package.cleanup_test_files:
      self.adb.RemovePushedFiles()
    if self.dump_debug_info:
      self.dump_debug_info.StopRecordingLog()
    if self.test_package.performance_test:
      self.adb.TearDownPerformanceTest()
    super(SingleTestRunner, self).TearDown()