OLD | NEW |
---|---|
(Empty) | |
1 #!/usr/bin/python | |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
import logging
import os
import sys

from base_test_runner import BaseTestRunner
import debug_info
import run_tests_helper
from test_package_executable import TestPackageExecutable
from test_result import *
Nirnimesh
2011/10/21 08:16:15
do not import *
michaelbai
2011/10/21 21:08:41
Done.
| |
14 | |
15 | |
class SingleTestRunner(BaseTestRunner):
  """Single test suite attached to a single device.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    dump_debug_info: Whether or not to dump debug information.
  """

  def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
               rebaseline, performance_test, cleanup_test_files, tool,
               dump_debug_info=False):
    BaseTestRunner.__init__(self, device)
    # Emulator serials begin with 'emulator'; this flag selects the extra
    # emulator-only filter files below.
    self.running_on_emulator = self.device.startswith('emulator')
    self.gtest_filter = gtest_filter
    self.test_arguments = test_arguments
    self.test_results = TestResults()
    self.dump_debug_info = (
        debug_info.GTestDebugInfo(self.adb, device,
                                  os.path.basename(test_suite), gtest_filter)
        if dump_debug_info else None)
    self.test_package = TestPackageExecutable(
        self.adb, device, test_suite, timeout, rebaseline, performance_test,
        cleanup_test_files, tool, self.dump_debug_info)
50 def _GetHttpServerDocumentRootForTestSuite(self): | |
51 """Returns the document root needed by the test suite.""" | |
52 if self.test_package.test_suite_basename == 'page_cycler_tests': | |
53 return os.path.join(run_tests_helper.CHROME_DIR, 'data', 'page_cycler') | |
54 return None | |
55 | |
56 | |
57 def _TestSuiteRequiresMockTestServer(self): | |
58 """Returns True if the test suite requires mock test server.""" | |
59 return False | |
60 # TODO(yfriedman): Disabled because of flakiness. | |
Nirnimesh
2011/10/21 08:16:15
Pleawe reference the bug (or file one if necessary
michaelbai
2011/10/21 21:08:41
Done.
| |
61 #(self.test_package.test_suite_basename == 'unit_tests' or | |
Nirnimesh
2011/10/21 08:16:15
need a blank space after #
Or remove unused code
michaelbai
2011/10/21 21:08:41
Done.
| |
62 # self.test_package.test_suite_basename == 'net_unittests' or | |
63 # False) | |
64 | |
65 def _GetFilterFileName(self): | |
66 """Returns the filename of gtest filter.""" | |
67 filter_dir = os.path.join(sys.path[0], 'gtest_filter') | |
68 filter_name = self.test_package.test_suite_basename + '_disabled' | |
69 disabled_filter = os.path.join(filter_dir, filter_name) | |
70 return disabled_filter | |
71 | |
72 def _GetAdditionalEmulatorFilterName(self): | |
73 """Returns the filename of additional gtest filter for emulator.""" | |
74 filter_dir = os.path.join(sys.path[0], 'gtest_filter') | |
75 filter_name = ('%s%s') % (self.test_package.test_suite_basename, | |
Nirnimesh
2011/10/21 08:16:15
remove parens around '%s%s'
michaelbai
2011/10/21 21:08:41
Done.
| |
76 '_emulator_additional_disabled') | |
77 disabled_filter = os.path.join(filter_dir, filter_name) | |
78 return disabled_filter | |
79 | |
def GetDisabledTests(self):
  """Returns a list of disabled tests.

  Returns:
    Test names read from gtest_filter/<suite>_disabled; when running on an
    emulator, the emulator's additional filter file entries are appended.
  """
  disabled = run_tests_helper.GetExpectations(self._GetFilterFileName())
  if self.running_on_emulator:
    # Append the emulator-only filter file on top of the normal one.
    disabled += run_tests_helper.GetExpectations(
        self._GetAdditionalEmulatorFilterName())
  return disabled
92 | |
def UpdateFilter(self, failed_tests):
  """Updates the test_suite_disabled file with a new filter.

  Deletes the filter file when there are no new failures. When running on an
  emulator, only the failed tests which are not in the normal filter
  returned by _GetFilterFileName() are written to the emulator's additional
  filter file.

  Args:
    failed_tests: A sorted list of failed tests.
  """
  disabled_tests = []
  if not self.running_on_emulator:
    filter_file_name = self._GetFilterFileName()
  else:
    filter_file_name = self._GetAdditionalEmulatorFilterName()
    # Failures already covered by the normal filter must not be duplicated
    # into the emulator's additional filter.
    disabled_tests.extend(
        run_tests_helper.GetExpectations(self._GetFilterFileName()))
    # Lazy %-args instead of eager string formatting; also fixes the
    # 'addtional' typo in the log message.
    logging.info('About to update emulator\'s additional filter (%s).',
                 filter_file_name)

  new_failed_tests = []
  if failed_tests:
    for test in failed_tests:
      if test.name not in disabled_tests:
        new_failed_tests.append(test.name)

  if not new_failed_tests:
    # Nothing left to suppress: remove a stale filter file if present.
    if os.path.exists(filter_file_name):
      os.unlink(filter_file_name)
    return

  # open() instead of the Python-2-only file() builtin, with a context
  # manager so the file is closed even if a write raises.
  with open(filter_file_name, 'w') as filter_file:
    if self.running_on_emulator:
      filter_file.write('# Additional list of suppressions from emulator\n')
    else:
      filter_file.write('# List of suppressions\n')
    # Written as a '#' comment line so filter-file parsers do not treat it
    # as a test-name entry (the old code emitted it without the '#').
    filter_file.write('# This file was automatically generated by '
                      'run_tests.py\n')
    filter_file.write('\n'.join(sorted(new_failed_tests)))
    filter_file.write('\n')
134 | |
def GetDataFilesForTestSuite(self):
  """Returns a list of data files/dirs needed by the test suite.

  Ideally we'd just push all test data, but it is >100MB and many files are
  irrelevant (some are for browser_tests, others for unsupported features,
  etc.), so each suite gets an explicit list.
  """
  suite = self.test_package.test_suite_basename
  if suite in ('base_unittests', 'sql_unittests', 'unit_tests'):
    return [
        'net/data/cache_tests/insert_load1',
        'net/data/cache_tests/dirty_entry5',
        'ui/base/test/data/data_pack_unittest',
        'chrome/test/data/bookmarks/History_with_empty_starred',
        'chrome/test/data/bookmarks/History_with_starred',
        'chrome/test/data/extensions/json_schema_test.js',
        'chrome/test/data/History/',
        'chrome/test/data/json_schema_validator/',
        'chrome/test/data/serializer_nested_test.js',
        'chrome/test/data/serializer_test.js',
        'chrome/test/data/serializer_test_nowhitespace.js',
        'chrome/test/data/top_sites/',
        'chrome/test/data/web_database',
        'chrome/test/data/zip',
    ]
  if suite == 'net_unittests':
    return [
        'net/data/cache_tests',
        'net/data/filter_unittests',
        'net/data/ftp',
        'net/data/proxy_resolver_v8_unittest',
        'net/data/ssl/certificates',
    ]
  if suite == 'ui_tests':
    return [
        'chrome/test/data/dromaeo',
        'chrome/test/data/json2.js',
        'chrome/test/data/sunspider',
        'chrome/test/data/v8_benchmark',
        'chrome/test/ui/sunspider_uitest.js',
        'chrome/test/ui/v8_benchmark_uitest.js',
    ]
  if suite == 'page_cycler_tests':
    data = [
        'tools/page_cycler',
        'data/page_cycler',
    ]
    for path in data:
      if not os.path.exists(path):
        raise Exception('Page cycler data not found.')
    return data
  if suite == 'webkit_unit_tests':
    return [
        'third_party/WebKit/Source/WebKit/chromium/tests/data',
    ]
  return []
190 | |
def LaunchHelperToolsForTestSuite(self):
  """Launches helper tools needed by the test suite.

  Some tests require helper tools (an HTTP server, the mock test server) to
  be running before they can complete successfully.
  """
  root = self._GetHttpServerDocumentRootForTestSuite()
  if root:
    self.LaunchTestHttpServer(root)
  if self._TestSuiteRequiresMockTestServer():
    self.LaunchChromeTestServerSpawner()
202 | |
def StripAndCopyFiles(self):
  """Strips the test binary and pushes it plus its data files to the device."""
  self.test_package.StripAndCopyExecutable()
  self.test_package.tool.CopyFiles()
  test_data = self.GetDataFilesForTestSuite()
  if not test_data:
    return
  if self.test_package.test_suite_basename != 'page_cycler_tests':
    self.CopyTestData(test_data, '/data/local/tmp/')
    return
  # Page cycler test data is huge (around 200M), so it is stored on the
  # sdcard and symlinked into /data/local/tmp/.
  self.CopyTestData(test_data, '/sdcard/')
  for parent in [os.path.dirname(d) for d in test_data if os.path.isdir(d)]:
    mapped_device_path = '/data/local/tmp/' + parent
    # Unlink any previous mapping first in case it pointed at a wrong path;
    # '-r' because the old path could be a directory rather than a symlink.
    self.adb.RunShellCommand('rm -r %s' % mapped_device_path)
    self.adb.RunShellCommand(
        'ln -s /sdcard/%s %s' % (parent, mapped_device_path))
223 | |
def RunTestsWithFilter(self):
  """Runs the filtered tests via a small, temporary shell script.

  Uses self.gtest_filter and self.test_arguments; stores the outcome in
  self.test_results.
  """
  self.test_package.CreateTestRunnerScript(self.gtest_filter,
                                           self.test_arguments)
  self.test_results = self.test_package.RunTestsAndListResults()
229 | |
def RebaselineTests(self):
  """Runs all available tests, restarting in case of failures.

  Repeatedly runs the not-yet-executed tests until every test has produced
  a result, accumulating crashes/failures, then stores the combined outcome
  in self.test_results.
  """
  if self.gtest_filter:
    all_tests = set(self.gtest_filter.split(':'))
  else:
    all_tests = set(self.test_package.GetAllTests())
  failed_results = set()
  executed_results = set()
  while True:
    executed_names = set(r.name for r in executed_results)
    # Only run what has not been executed yet (survives crashes/restarts).
    self.gtest_filter = ':'.join(all_tests - executed_names)
    self.RunTestsWithFilter()
    failed_results.update(self.test_results.crashed,
                          self.test_results.failed)
    executed_results.update(self.test_results.crashed,
                            self.test_results.failed,
                            self.test_results.ok)
    executed_names = set(r.name for r in executed_results)
    remaining = all_tests - executed_names
    logging.info('*' * 80)
    logging.info(self.device)
    logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
                 str(len(all_tests)))
    logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
                 str([r.name for r in failed_results]))
    logging.info('Remaining: ' + str(len(remaining)) + ' ' + str(remaining))
    logging.info('*' * 80)
    if executed_names == all_tests:
      break
  self.test_results = TestResults.FromOkAndFailed(
      list(executed_results - failed_results), list(failed_results))
261 | |
262 def _RunTestsForSuiteInternal(self): | |
263 """Runs all tests (in rebaseline mode, run each test in isolation). | |
264 | |
265 Returns: | |
266 A TestResults object. | |
267 """ | |
268 if self.test_package.rebaseline: | |
269 self.RebaselineTests() | |
270 else: | |
271 if not self.gtest_filter: | |
272 self.gtest_filter = ('-' + ':'.join(self.GetDisabledTests()) + ':' + | |
273 ':'.join(['*.' + x + '*' for x in | |
274 self.test_package.GetDisabledPrefixes()])) | |
275 self.RunTestsWithFilter() | |
276 | |
def SetUp(self):
  """Sets up the necessary test environment for the test suite."""
  super(SingleTestRunner, self).SetUp()
  if self.test_package.performance_test:
    if run_tests_helper.IsRunningAsBuildbot():
      # Bots get a clean, assert-free runtime for stable perf numbers.
      self.adb.SetJavaAssertsEnabled(enable=False)
      self.adb.Reboot(full_reboot=False)
    self.adb.SetupPerformanceTest()
  if self.dump_debug_info:
    self.dump_debug_info.StartRecordingLog(True)
  self.StripAndCopyFiles()
  self.LaunchHelperToolsForTestSuite()
  self.test_package.tool.SetupEnvironment()
290 | |
def TearDown(self):
  """Cleans up the test environment for the test suite."""
  super(SingleTestRunner, self).TearDown()
  self.test_package.tool.CleanUpEnvironment()
  if self.test_package.cleanup_test_files:
    self.adb.RemovePushedFiles()
  if self.dump_debug_info:
    self.dump_debug_info.StopRecordingLog()
  if self.test_package.performance_test:
    self.adb.TearDownPerformanceTest()
301 | |
def RunTests(self):
  """Runs the tests and cleans up the files once finished.

  Returns:
    A TestResults object.
  """
  self.SetUp()
  try:
    self._RunTestsForSuiteInternal()
  finally:
    # Always tear down, even if the run raised.
    self.TearDown()
  return self.test_results
OLD | NEW |