Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(65)

Side by Side Diff: tools/valgrind/chrome_tests.py

Issue 55034: Support valgrinding layout tests. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 11 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | tools/valgrind/valgrind_analyze.py » ('j') | tools/valgrind/valgrind_analyze.py » ('J')
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 # chrome_tests.py 6 # chrome_tests.py
7 7
8 ''' Runs various chrome tests through valgrind_test.py. 8 ''' Runs various chrome tests through valgrind_test.py.
9 9
10 This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice 10 This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice
11 to merge these two files. For now, I'm leaving it here with sections that 11 to merge these two files.
12 aren't supported commented out as this is more of a work in progress.
13 ''' 12 '''
14 13
15 import glob 14 import glob
16 import logging 15 import logging
17 import optparse 16 import optparse
18 import os 17 import os
19 import stat 18 import stat
20 import sys 19 import sys
21 20
22 import google.logging_utils 21 import google.logging_utils
23 import google.path_utils 22 import google.path_utils
24 # Import the platform_utils up in the layout tests which have been modified to 23 # Import the platform_utils up in the layout tests which have been modified to
25 # work under non-Windows platforms instead of the ones that are in the 24 # work under non-Windows platforms instead of the ones that are in the
26 # tools/python/google directory. (See chrome_tests.sh which sets PYTHONPATH 25 # tools/python/google directory. (See chrome_tests.sh which sets PYTHONPATH
27 # correctly.) 26 # correctly.)
28 # 27 #
29 # TODO(erg): Copy/Move the relevant functions from the layout_package version 28 # TODO(erg): Copy/Move the relevant functions from the layout_package version
30 # of platform_utils back up to google.platform_utils 29 # of platform_utils back up to google.platform_utils
31 # package. http://crbug.com/6164 30 # package. http://crbug.com/6164
32 import layout_package.platform_utils 31 import layout_package.platform_utils
33 32
34 import common 33 import common
35 34
36 35
class TestNotFound(Exception):
  '''Raised when an unknown test name is passed on the command line.'''
  pass
38 37
39 38
40 class ChromeTests: 39 class ChromeTests:
41 '''This class is derived from the chrome_tests.py file in ../purify/. 40 '''This class is derived from the chrome_tests.py file in ../purify/.
42
43 TODO(erg): Finish implementing this. I've commented out all the parts that I
44 don't have working yet. We still need to deal with layout tests, and long
45 term, the UI tests.
46 ''' 41 '''
47 42
48 def __init__(self, options, args, test): 43 def __init__(self, options, args, test):
49 # the known list of tests 44 # the known list of tests
50 self._test_list = { 45 self._test_list = {
51 "test_shell": self.TestTestShell, 46 "test_shell": self.TestTestShell,
52 "unit": self.TestUnit, 47 "unit": self.TestUnit,
53 "net": self.TestNet, 48 "net": self.TestNet,
54 "ipc": self.TestIpc, 49 "ipc": self.TestIpc,
55 "base": self.TestBase, 50 "base": self.TestBase,
56 "googleurl": self.TestGoogleurl, 51 "googleurl": self.TestGoogleurl,
57 "media": self.TestMedia, 52 "media": self.TestMedia,
58 "printing": self.TestPrinting, 53 "printing": self.TestPrinting,
59 # "layout": self.TestLayout, 54 "layout": self.TestLayout,
60 # "layout_all": self.TestLayoutAll,
61 "ui": self.TestUI 55 "ui": self.TestUI
62 } 56 }
63 57
64 if test not in self._test_list: 58 if test not in self._test_list:
65 raise TestNotFound("Unknown test: %s" % test) 59 raise TestNotFound("Unknown test: %s" % test)
66 60
67 self._options = options 61 self._options = options
68 self._args = args 62 self._args = args
69 self._test = test 63 self._test = test
70 64
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
136 cmd.append("--show_all_leaks") 130 cmd.append("--show_all_leaks")
137 if self._options.track_origins: 131 if self._options.track_origins:
138 cmd.append("--track_origins") 132 cmd.append("--track_origins")
139 if self._options.generate_suppressions: 133 if self._options.generate_suppressions:
140 cmd.append("--generate_suppressions") 134 cmd.append("--generate_suppressions")
141 if exe == "ui_tests": 135 if exe == "ui_tests":
142 cmd.append("--trace_children") 136 cmd.append("--trace_children")
143 cmd.append("--indirect") 137 cmd.append("--indirect")
144 if exe: 138 if exe:
145 cmd.append(os.path.join(self._options.build_dir, exe)) 139 cmd.append(os.path.join(self._options.build_dir, exe))
146     # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time 140     # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
147     # so we can find the slowpokes. 141     # so we can find the slowpokes.
148 cmd.append("--gtest_print_time"); 142 cmd.append("--gtest_print_time")
149 return cmd 143 return cmd
150 144
def Run(self):
  ''' Runs the test specified by command-line argument --test.

  Returns:
    The exit status of the dispatched test method.
  '''
  # Lazy %-style args: the message is only formatted if INFO is enabled.
  logging.info("running test %s", self._test)
  return self._test_list[self._test]()
155 149
156 def _ReadGtestFilterFile(self, name, cmd): 150 def _ReadGtestFilterFile(self, name, cmd):
157 '''Read a file which is a list of tests to filter out with --gtest_filter 151 '''Read a file which is a list of tests to filter out with --gtest_filter
158 and append the command-line option to cmd. 152 and append the command-line option to cmd.
(...skipping 20 matching lines...) Expand all
179 if gtest_filter: 173 if gtest_filter:
180 cmd.append("--gtest_filter=%s" % gtest_filter) 174 cmd.append("--gtest_filter=%s" % gtest_filter)
181 175
def SimpleTest(self, module, name, cmd_args=None):
  '''Valgrind a single gtest binary, with the standard filter file applied.

  Args:
    module: top level component this test is from (webkit, base, etc.)
    name: the name of the test binary (used to name output files)
    cmd_args: optional extra arguments appended to the command line
  '''
  command = self._DefaultCommand(module, name)
  self._ReadGtestFilterFile(name, command)
  command.extend(cmd_args or [])
  return common.RunSubprocess(command, 0)
188 182
def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None,
                 out_dir_extra=None):
  '''Valgrind a target binary, which will be executed one or more times via a
  script or driver program.
  Args:
    module - which top level component this test is from (webkit, base, etc.)
    exe - the name of the exe (it's assumed to exist in build_dir)
    name - the name of this test (used to name output files)
    script - the driver program or script. If it's python.exe, we use
      search-path behavior to execute, otherwise we assume that it is in
      build_dir.
    multi - a boolean hint that the exe will be run multiple times, generating
      multiple output files (without this option, only the last run will be
      recorded and analyzed)
    cmd_args - extra arguments to pass to the valgrind_test.py script
    out_dir_extra - optional subdirectory appended to the multi-run output dir
  '''
  cmd = self._DefaultCommand(module)
  cmd.append("--exe=%s" % os.path.join(self._options.build_dir, exe))
  cmd.append("--name=%s" % name)
  if multi:
    # Each run writes its own numbered output file under "latest".
    out = os.path.join(google.path_utils.ScriptDir(), "latest")
    if out_dir_extra:
      out = os.path.join(out, out_dir_extra)
    if os.path.exists(out):
      # Clear stale results from a previous run.
      for stale in glob.glob(os.path.join(out, "*.txt")):
        os.remove(stale)
    else:
      os.makedirs(out)
    cmd.append("--out_file=%s" % os.path.join(out, "%s%%5d.txt" % name))
  if cmd_args:
    cmd.extend(cmd_args)
  if script[0] != "python.exe" and not os.path.exists(script[0]):
    script[0] = os.path.join(self._options.build_dir, script[0])
  cmd.extend(script)
  self._ReadGtestFilterFile(name, cmd)
  return common.RunSubprocess(cmd, 0)
229
def TestBase(self):
  '''Valgrind the base_unittests binary.'''
  return self.SimpleTest("base", "base_unittests")
232 185
def TestGoogleurl(self):
  '''Valgrind the googleurl_unittests binary.'''
  return self.SimpleTest("chrome", "googleurl_unittests")
235 188
def TestMedia(self):
  '''Valgrind the media_unittests binary.'''
  return self.SimpleTest("chrome", "media_unittests")
238 191
239 def TestPrinting(self): 192 def TestPrinting(self):
(...skipping 11 matching lines...) Expand all
def TestUnit(self):
  '''Valgrind the unit_tests binary.'''
  return self.SimpleTest("chrome", "unit_tests")
253 206
def TestUI(self):
  '''Valgrind ui_tests with lengthened timeouts (valgrind makes tests slow).'''
  ui_timeouts = ["--ui-test-timeout=120000",
                 "--ui-test-action-timeout=80000",
                 "--ui-test-action-max-timeout=180000"]
  return self.SimpleTest("chrome", "ui_tests", cmd_args=["--"] + ui_timeouts)
260 213
def TestLayoutChunk(self, chunk_num, chunk_size):
  '''Run a slice of the layout tests under valgrind.

  Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
  list of tests, wrapping around to the beginning of the list at the end.
  If chunk_size is zero, runs all tests in the list once.  If a .txt file
  is given as the first command-line argument, it is used as the list of
  tests to run instead.

  Returns:
    The exit status of the valgrind wrapper.
  '''
  # Build the ginormous commandline in 'cmd'.  It's going to be roughly
  #   python valgrind_test.py ... python run_webkit_tests.py ...
  # but we'll use the --indirect flag to valgrind_test.py
  # to avoid valgrinding python.
  # Start by building the valgrind_test.py commandline.
  cmd = self._DefaultCommand("webkit")
  cmd.append("--trace_children")
  cmd.append("--indirect")
  # Store each chunk in its own directory so that we can find the data later.
  chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
  out_dir = os.path.join(google.path_utils.ScriptDir(), "latest")
  out_dir = os.path.join(out_dir, chunk_dir)
  if os.path.exists(out_dir):
    # Remove stale results from a previous run of this chunk.
    old_files = glob.glob(os.path.join(out_dir, "*.txt"))
    for f in old_files:
      os.remove(f)
  else:
    os.makedirs(out_dir)
  # Now build script_cmd, the run_webkit_tests.py commandline.
  script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                        "run_webkit_tests.py")
  script_cmd = ["python", script, "--run-singly", "-v",
                "--noshow-results", "--time-out-ms=200000",
                "--nocheck-sys-deps"]
  if chunk_size > 0:
    script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
  if len(self._args):
    # If the first arg is a .txt file, treat it as a list of tests.
    if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
      script_cmd.append("--test-list=%s" % self._args[0])
    else:
      script_cmd.extend(self._args)
  self._ReadGtestFilterFile("layout", script_cmd)
  # Now run script_cmd with the wrapper in cmd.
  cmd.append("--")
  cmd.extend(script_cmd)
  # NOTE(review): out_dir is prepared but never passed to the wrapper
  # (no --out_file); presumably results land elsewhere -- confirm.
  return common.RunSubprocess(cmd, 0)
263 260
def TestLayout(self):
  '''Run the next chunk of layout tests under valgrind.

  A "chunk file" is maintained in the local directory so that each test
  runs a slice of the layout tests of size chunk_size that increments with
  each run.  Since tests can be added and removed from the layout tests at
  any time, this is not going to give exact coverage, but it will allow us
  to continuously run small slices of the layout tests under valgrind rather
  than having to run all of them in one shot.

  Returns:
    The exit status of the layout-test chunk that was run.
  '''
  chunk_size = self._options.num_tests
  if chunk_size == 0:
    # Run everything at once; no chunk state needs to be kept.
    return self.TestLayoutChunk(0, 0)
  chunk_num = 0
  # os.path.join with a single argument was a no-op; use the name directly.
  chunk_file = "valgrind_layout_chunk.txt"
  logging.info("Reading state from " + chunk_file)
  try:
    f = open(chunk_file)
    try:
      # Renamed from 'str' to avoid shadowing the builtin.
      contents = f.read()
      if len(contents):
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we loop
      # that we almost guaranteed won't be at the end of the test list)
      if chunk_num > 10000:
        chunk_num = 0
    finally:
      f.close()
  except IOError as e:
    logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                  e.errno, e.strerror))
  ret = self.TestLayoutChunk(chunk_num, chunk_size)
  # Wait until after the test runs to completion to write out the new chunk
  # number.  This way, if the bot is killed, we'll start running again from
  # the current chunk rather than skipping it.
  logging.info("Saving state to " + chunk_file)
  try:
    f = open(chunk_file, "w")
    try:
      f.write("%d" % (chunk_num + 1))
    finally:
      f.close()
  except IOError as e:
    logging.error("error writing to file %s (%d, %s)" % (chunk_file,
                  e.errno, e.strerror))
  # Since we're running small chunks of the layout tests, it's important to
  # mark the ones that have errors in them.  These won't be visible in the
  # summary list for long, but will be useful for someone reviewing this bot.
  return ret
309 # script_cmd, multi=True, cmd_args=["--timeout=0"] )
310 # return ret
311
312 # # store each chunk in its own directory so that we can find the data later
313 # chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
314 # ret = self.ScriptedTest("webkit", "test_shell.exe", "layout",
315 # script_cmd, multi=True, cmd_args=["--timeout=0"],
316 # out_dir_extra=chunk_dir)
317
318 # # Wait until after the test runs to completion to write out the new chunk
319 # # number. This way, if the bot is killed, we'll start running again from
320 # # the current chunk rather than skipping it.
321 # try:
322 # f = open(chunk_file, "w")
323 # chunk_num += 1
324 # f.write("%d" % chunk_num)
325 # f.close()
326 # except IOError, (errno, strerror):
327 # logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
328 # strerror))
329 # # Since we're running small chunks of the layout tests, it's important to
330 # # mark the ones that have errors in them. These won't be visible in the
331 # # summary list for long, but will be useful for someone reviewing this bot .
332 # return ret
333
334 # def TestUI(self):
335 # if not self._options.no_reinstrument:
336 # instrumentation_error = self.InstrumentDll()
337 # if instrumentation_error:
338 # return instrumentation_error
339 # return self.ScriptedTest("chrome", "chrome.exe", "ui_tests",
340 # ["ui_tests.exe",
341 # "--single-process",
342 # "--ui-test-timeout=120000",
343 # "--ui-test-action-timeout=80000",
344 # "--ui-test-action-max-timeout=180000"],
345 # multi=True)
346
347 306
348 def _main(_): 307 def _main(_):
349 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> " 308 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
350 "[-t <test> ...]") 309 "[-t <test> ...]")
351 parser.disable_interspersed_args() 310 parser.disable_interspersed_args()
352 parser.add_option("-b", "--build_dir", 311 parser.add_option("-b", "--build_dir",
353 help="the location of the output of the compiler output") 312 help="the location of the output of the compiler output")
354 parser.add_option("-t", "--test", action="append", 313 parser.add_option("-t", "--test", action="append",
355 help="which test to run") 314 help="which test to run")
356 parser.add_option("", "--baseline", action="store_true", default=False, 315 parser.add_option("", "--baseline", action="store_true", default=False,
357 help="generate baseline data instead of validating") 316 help="generate baseline data instead of validating")
358 parser.add_option("", "--gtest_filter", 317 parser.add_option("", "--gtest_filter",
359 help="additional arguments to --gtest_filter") 318 help="additional arguments to --gtest_filter")
360 parser.add_option("-v", "--verbose", action="store_true", default=False, 319 parser.add_option("-v", "--verbose", action="store_true", default=False,
361 help="verbose output - enable debug log messages") 320 help="verbose output - enable debug log messages")
362 parser.add_option("", "--show_all_leaks", action="store_true", 321 parser.add_option("", "--show_all_leaks", action="store_true",
363 default=False, 322 default=False,
364 help="also show even less blatant leaks") 323 help="also show even less blatant leaks")
365 parser.add_option("", "--track_origins", action="store_true", 324 parser.add_option("", "--track_origins", action="store_true",
366 default=False, 325 default=False,
367 help="Show whence uninit bytes came. 30% slower.") 326 help="Show whence uninit bytes came. 30% slower.")
368 parser.add_option("", "--no-reinstrument", action="store_true", default=False, 327 parser.add_option("", "--no-reinstrument", action="store_true", default=False,
369 help="Don't force a re-instrumentation for ui_tests") 328 help="Don't force a re-instrumentation for ui_tests")
370 parser.add_option("", "--generate_suppressions", action="store_true", 329 parser.add_option("", "--generate_suppressions", action="store_true",
371 default=False, 330 default=False,
372 help="Skip analysis and generate suppressions") 331 help="Skip analysis and generate suppressions")
332 # My machine can do about 120 layout tests/hour in release mode.
333 # Let's do 30 minutes worth per run.
334 # The CPU is mostly idle, so perhaps we can raise this when
335 # we figure out how to run them more efficiently.
336 parser.add_option("-n", "--num_tests", default=60, type="int",
337 help="for layout tests: number of subtests per run. 0 for a ll.")
Erik does not do reviews 2009/03/30 16:22:48 80 columns
373 338
374 options, args = parser.parse_args() 339 options, args = parser.parse_args()
375 340
376 if options.verbose: 341 if options.verbose:
377 google.logging_utils.config_root(logging.DEBUG) 342 google.logging_utils.config_root(logging.DEBUG)
378 else: 343 else:
379 google.logging_utils.config_root() 344 google.logging_utils.config_root()
380 345
381 if not options.test or not len(options.test): 346 if not options.test or not len(options.test):
382 parser.error("--test not specified") 347 parser.error("--test not specified")
383 348
384 for t in options.test: 349 for t in options.test:
385 tests = ChromeTests(options, args, t) 350 tests = ChromeTests(options, args, t)
386 ret = tests.Run() 351 ret = tests.Run()
387 if ret: return ret 352 if ret: return ret
388 return 0 353 return 0
389 354
390 355
if __name__ == "__main__":
  # Propagate the first failing test's status as the process exit code.
  sys.exit(_main(sys.argv))
OLDNEW
« no previous file with comments | « no previous file | tools/valgrind/valgrind_analyze.py » ('j') | tools/valgrind/valgrind_analyze.py » ('J')

Powered by Google App Engine
This is Rietveld 408576698