Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(359)

Side by Side Diff: tools/valgrind/chrome_tests.py

Issue 2062813002: Remove some scripts that were used by the valgrind bots. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebase Created 3 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « tools/valgrind/chrome_tests.bat ('k') | tools/valgrind/chrome_tests.sh » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
5
6 ''' Runs various chrome tests through valgrind_test.py.'''
7
8 import glob
9 import logging
10 import multiprocessing
11 import optparse
12 import os
13 import stat
14 import subprocess
15 import sys
16
17 import logging_utils
18 import path_utils
19
20 import common
21 import valgrind_test
22
# Raised when the requested test name is not in ChromeTests._test_list.
class TestNotFound(Exception): pass

# Raised when --gtest_filter is combined with a "test:filter" --test argument.
class MultipleGTestFiltersSpecified(Exception): pass

# Raised when no build directory was given and none could be auto-detected.
class BuildDirNotFound(Exception): pass

# Raised when more than one candidate build directory is auto-detected.
class BuildDirAmbiguous(Exception): pass

# Raised when the requested test executable does not exist in the build dir.
class ExecutableNotFound(Exception): pass

# Raised when the test binary is incompatible with the selected tool
# (e.g. an ASan-instrumented binary run under memcheck).
class BadBinary(Exception): pass
34
class ChromeTests:
  """Runs Chromium test binaries under the memory tools in valgrind_test.py."""

  # Tools slow enough that the extra ".gtest.txt" exclude lists apply
  # (consumed by _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "drmemory"]
  # Default number of layout tests run per chunk; overridable with -n/--num_tests.
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
38
39 def __init__(self, options, args, test):
40 if ':' in test:
41 (self._test, self._gtest_filter) = test.split(':', 1)
42 else:
43 self._test = test
44 self._gtest_filter = options.gtest_filter
45
46 if self._test not in self._test_list:
47 raise TestNotFound("Unknown test: %s" % test)
48
49 if options.gtest_filter and options.gtest_filter != self._gtest_filter:
50 raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
51 "and --test %s" % test)
52
53 self._options = options
54 self._args = args
55
56 script_dir = path_utils.ScriptDir()
57 # Compute the top of the tree (the "source dir") from the script dir (where
58 # this script lives). We assume that the script dir is in tools/valgrind/
59 # relative to the top of the tree.
60 self._source_dir = os.path.dirname(os.path.dirname(script_dir))
61 # since this path is used for string matching, make sure it's always
62 # an absolute Unix-style path
63 self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
64 valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
65 self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
66
67 if not self._options.build_dir:
68 dirs = [
69 os.path.join(self._source_dir, "xcodebuild", "Debug"),
70 os.path.join(self._source_dir, "out", "Debug"),
71 os.path.join(self._source_dir, "build", "Debug"),
72 ]
73 build_dir = [d for d in dirs if os.path.isdir(d)]
74 if len(build_dir) > 1:
75 raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
76 "%s\nPlease specify just one "
77 "using --build-dir" % ", ".join(build_dir))
78 elif build_dir:
79 self._options.build_dir = build_dir[0]
80 else:
81 self._options.build_dir = None
82
83 if self._options.build_dir:
84 build_dir = os.path.abspath(self._options.build_dir)
85 self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
86
87 def _EnsureBuildDirFound(self):
88 if not self._options.build_dir:
89 raise BuildDirNotFound("Oops, couldn't find a build dir, please "
90 "specify it manually using --build-dir")
91
  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.

    Builds the argument list for valgrind_test.py: suppression files for the
    selected tool, tool-specific flags, the (optional) test executable path,
    and common gtest / test-launcher flags.

    Args:
      tool: a tool object from valgrind_test.CreateTool().
      exe: test executable name relative to the build dir, or None.
      valgrind_test_args: extra flags for valgrind_test.py, or None.

    Returns:
      The command as a list of strings.

    Raises:
      ExecutableNotFound: exe was given but doesn't exist in the build dir.
      BadBinary: exe appears to be ASan-instrumented (detected via nm).
    '''
    if exe and common.IsWindows():
      exe += '.exe'

    # Start from a copy so repeated calls don't mutate the shared preamble.
    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName();
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if tool_name == "drmemory":
      if self._options.drmemory_ops:
        # prepending " " to avoid Dr. Memory's option confusing optparse
        cmd += ["--drmemory_ops", " " + self._options.drmemory_ops]

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      exe_path = os.path.join(self._options.build_dir, exe)
      if not os.path.exists(exe_path):
        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

      # Make sure we don't try to test ASan-built binaries
      # with other dynamic instrumentation-based tools.
      # TODO(timurrrr): also check TSan and MSan?
      # `nm` might not be available, so use try-except.
      try:
        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
        # binaries built with Clang 3.5+.
        if not common.IsMac():
          nm_output = subprocess.check_output(["nm", exe_path])
          if nm_output.find("__asan_init") != -1:
            raise BadBinary("You're trying to run an executable instrumented "
                            "with AddressSanitizer under %s. Please provide "
                            "an uninstrumented executable." % tool_name)
      except OSError:
        pass

      cmd.append(exe_path)
      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # Built-in test launcher for gtest-based executables runs tests using
      # multiple process by default. Force the single-process mode back.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.gtest_break_on_failure:
      cmd.append("--gtest_break_on_failure")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    if self._options.test_launcher_total_shards is not None:
      cmd.append("--test-launcher-total-shards=%d" %
                 self._options.test_launcher_total_shards)
    if self._options.test_launcher_shard_index is not None:
      cmd.append("--test-launcher-shard-index=%d" %
                 self._options.test_launcher_shard_index)
    return cmd
168
169 def Run(self):
170 ''' Runs the test specified by command-line argument --test '''
171 logging.info("running test %s" % (self._test))
172 return self._test_list[self._test](self)
173
174 def _AppendGtestFilter(self, tool, name, cmd):
175 '''Append an appropriate --gtest_filter flag to the googletest binary
176 invocation.
177 If the user passed their own filter mentioning only one test, just use
178 it. Otherwise, filter out tests listed in the appropriate gtest_exclude
179 files.
180 '''
181 if (self._gtest_filter and
182 ":" not in self._gtest_filter and
183 "?" not in self._gtest_filter and
184 "*" not in self._gtest_filter):
185 cmd.append("--gtest_filter=%s" % self._gtest_filter)
186 return
187
188 filters = []
189 gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
190
191 gtest_filter_files = [
192 os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
193 # Use ".gtest.txt" files only for slow tools, as they now contain
194 # Valgrind- and Dr.Memory-specific filters.
195 # TODO(glider): rename the files to ".gtest_slow.txt"
196 if tool.ToolName() in ChromeTests.SLOW_TOOLS:
197 gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
198 for platform_suffix in common.PlatformNames():
199 gtest_filter_files += [
200 os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
201 os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
202 (tool.ToolName(), platform_suffix))]
203 logging.info("Reading gtest exclude filter files:")
204 for filename in gtest_filter_files:
205 # strip the leading absolute path (may be very long on the bot)
206 # and the following / or \.
207 readable_filename = filename.replace("\\", "/") # '\' on Windows
208 readable_filename = readable_filename.replace(self._source_dir, "")[1:]
209 if not os.path.exists(filename):
210 logging.info(" \"%s\" - not found" % readable_filename)
211 continue
212 logging.info(" \"%s\" - OK" % readable_filename)
213 f = open(filename, 'r')
214 for line in f.readlines():
215 if line.startswith("#") or line.startswith("//") or line.isspace():
216 continue
217 line = line.rstrip()
218 test_prefixes = ["FLAKY", "FAILS"]
219 for p in test_prefixes:
220 # Strip prefixes from the test names.
221 line = line.replace(".%s_" % p, ".")
222 # Exclude the original test name.
223 filters.append(line)
224 if line[-2:] != ".*":
225 # List all possible prefixes if line doesn't end with ".*".
226 for p in test_prefixes:
227 filters.append(line.replace(".", ".%s_" % p))
228 # Get rid of duplicates.
229 filters = set(filters)
230 gtest_filter = self._gtest_filter
231 if len(filters):
232 if gtest_filter:
233 gtest_filter += ":"
234 if gtest_filter.find("-") < 0:
235 gtest_filter += "-"
236 else:
237 gtest_filter = "-"
238 gtest_filter += ":".join(filters)
239 if gtest_filter:
240 cmd.append("--gtest_filter=%s" % gtest_filter)
241
  @staticmethod
  def ShowTests():
    """Prints all test names from _test_list, grouping aliases of the same
    test function (Python 2 print statements)."""
    # Invert _test_list: map each handler function to all names bound to it.
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    # The shortest name becomes the canonical one; the rest are aliases.
    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "  {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "  {}".format(name)
261
262 def SetupLdPath(self, requires_build_dir):
263 if requires_build_dir:
264 self._EnsureBuildDirFound()
265 elif not self._options.build_dir:
266 return
267
268 # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
269 if (os.getenv("LD_LIBRARY_PATH")):
270 os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
271 self._options.build_dir))
272 else:
273 os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
274
275 def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
276 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
277 cmd = self._DefaultCommand(tool, name, valgrind_test_args)
278 self._AppendGtestFilter(tool, name, cmd)
279 cmd.extend(['--test-tiny-timeout=1000'])
280 if cmd_args:
281 cmd.extend(cmd_args)
282
283 self.SetupLdPath(True)
284 return tool.Run(cmd, module)
285
286 def RunCmdLine(self):
287 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
288 cmd = self._DefaultCommand(tool, None, self._args)
289 self.SetupLdPath(False)
290 return tool.Run(cmd, None)
291
  # ---------------------------------------------------------------------------
  # One wrapper per gtest binary. The first SimpleTest argument is the label
  # forwarded to tool.Run() and the gtest_exclude/suppression namespace; the
  # second is the test executable name in the build dir.

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests",
                           cmd_args=[
                             "--cc-layer-tree-test-long-timeout"])

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    # ipc_tests spawns children; trace them so they run under the tool too.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestInstallStatic(self):
    return self.SimpleTest("install_static", "install_static_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestLatency(self):
    return self.SimpleTest("latency", "latency_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMidi(self):
    return self.SimpleTest("chrome", "midi_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                             "--ui-test-action-timeout=60000",
                             "--ui-test-action-max-timeout=150000"])

  def TestSkia(self):
    return self.SimpleTest("skia", "skia_unittests")

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestStorage(self):
    return self.SimpleTest("storage", "storage_unittests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0;
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestUIChromeOS(self):
    return self.SimpleTest("chrome", "ui_chromeos_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
487
  # Shared flag sets for the long-running browser/UI suites below.
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
522
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Runs one chunk of the layout tests under the tool via run-webkit-tests.

    Returns the tool's exit code.
    """
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests. Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkits-tests commandline.
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    # Reuse the chunk's output directory, clearing stale .txt results first.
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps",
                  "--additional-driver-flag=--no-sandbox"]
    # Pass build mode to run-webkit-tests.  We aren't passed it directly,
    # so parse it out of build_dir.  run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
599
600
  def TestLayout(self):
    """Runs a rotating chunk of the layout tests (Python 2 except syntax)."""
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run.  Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under valgrind rather
    # than having to run all of them in one shot.
    chunk_size = self._options.num_tests
    if chunk_size == 0 or len(self._args):
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("valgrind_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      if f:
        chunk_str = f.read()
        if len(chunk_str):
          chunk_num = int(chunk_str)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # that we almost guaranteed won't be at the end of the test list)
        if chunk_num > 10000:
          chunk_num = 0
        f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk size before running the tests.  Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)
645
  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  # Maps every accepted --test name (short alias or executable name) to its
  # handler method; __init__ validates against this and Run() dispatches on it.
  _test_list = {
    "cmdline" : RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle,          "angle_unittests": TestAngle,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "base": TestBase,            "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast,            "cast_unittests": TestCast,
    "cc": TestCC,                "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents,"components_unittests": TestComponents,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "display": TestDisplay,      "display_unittests": TestDisplay,
    "events": TestEvents,        "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM,              "gcm_unit_tests": TestGCM,
    "gin": TestGin,              "gin_unittests": TestGin,
    "gfx": TestGfx,              "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "installer_util_unittests": TestInstallerUtil,
    "install_static_unittests": TestInstallStatic,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard,    "keyboard_unittests": TestKeyboard,
    "latency": TestLatency,      "latency_unittests": TestLatency,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "midi": TestMidi,            "midi_unittests": TestMidi,
    "mojo_common": TestMojoCommon,
    "mojo_common_unittests": TestMojoCommon,
    "mojo_system": TestMojoSystem,
    "mojo_system_unittests": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_system_unittests": TestMojoPublicSystem,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_bindings_unittests": TestMojoPublicBindings,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "skia": TestSkia,            "skia_unittests": TestSkia,
    "sql": TestSql,              "sql_unittests": TestSql,
    "storage": TestStorage,      "storage_unittests": TestStorage,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
    "ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
    "webkit": TestLayout,
  }
727
728
729 def _main():
730 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
731 "[-t <test> ...]")
732
733 parser.add_option("--help-tests", dest="help_tests", action="store_true",
734 default=False, help="List all available tests")
735 parser.add_option("-b", "--build-dir",
736 help="the location of the compiler output")
737 parser.add_option("--target", help="Debug or Release")
738 parser.add_option("-t", "--test", action="append", default=[],
739 help="which test to run, supports test:gtest_filter format "
740 "as well.")
741 parser.add_option("--baseline", action="store_true", default=False,
742 help="generate baseline data instead of validating")
743 parser.add_option("-f", "--force", action="store_true", default=False,
744 help="run a broken test anyway")
745 parser.add_option("--gtest_filter",
746 help="additional arguments to --gtest_filter")
747 parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
748 parser.add_option("--gtest_shuffle", action="store_true", default=False,
749 help="Randomize tests' orders on every iteration.")
750 parser.add_option("--gtest_break_on_failure", action="store_true",
751 default=False,
752 help="Drop in to debugger on assertion failure. Also "
753 "useful for forcing tests to exit with a stack dump "
754 "on the first assertion failure when running with "
755 "--gtest_repeat=-1")
756 parser.add_option("-v", "--verbose", action="store_true", default=False,
757 help="verbose output - enable debug log messages")
758 parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
759 help="specify a valgrind tool to run the tests under")
760 parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
761 help="specify custom flags for the selected valgrind tool")
762 parser.add_option("--keep_logs", action="store_true", default=False,
763 help="store memory tool logs in the <tool>.logs directory "
764 "instead of /tmp.\nThis can be useful for tool "
765 "developers/maintainers.\nPlease note that the <tool>"
766 ".logs directory will be clobbered on tool startup.")
767 parser.add_option("-n", "--num_tests", type="int",
768 default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
769 help="for layout tests: # of subtests per run. 0 for all.")
770 parser.add_option("--test-launcher-bot-mode", action="store_true",
771 help="run the tests with --test-launcher-bot-mode")
772 parser.add_option("--test-launcher-total-shards", type=int,
773 help="run the tests with --test-launcher-total-shards")
774 parser.add_option("--test-launcher-shard-index", type=int,
775 help="run the tests with --test-launcher-shard-index")
776 parser.add_option("--drmemory_ops",
777 help="extra options passed to Dr. Memory")
778
779 options, args = parser.parse_args()
780
781 # Bake target into build_dir.
782 if options.target and options.build_dir:
783 assert (options.target !=
784 os.path.basename(os.path.dirname(options.build_dir)))
785 options.build_dir = os.path.join(os.path.abspath(options.build_dir),
786 options.target)
787
788 if options.verbose:
789 logging_utils.config_root(logging.DEBUG)
790 else:
791 logging_utils.config_root()
792
793 if options.help_tests:
794 ChromeTests.ShowTests()
795 return 0
796
797 if not options.test:
798 parser.error("--test not specified")
799
800 if len(options.test) != 1 and options.gtest_filter:
801 parser.error("--gtest_filter and multiple tests don't make sense together")
802
803 BROKEN_TESTS = {
804 'drmemory_light': [
805 'addressinput',
806 'aura',
807 'base_unittests',
808 'cc',
809 'components', # x64 only?
810 'content',
811 'gfx',
812 'mojo_public_bindings',
813 ],
814 'drmemory_full': [
815 'addressinput',
816 'aura',
817 'base_unittests',
818 'blink_heap',
819 'blink_platform',
820 'browser_tests',
821 'cast',
822 'cc',
823 'chromedriver',
824 'compositor',
825 'content',
826 'content_browsertests',
827 'device',
828 'events',
829 'extensions',
830 'gfx',
831 'google_apis',
832 'gpu',
833 'ipc_tests',
834 'jingle',
835 'keyboard',
836 'media',
837 'midi',
838 'mojo_common',
839 'mojo_public_bindings',
840 'mojo_public_sysperf',
841 'mojo_public_system',
842 'mojo_system',
843 'net',
844 'remoting',
845 'unit',
846 'url',
847 ],
848 }
849
850 for t in options.test:
851 if t in BROKEN_TESTS[options.valgrind_tool] and not options.force:
852 logging.info("Skipping broken %s test %s -- see crbug.com/633693" %
853 (options.valgrind_tool, t))
854 return 0
855
856 tests = ChromeTests(options, args, t)
857 ret = tests.Run()
858 if ret: return ret
859 return 0
860
861
# Script entry point: propagate _main()'s return value as the process
# exit code.
if __name__ == "__main__":
  sys.exit(_main())
OLDNEW
« no previous file with comments | « tools/valgrind/chrome_tests.bat ('k') | tools/valgrind/chrome_tests.sh » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698