Chromium Code Reviews

#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''
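
# Example invocations (see the options defined in _main() below; the build
# directory and test names here are illustrative):
#   tools/valgrind/chrome_tests.py -b out/Debug -t base --tool memcheck
#   tools/valgrind/chrome_tests.py -b out/Debug -t base_unittests:BaseTest.* \
#       --tool drmemory_full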

import glob
import logging
import multiprocessing
import optparse
import os
import stat
import subprocess
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ExecutableNotFound(Exception): pass

class BadBinary(Exception): pass

class ChromeTests:
  SLOW_TOOLS = ["memcheck", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir
    # (where this script lives). We assume that the script dir is in
    # tools/valgrind/ relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    #   tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with a --suppressions= prefix.
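    # For example, with the memcheck tool on Mac this would typically add
    # tools/valgrind/memcheck/suppressions.txt and, if present,
    # tools/valgrind/memcheck/suppressions_mac.txt.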
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      exe_path = os.path.join(self._options.build_dir, exe)
      if not os.path.exists(exe_path):
        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

      # Make sure we don't try to test ASan-built binaries
      # with other dynamic instrumentation-based tools.
      # TODO(timurrrr): also check TSan and MSan?
      # `nm` might not be available, so use try-except.
      try:
        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
        # binaries built with Clang 3.5+.
        if not common.IsMac():
          nm_output = subprocess.check_output(["nm", exe_path])
          if nm_output.find("__asan_init") != -1:
            raise BadBinary("You're trying to run an executable instrumented "
                            "with AddressSanitizer under %s. Please provide "
                            "an uninstrumented executable." % tool_name)
      except OSError:
        pass

      cmd.append(exe_path)
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # The built-in test launcher for gtest-based executables runs tests in
      # multiple processes by default. Force single-process mode back on.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.gtest_break_on_failure:
      cmd.append("--gtest_break_on_failure")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    if self._options.test_launcher_total_shards is not None:
      cmd.append("--test-launcher-total-shards=%d" %
                 self._options.test_launcher_total_shards)
    if self._options.test_launcher_shard_index is not None:
      cmd.append("--test-launcher-shard-index=%d" %
                 self._options.test_launcher_shard_index)
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
    invocation.
    If the user passed a filter mentioning only one test, just use it.
    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
    '''
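    # As a sketch of the expansion below: an exclude-file line naming a
    # hypothetical "FooTest.Bar" yields the filters "FooTest.Bar",
    # "FooTest.FLAKY_Bar" and "FooTest.FAILS_Bar", all of which end up after
    # the "-" in the final --gtest_filter value.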
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "  {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "  {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
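    # (Note: os.putenv() changes the environment seen by child processes but
    # does not update os.environ in this process.)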
    if os.getenv("LD_LIBRARY_PATH"):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestPDFiumUnitTests(self):
    return self.SimpleTest("pdfium_unittests", "pdfium_unittests")

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMidi(self):
    return self.SimpleTest("chrome", "midi_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSkia(self):
    return self.SimpleTest("skia", "skia_unittests")

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestUIChromeOS(self):
    return self.SimpleTest("chrome", "ui_chromeos_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")


  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestLayoutChunk(self, chunk_num, chunk_size):
|
    # -- Inline review comment (Lei Zhang, 2015/11/17 23:07:23):
    #    Can you clean up this file a bit? There's a lot of
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests. Wrap around to the beginning of the list at the end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as an argument, it is used as the list of tests.
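    # For example, chunk_num=2 with chunk_size=300 covers tests [600 .. 900)
    # of the list.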
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #   python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkit-tests commandline.
    # Store each chunk in its own directory so that we can find the data
    # later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes. Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps",
                  "--additional-driver-flag=--no-sandbox"]
    # Pass the build mode to run-webkit-tests. We aren't passed it directly,
    # so parse it out of build_dir. run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
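    # For example, a hypothetical --build-dir=out/Debug splits into
    # --build-directory out and --target Debug.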
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # If the arg is a txt file, treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests oftentimes fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret


  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each run
    # covers a slice of the layout tests of size chunk_size, advancing through
    # the list on every run. Since tests can be added and removed from the
    # layout tests at any time, this is not going to give exact coverage, but
    # it will allow us to continuously run small slices of the layout tests
    # under valgrind rather than having to run all of them in one shot.
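    # Each run reads the last chunk number from the chunk file, increments it,
    # and writes it back before testing, so successive runs cover successive
    # slices of the list.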
    chunk_size = self._options.num_tests
    if chunk_size == 0 or len(self._args):
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("valgrind_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      if f:
        chunk_str = f.read()
        if len(chunk_str):
          chunk_num = int(chunk_str)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # we are almost guaranteed not to be at the end of the test list).
        if chunk_num > 10000:
          chunk_num = 0
        f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them. These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline": RunCmdLine,
    "pdfium_unittests": TestPDFiumUnitTests,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle, "angle_unittests": TestAngle,
    "app_list": TestAppList, "app_list_unittests": TestAppList,
    "ash": TestAsh, "ash_unittests": TestAsh,
    "aura": TestAura, "aura_unittests": TestAura,
    "base": TestBase, "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser, "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast, "cast_unittests": TestCast,
    "cc": TestCC, "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
    "cloud_print": TestCloudPrint,
    "cloud_print_unittests": TestCloudPrint,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent, "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette, "courgette_unittests": TestCourgette,
    "crypto": TestCrypto, "crypto_unittests": TestCrypto,
    "device": TestDevice, "device_unittests": TestDevice,
    "display": TestDisplay, "display_unittests": TestDisplay,
    "events": TestEvents, "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM, "gcm_unit_tests": TestGCM,
    "gin": TestGin, "gin_unittests": TestGin,
    "gfx": TestGfx, "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU, "gpu_unittests": TestGPU,
    "ipc": TestIpc, "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle, "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
    "layout": TestLayout, "layout_tests": TestLayout,
    "media": TestMedia, "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests": TestMessageCenter,
    "midi": TestMidi, "midi_unittests": TestMidi,
    "mojo_common": TestMojoCommon,
    "mojo_system": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_utility": TestMojoPublicUtility,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_env": TestMojoPublicEnv,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "net": TestNet, "net_unittests": TestNet,
    "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting, "printing_unittests": TestPrinting,
    "remoting": TestRemoting, "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "skia": TestSkia, "skia_unittests": TestSkia,
    "sql": TestSql, "sql_unittests": TestSql,
    "sync": TestSync, "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
    "ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
    "unit": TestUnit, "unit_tests": TestUnit,
    "url": TestURL, "url_unittests": TestURL,
    "views": TestViews, "views_unittests": TestViews,
    "webkit": TestLayout,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")

  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter "
                         "format as well.")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
| 750 parser.add_option("--gtest_shuffle", action="store_true", default=False, | |
| 751 help="Randomize tests' orders on every iteration.") | |
  parser.add_option("--gtest_break_on_failure", action="store_true",
                    default=False,
                    help="Drop in to debugger on assertion failure. Also "
                         "useful for forcing tests to exit with a stack dump "
                         "on the first assertion failure when running with "
                         "--gtest_repeat=-1")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="drmemory_full",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the "
                         "<tool>.logs directory will be clobbered on tool "
                         "startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run. 0 for all.")
  parser.add_option("--test-launcher-bot-mode", action="store_true",
                    help="run the tests with --test-launcher-bot-mode")
  parser.add_option("--test-launcher-total-shards", type=int,
                    help="run the tests with --test-launcher-total-shards")
  parser.add_option("--test-launcher-shard-index", type=int,
                    help="run the tests with --test-launcher-shard-index")

  options, args = parser.parse_args()

  # Bake the target into build_dir.
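  # For example, --build-dir=out with --target=Debug yields out/Debug
  # (absolutized).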
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())