OLD | NEW |
(Empty) | |
| 1 #!/usr/bin/python |
| 2 # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. |
| 5 |
| 6 # chrome_tests.py |
| 7 |
| 8 ''' Runs various chrome tests through valgrind_test.py. |
| 9 |
| 10 This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice |
| 11 to merge these two files. For now, I'm leaving it here with sections that |
| 12 aren't supported commented out as this is more of a work in progress. |
| 13 ''' |
| 14 |
| 15 import glob |
| 16 import logging |
| 17 import optparse |
| 18 import os |
| 19 import stat |
| 20 import sys |
| 21 |
| 22 import google.logging_utils |
| 23 import google.path_utils |
| 24 # Import the platform_utils up in the layout tests which have been modified to |
| 25 # work under non-Windows platforms instead of the ones that are in the |
| 26 # tools/python/google directory. (See chrome_tests.sh which sets PYTHONPATH |
| 27 # correctly.) |
| 28 import platform_utils |
| 29 |
| 30 import common |
| 31 |
| 32 |
class TestNotFound(Exception):
  '''Raised when an unknown test name is passed to ChromeTests.'''
| 34 |
| 35 |
| 36 class ChromeTests: |
| 37 '''This class is derived from the chrome_tests.py file in ../purify/. |
| 38 |
| 39 TODO(erg): Finish implementing this. I've commented out all the parts that I |
| 40 don't have working yet. We still need to deal with layout tests, and long |
| 41 term, the UI tests. |
| 42 ''' |
| 43 |
| 44 def __init__(self, options, args, test): |
| 45 # the known list of tests |
| 46 self._test_list = { |
| 47 # "test_shell": self.TestTestShell, |
| 48 "unit": self.TestUnit, |
| 49 "net": self.TestNet, |
| 50 "ipc": self.TestIpc, |
| 51 "base": self.TestBase, |
| 52 # "layout": self.TestLayout, |
| 53 # "layout_all": self.TestLayoutAll, |
| 54 # "ui": self.TestUI |
| 55 } |
| 56 |
| 57 if test not in self._test_list: |
| 58 raise TestNotFound("Unknown test: %s" % test) |
| 59 |
| 60 self._options = options |
| 61 self._args = args |
| 62 self._test = test |
| 63 |
| 64 script_dir = google.path_utils.ScriptDir() |
| 65 utility = platform_utils.PlatformUtility(script_dir) |
| 66 # Compute the top of the tree (the "source dir") from the script dir (where |
| 67 # this script lives). We assume that the script dir is in tools/valgrind/ |
| 68 # relative to the top of the tree. |
| 69 self._source_dir = os.path.dirname(os.path.dirname(script_dir)) |
| 70 # since this path is used for string matching, make sure it's always |
| 71 # an absolute Windows-style path |
| 72 self._source_dir = utility.GetAbsolutePath(self._source_dir) |
| 73 valgrind_test = os.path.join(script_dir, "valgrind_test.py") |
| 74 self._command_preamble = ["python", valgrind_test, "--echo_to_stdout", |
| 75 "--source_dir=%s" % (self._source_dir)] |
| 76 |
| 77 def _DefaultCommand(self, module, exe=None): |
| 78 '''Generates the default command array that most tests will use.''' |
| 79 module_dir = os.path.join(self._source_dir, module) |
| 80 |
| 81 # For now, the suppressions files are all the same across all modules. Copy |
| 82 # the code in the purify version of chrome_tests.py if we ever need |
| 83 # per-module suppressions again... |
| 84 self._data_dir = google.path_utils.ScriptDir() |
| 85 |
| 86 if not self._options.build_dir: |
| 87 dir_chrome = os.path.join(self._source_dir, "chrome", "Hammer") |
| 88 dir_module = os.path.join(module_dir, "Hammer") |
| 89 if exe: |
| 90 exe_chrome = os.path.join(dir_chrome, exe) |
| 91 exe_module = os.path.join(dir_module, exe) |
| 92 if os.path.isfile(exe_chrome) and not os.path.isfile(exe_module): |
| 93 self._options.build_dir = dir_chrome |
| 94 elif os.path.isfile(exe_module) and not os.path.isfile(exe_chrome): |
| 95 self._options.build_dir = dir_module |
| 96 elif (os.stat(exe_module)[stat.ST_MTIME] > |
| 97 os.stat(exe_chrome)[stat.ST_MTIME]): |
| 98 self._options.build_dir = dir_module |
| 99 else: |
| 100 self._options.build_dir = dir_chrome |
| 101 else: |
| 102 if os.path.isdir(dir_chrome) and not os.path.isdir(dir_module): |
| 103 self._options.build_dir = dir_chrome |
| 104 elif os.path.isdir(dir_module) and not os.path.isdir(dir_chrome): |
| 105 self._options.build_dir = dir_module |
| 106 elif (os.stat(dir_module)[stat.ST_MTIME] > |
| 107 os.stat(dir_chrome)[stat.ST_MTIME]): |
| 108 self._options.build_dir = dir_module |
| 109 else: |
| 110 self._options.build_dir = dir_chrome |
| 111 |
| 112 cmd = list(self._command_preamble) |
| 113 cmd.append("--data_dir=%s" % self._data_dir) |
| 114 if self._options.baseline: |
| 115 cmd.append("--baseline") |
| 116 if self._options.verbose: |
| 117 cmd.append("--verbose") |
| 118 if self._options.generate_suppressions: |
| 119 cmd.append("--generate_suppressions") |
| 120 if exe: |
| 121 cmd.append(os.path.join(self._options.build_dir, exe)) |
| 122 return cmd |
| 123 |
| 124 def Run(self): |
| 125 ''' Runs the test specified by command-line argument --test ''' |
| 126 logging.info("running test %s" % (self._test)) |
| 127 return self._test_list[self._test]() |
| 128 |
| 129 def _ReadGtestFilterFile(self, name, cmd): |
| 130 '''Read a file which is a list of tests to filter out with --gtest_filter |
| 131 and append the command-line option to cmd. |
| 132 ''' |
| 133 filters = [] |
| 134 filename = os.path.join(self._data_dir, name + ".gtest.txt") |
| 135 if os.path.exists(filename): |
| 136 f = open(filename, 'r') |
| 137 for line in f.readlines(): |
| 138 if line.startswith("#") or line.startswith("//") or line.isspace(): |
| 139 continue |
| 140 line = line.rstrip() |
| 141 filters.append(line) |
| 142 gtest_filter = self._options.gtest_filter |
| 143 if len(filters): |
| 144 if gtest_filter: |
| 145 gtest_filter += ":" |
| 146 if gtest_filter.find("-") < 0: |
| 147 gtest_filter += "-" |
| 148 else: |
| 149 gtest_filter = "-" |
| 150 gtest_filter += ":".join(filters) |
| 151 if gtest_filter: |
| 152 cmd.append("--gtest_filter=%s" % gtest_filter) |
| 153 |
| 154 def SimpleTest(self, module, name): |
| 155 cmd = self._DefaultCommand(module, name) |
| 156 self._ReadGtestFilterFile(name, cmd) |
| 157 return common.RunSubprocess(cmd, 0) |
| 158 |
  def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None,
                   out_dir_extra=None):
    '''Valgrind a target binary, which will be executed one or more times via a
    script or driver program.
    Args:
      module - which top level component this test is from (webkit, base, etc.)
      exe - the name of the exe (it's assumed to exist in build_dir)
      name - the name of this test (used to name output files)
      script - the driver program or script. If it's python.exe, we use
        search-path behavior to execute, otherwise we assume that it is in
        build_dir.
      multi - a boolean hint that the exe will be run multiple times, generating
        multiple output files (without this option, only the last run will be
        recorded and analyzed)
      cmd_args - extra arguments to pass to the valgrind_test.py script
      out_dir_extra - optional subdirectory (under the "latest" output
        directory) in which to store per-run output files; only used when
        multi is True
    '''
    cmd = self._DefaultCommand(module)
    exe = os.path.join(self._options.build_dir, exe)
    cmd.append("--exe=%s" % exe)
    cmd.append("--name=%s" % name)
    if multi:
      out = os.path.join(google.path_utils.ScriptDir(),
                         "latest")
      if out_dir_extra:
        out = os.path.join(out, out_dir_extra)
      if os.path.exists(out):
        # Clear stale logs from a previous run so old results can't be
        # mistaken for this run's output.
        old_files = glob.glob(os.path.join(out, "*.txt"))
        for f in old_files:
          os.remove(f)
      else:
        os.makedirs(out)
      # "%%5d" leaves a literal "%5d" in the file name -- presumably a
      # per-run counter placeholder filled in by valgrind_test.py; confirm
      # against that script before changing this.
      out = os.path.join(out, "%s%%5d.txt" % name)
      cmd.append("--out_file=%s" % out)
    if cmd_args:
      cmd.extend(cmd_args)
    # NOTE: mutates the caller's |script| list in place when fixing up the
    # driver path to live under build_dir.
    if script[0] != "python.exe" and not os.path.exists(script[0]):
      script[0] = os.path.join(self._options.build_dir, script[0])
    cmd.extend(script)
    self._ReadGtestFilterFile(name, cmd)
    return common.RunSubprocess(cmd, 0)
| 199 |
| 200 def TestBase(self): |
| 201 return self.SimpleTest("base", "base_unittests") |
| 202 |
| 203 def TestIpc(self): |
| 204 return self.SimpleTest("chrome", "ipc_tests") |
| 205 |
| 206 def TestNet(self): |
| 207 return self.SimpleTest("net", "net_unittests") |
| 208 |
| 209 def TestTestShell(self): |
| 210 return self.SimpleTest("webkit", "test_shell_tests") |
| 211 |
| 212 def TestUnit(self): |
| 213 return self.SimpleTest("chrome", "unit_tests") |
| 214 |
| 215 # def TestLayoutAll(self): |
| 216 # return self.TestLayout(run_all=True) |
| 217 |
| 218 # def TestLayout(self, run_all=False): |
| 219 # # A "chunk file" is maintained in the local directory so that each test |
| 220 # # runs a slice of the layout tests of size chunk_size that increments with |
| 221 # # each run. Since tests can be added and removed from the layout tests at |
| 222 # # any time, this is not going to give exact coverage, but it will allow us |
| 223 # # to continuously run small slices of the layout tests under purify rather |
| 224 # # than having to run all of them in one shot. |
| 225 # chunk_num = 0 |
| 226 # # Tests currently seem to take about 20-30s each. |
| 227 # chunk_size = 120 # so about 40-60 minutes per run |
| 228 # chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt") |
| 229 # if not run_all: |
| 230 # try: |
| 231 # f = open(chunk_file) |
| 232 # if f: |
| 233 # str = f.read() |
| 234 # if len(str): |
| 235 # chunk_num = int(str) |
| 236 # # This should be enough so that we have a couple of complete runs |
 | 237 #        # of test data stored in the archive (although note that when we loop
| 238 # # that we almost guaranteed won't be at the end of the test list) |
| 239 # if chunk_num > 10000: |
| 240 # chunk_num = 0 |
| 241 # f.close() |
| 242 # except IOError, (errno, strerror): |
| 243 # logging.error("error reading from file %s (%d, %s)" % (chunk_file, |
| 244 # errno, strerror)) |
| 245 |
| 246 # script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", |
| 247 # "run_webkit_tests.py") |
| 248 # script_cmd = ["python.exe", script, "--run-singly", "-v", |
| 249 # "--noshow-results", "--time-out-ms=200000", |
| 250 # "--nocheck-sys-deps"] |
| 251 # if not run_all: |
| 252 # script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) |
| 253 |
| 254 # if len(self._args): |
| 255 # # if the arg is a txt file, then treat it as a list of tests |
| 256 # if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": |
| 257 # script_cmd.append("--test-list=%s" % self._args[0]) |
| 258 # else: |
| 259 # script_cmd.extend(self._args) |
| 260 |
| 261 # if run_all: |
| 262 # ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
 | 263 #                             script_cmd, multi=True, cmd_args=["--timeout=0"])
| 264 # return ret |
| 265 |
| 266 # # store each chunk in its own directory so that we can find the data later |
| 267 # chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) |
| 268 # ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
| 269 # script_cmd, multi=True, cmd_args=["--timeout=0"], |
| 270 # out_dir_extra=chunk_dir) |
| 271 |
| 272 # # Wait until after the test runs to completion to write out the new chunk |
| 273 # # number. This way, if the bot is killed, we'll start running again from |
| 274 # # the current chunk rather than skipping it. |
| 275 # try: |
| 276 # f = open(chunk_file, "w") |
| 277 # chunk_num += 1 |
| 278 # f.write("%d" % chunk_num) |
| 279 # f.close() |
| 280 # except IOError, (errno, strerror): |
| 281 # logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, |
| 282 # strerror)) |
| 283 # # Since we're running small chunks of the layout tests, it's important to |
| 284 # # mark the ones that have errors in them. These won't be visible in the |
 | 285 #   # summary list for long, but will be useful for someone reviewing this bot.
| 286 # return ret |
| 287 |
| 288 # def TestUI(self): |
| 289 # if not self._options.no_reinstrument: |
| 290 # instrumentation_error = self.InstrumentDll() |
| 291 # if instrumentation_error: |
| 292 # return instrumentation_error |
| 293 # return self.ScriptedTest("chrome", "chrome.exe", "ui_tests", |
| 294 # ["ui_tests.exe", |
| 295 # "--single-process", |
| 296 # "--ui-test-timeout=120000", |
| 297 # "--ui-test-action-timeout=80000", |
| 298 # "--ui-test-action-max-timeout=180000"], |
| 299 # multi=True) |
| 300 |
| 301 |
| 302 def _main(argv): |
| 303 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> " |
| 304 "[-t <test> ...]") |
| 305 parser.disable_interspersed_args() |
| 306 parser.add_option("-b", "--build_dir", |
| 307 help="the location of the output of the compiler output") |
| 308 parser.add_option("-t", "--test", action="append", |
| 309 help="which test to run") |
| 310 parser.add_option("", "--baseline", action="store_true", default=False, |
| 311 help="generate baseline data instead of validating") |
| 312 parser.add_option("", "--gtest_filter", |
| 313 help="additional arguments to --gtest_filter") |
| 314 parser.add_option("-v", "--verbose", action="store_true", default=False, |
| 315 help="verbose output - enable debug log messages") |
| 316 parser.add_option("", "--no-reinstrument", action="store_true", default=False, |
| 317 help="Don't force a re-instrumentation for ui_tests") |
| 318 parser.add_option("", "--generate_suppressions", action="store_true", |
| 319 default=False, |
| 320 help="Skip analysis and generate suppressions") |
| 321 |
| 322 options, args = parser.parse_args() |
| 323 |
| 324 if options.verbose: |
| 325 google.logging_utils.config_root(logging.DEBUG) |
| 326 else: |
| 327 google.logging_utils.config_root() |
| 328 |
| 329 if not options.test or not len(options.test): |
| 330 parser.error("--test not specified") |
| 331 |
| 332 for t in options.test: |
| 333 tests = ChromeTests(options, args, t) |
| 334 ret = tests.Run() |
| 335 if ret: return ret |
| 336 return 0 |
| 337 |
| 338 |
if __name__ == "__main__":
  # Exit with the status returned by the test runner.
  sys.exit(_main(sys.argv))
| 342 |
OLD | NEW |