Chromium Code Reviews| Index: tools/valgrind/chrome_tests.py |
| =================================================================== |
| --- tools/valgrind/chrome_tests.py (revision 12713) |
| +++ tools/valgrind/chrome_tests.py (working copy) |
| @@ -8,8 +8,7 @@ |
| ''' Runs various chrome tests through valgrind_test.py. |
| This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice |
| -to merge these two files. For now, I'm leaving it here with sections that |
| -aren't supported commented out as this is more of a work in progress. |
| +to merge these two files. |
| ''' |
| import glob |
| @@ -39,10 +38,6 @@ |
| class ChromeTests: |
| '''This class is derived from the chrome_tests.py file in ../purify/. |
| - |
| - TODO(erg): Finish implementing this. I've commented out all the parts that I |
| - don't have working yet. We still need to deal with layout tests, and long |
| - term, the UI tests. |
| ''' |
| def __init__(self, options, args, test): |
| @@ -56,8 +51,7 @@ |
| "googleurl": self.TestGoogleurl, |
| "media": self.TestMedia, |
| "printing": self.TestPrinting, |
| -# "layout": self.TestLayout, |
| -# "layout_all": self.TestLayoutAll, |
| + "layout": self.TestLayout, |
| "ui": self.TestUI |
| } |
| @@ -143,9 +137,9 @@ |
| cmd.append("--indirect") |
| if exe: |
| cmd.append(os.path.join(self._options.build_dir, exe)) |
| - # Valgrind runs tests slowly, so slow tests hurt more; show elapased time |
| - # so we can find the slowpokes. |
| - cmd.append("--gtest_print_time"); |
| + # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time |
| + # so we can find the slowpokes. |
| + cmd.append("--gtest_print_time") |
| return cmd |
| def Run(self): |
| @@ -186,47 +180,6 @@ |
| cmd.extend(cmd_args) |
| return common.RunSubprocess(cmd, 0) |
| - def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None, |
| - out_dir_extra=None): |
| - '''Valgrind a target binary, which will be executed one or more times via a |
| - script or driver program. |
| - Args: |
| - module - which top level component this test is from (webkit, base, etc.) |
| - exe - the name of the exe (it's assumed to exist in build_dir) |
| - name - the name of this test (used to name output files) |
| - script - the driver program or script. If it's python.exe, we use |
| - search-path behavior to execute, otherwise we assume that it is in |
| - build_dir. |
| - multi - a boolean hint that the exe will be run multiple times, generating |
| - multiple output files (without this option, only the last run will be |
| - recorded and analyzed) |
| - cmd_args - extra arguments to pass to the valgrind_test.py script |
| - ''' |
| - cmd = self._DefaultCommand(module) |
| - exe = os.path.join(self._options.build_dir, exe) |
| - cmd.append("--exe=%s" % exe) |
| - cmd.append("--name=%s" % name) |
| - if multi: |
| - out = os.path.join(google.path_utils.ScriptDir(), |
| - "latest") |
| - if out_dir_extra: |
| - out = os.path.join(out, out_dir_extra) |
| - if os.path.exists(out): |
| - old_files = glob.glob(os.path.join(out, "*.txt")) |
| - for f in old_files: |
| - os.remove(f) |
| - else: |
| - os.makedirs(out) |
| - out = os.path.join(out, "%s%%5d.txt" % name) |
| - cmd.append("--out_file=%s" % out) |
| - if cmd_args: |
| - cmd.extend(cmd_args) |
| - if script[0] != "python.exe" and not os.path.exists(script[0]): |
| - script[0] = os.path.join(self._options.build_dir, script[0]) |
| - cmd.extend(script) |
| - self._ReadGtestFilterFile(name, cmd) |
| - return common.RunSubprocess(cmd, 0) |
| - |
| def TestBase(self): |
| return self.SimpleTest("base", "base_unittests") |
| @@ -258,93 +211,99 @@ |
| "--ui-test-action-timeout=80000", |
| "--ui-test-action-max-timeout=180000"]) |
| -# def TestLayoutAll(self): |
| -# return self.TestLayout(run_all=True) |
| + def TestLayoutChunk(self, chunk_num, chunk_size): |
| + # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the |
| + # list of tests. Wrap around to beginning of list at end. |
| + # If chunk_size is zero, run all tests in the list once. |
| + # If a text file is given as argument, it is used as the list of tests. |
|
Erik does not do reviews
2009/03/30 16:22:48
clarify this comment to refer explicitly to command-line arguments
|
| + # |
| + # Build the ginormous commandline in 'cmd'. |
| + # It's going to be roughly |
| + # python valgrind_test.py ... python run_webkit_tests.py ... |
| + # but we'll use the --indirect flag to valgrind_test.py |
| + # to avoid valgrinding python. |
| + # Start by building the valgrind_test.py commandline. |
| + cmd = self._DefaultCommand("webkit") |
| + cmd.append("--trace_children") |
| + cmd.append("--indirect") |
| + # Now build script_cmd, the run_webkit_tests.py commandline |
| + # Store each chunk in its own directory so that we can find the data later |
| + chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) |
| + test_shell = os.path.join(self._options.build_dir, "test_shell") |
| + out_dir = os.path.join(google.path_utils.ScriptDir(), "latest") |
| + out_dir = os.path.join(out_dir, chunk_dir) |
| + if os.path.exists(out_dir): |
| + old_files = glob.glob(os.path.join(out_dir, "*.txt")) |
| + for f in old_files: |
| + os.remove(f) |
| + else: |
| + os.makedirs(out_dir) |
| + script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", |
| + "run_webkit_tests.py") |
| + script_cmd = ["python", script, "--run-singly", "-v", |
| + "--noshow-results", "--time-out-ms=200000", |
| + "--nocheck-sys-deps"] |
| + if (chunk_size > 0): |
| + script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) |
| + if len(self._args): |
| + # if the arg is a txt file, then treat it as a list of tests |
| + if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": |
| + script_cmd.append("--test-list=%s" % self._args[0]) |
| + else: |
| + script_cmd.extend(self._args) |
| + self._ReadGtestFilterFile("layout", script_cmd) |
| + # Now run script_cmd with the wrapper in cmd |
| + cmd.extend(["--"]) |
| + cmd.extend(script_cmd) |
| + ret = common.RunSubprocess(cmd, 0) |
| + return ret |
| -# def TestLayout(self, run_all=False): |
| -# # A "chunk file" is maintained in the local directory so that each test |
| -# # runs a slice of the layout tests of size chunk_size that increments with |
| -# # each run. Since tests can be added and removed from the layout tests at |
| -# # any time, this is not going to give exact coverage, but it will allow us |
| -# # to continuously run small slices of the layout tests under purify rather |
| -# # than having to run all of them in one shot. |
| -# chunk_num = 0 |
| -# # Tests currently seem to take about 20-30s each. |
| -# chunk_size = 120 # so about 40-60 minutes per run |
| -# chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt") |
| -# if not run_all: |
| -# try: |
| -# f = open(chunk_file) |
| -# if f: |
| -# str = f.read() |
| -# if len(str): |
| -# chunk_num = int(str) |
| -# # This should be enough so that we have a couple of complete runs |
| -# # of test data stored in the archive (although note that when we loop |
| -# # that we almost guaranteed won't be at the end of the test list) |
| -# if chunk_num > 10000: |
| -# chunk_num = 0 |
| -# f.close() |
| -# except IOError, (errno, strerror): |
| -# logging.error("error reading from file %s (%d, %s)" % (chunk_file, |
| -# errno, strerror)) |
| + def TestLayout(self): |
| + # A "chunk file" is maintained in the local directory so that each test |
| + # runs a slice of the layout tests of size chunk_size that increments with |
| + # each run. Since tests can be added and removed from the layout tests at |
| + # any time, this is not going to give exact coverage, but it will allow us |
| + # to continuously run small slices of the layout tests under purify rather |
| + # than having to run all of them in one shot. |
| + chunk_size = self._options.num_tests |
| + if (chunk_size == 0): |
| + return self.TestLayoutChunk(0, 0) |
| + chunk_num = 0 |
| + chunk_file = os.path.join("valgrind_layout_chunk.txt") |
| + logging.info("Reading state from " + chunk_file) |
| + try: |
| + f = open(chunk_file) |
| + if f: |
| + str = f.read() |
| + if len(str): |
| + chunk_num = int(str) |
| + # This should be enough so that we have a couple of complete runs |
| + # of test data stored in the archive (although note that when we loop |
| + # that we almost guaranteed won't be at the end of the test list) |
| + if chunk_num > 10000: |
| + chunk_num = 0 |
| + f.close() |
| + except IOError, (errno, strerror): |
| + logging.error("error reading from file %s (%d, %s)" % (chunk_file, |
| + errno, strerror)) |
| + ret = self.TestLayoutChunk(chunk_num, chunk_size) |
| + # Wait until after the test runs to completion to write out the new chunk |
| + # number. This way, if the bot is killed, we'll start running again from |
| + # the current chunk rather than skipping it. |
| + logging.info("Saving state to " + chunk_file) |
| + try: |
| + f = open(chunk_file, "w") |
| + chunk_num += 1 |
| + f.write("%d" % chunk_num) |
| + f.close() |
| + except IOError, (errno, strerror): |
| + logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, |
| + strerror)) |
| + # Since we're running small chunks of the layout tests, it's important to |
| + # mark the ones that have errors in them. These won't be visible in the |
| + # summary list for long, but will be useful for someone reviewing this bot. |
| + return ret |
| -# script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", |
| -# "run_webkit_tests.py") |
| -# script_cmd = ["python.exe", script, "--run-singly", "-v", |
| -# "--noshow-results", "--time-out-ms=200000", |
| -# "--nocheck-sys-deps"] |
| -# if not run_all: |
| -# script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) |
| - |
| -# if len(self._args): |
| -# # if the arg is a txt file, then treat it as a list of tests |
| -# if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": |
| -# script_cmd.append("--test-list=%s" % self._args[0]) |
| -# else: |
| -# script_cmd.extend(self._args) |
| - |
| -# if run_all: |
| -# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
| -# script_cmd, multi=True, cmd_args=["--timeout=0"]) |
| -# return ret |
| - |
| -# # store each chunk in its own directory so that we can find the data later |
| -# chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) |
| -# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
| -# script_cmd, multi=True, cmd_args=["--timeout=0"], |
| -# out_dir_extra=chunk_dir) |
| - |
| -# # Wait until after the test runs to completion to write out the new chunk |
| -# # number. This way, if the bot is killed, we'll start running again from |
| -# # the current chunk rather than skipping it. |
| -# try: |
| -# f = open(chunk_file, "w") |
| -# chunk_num += 1 |
| -# f.write("%d" % chunk_num) |
| -# f.close() |
| -# except IOError, (errno, strerror): |
| -# logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, |
| -# strerror)) |
| -# # Since we're running small chunks of the layout tests, it's important to |
| -# # mark the ones that have errors in them. These won't be visible in the |
| -# # summary list for long, but will be useful for someone reviewing this bot. |
| -# return ret |
| - |
| -# def TestUI(self): |
| -# if not self._options.no_reinstrument: |
| -# instrumentation_error = self.InstrumentDll() |
| -# if instrumentation_error: |
| -# return instrumentation_error |
| -# return self.ScriptedTest("chrome", "chrome.exe", "ui_tests", |
| -# ["ui_tests.exe", |
| -# "--single-process", |
| -# "--ui-test-timeout=120000", |
| -# "--ui-test-action-timeout=80000", |
| -# "--ui-test-action-max-timeout=180000"], |
| -# multi=True) |
| - |
| - |
| def _main(_): |
| parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> " |
| "[-t <test> ...]") |
| @@ -370,6 +329,12 @@ |
| parser.add_option("", "--generate_suppressions", action="store_true", |
| default=False, |
| help="Skip analysis and generate suppressions") |
| + # My machine can do about 120 layout tests/hour in release mode. |
| + # Let's do 30 minutes worth per run. |
| + # The CPU is mostly idle, so perhaps we can raise this when |
| + # we figure out how to run them more efficiently. |
| + parser.add_option("-n", "--num_tests", default=60, type="int", |
| + help="for layout tests: number of subtests per run. 0 for all.") |
|
Erik does not do reviews
2009/03/30 16:22:48
80 columns
|
| options, args = parser.parse_args() |