Index: tools/purify/chrome_tests.py |
=================================================================== |
--- tools/purify/chrome_tests.py (revision 7750) |
+++ tools/purify/chrome_tests.py (working copy) |
@@ -1,11 +1,15 @@ |
-#!/bin/env python |
+#!/usr/bin/python |
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
# Use of this source code is governed by a BSD-style license that can be |
# found in the LICENSE file. |
# chrome_tests.py |
-''' Runs various chrome tests through purify_test.py |
+''' Runs various chrome tests through valgrind_test.py. |
+ |
+This file is a copy of ../purify/chrome_tests.py. Eventually, it would be nice |
+to merge these two files. For now, I'm leaving it here with sections that |
+aren't supported commented out as this is more of a work in progress. |
''' |
import glob |
@@ -17,7 +21,7 @@ |
import google.logging_utils |
import google.path_utils |
-import google.platform_utils |
+import platform_utils |
import common |
@@ -26,18 +30,23 @@ |
class ChromeTests: |
+ '''This class is derived from the chrome_tests.py file in ../purify/. I've |
+ commented out all the parts that I don't have working yet or are irrelevant. |
+ ''' |
def __init__(self, options, args, test): |
# the known list of tests |
- self._test_list = {"test_shell": self.TestTestShell, |
- "unit": self.TestUnit, |
- "net": self.TestNet, |
- "ipc": self.TestIpc, |
- "base": self.TestBase, |
- "layout": self.TestLayout, |
- "dll": self.TestDll, |
- "layout_all": self.TestLayoutAll, |
- "ui": self.TestUI} |
+ self._test_list = { |
+# "test_shell": self.TestTestShell, |
+ "unit": self.TestUnit, |
+ "net": self.TestNet, |
+ "ipc": self.TestIpc, |
+ "base": self.TestBase, |
+# "layout": self.TestLayout, |
+# "dll": self.TestDll, |
+# "layout_all": self.TestLayoutAll, |
+# "ui": self.TestUI |
+ } |
if test not in self._test_list: |
raise TestNotFound("Unknown test: %s" % test) |
@@ -47,30 +56,27 @@ |
self._test = test |
script_dir = google.path_utils.ScriptDir() |
- utility = google.platform_utils.PlatformUtility(script_dir) |
+ utility = platform_utils.PlatformUtility(script_dir) |
# Compute the top of the tree (the "source dir") from the script dir (where |
- # this script lives). We assume that the script dir is in tools/purify |
+ # this script lives). We assume that the script dir is in tools/valgrind/ |
# relative to the top of the tree. |
self._source_dir = os.path.dirname(os.path.dirname(script_dir)) |
# since this path is used for string matching, make sure it's always |
# an absolute Windows-style path |
self._source_dir = utility.GetAbsolutePath(self._source_dir) |
- purify_test = os.path.join(script_dir, "purify_test.py") |
- self._command_preamble = ["python.exe", purify_test, "--echo_to_stdout", |
- "--source_dir=%s" % (self._source_dir), |
- "--save_cache"] |
+ valgrind_test = os.path.join(script_dir, "valgrind_test.py") |
+ self._command_preamble = ["python", valgrind_test, "--echo_to_stdout", |
+ "--source_dir=%s" % (self._source_dir)] |
def _DefaultCommand(self, module, exe=None): |
'''Generates the default command array that most tests will use.''' |
module_dir = os.path.join(self._source_dir, module) |
- if module == "chrome": |
- # unfortunately, not all modules have the same directory structure |
- self._data_dir = os.path.join(module_dir, "test", "data", "purify") |
- else: |
- self._data_dir = os.path.join(module_dir, "data", "purify") |
+ |
+ self._data_dir = "." |
[Inline code-review comment captured in the middle of this hunk — not part of the patch itself. Reviewer: "Erik does not do reviews", 2009/01/08 21:28:25: "You probably want to use the mechanism I used here" (comment truncated by the review tool).] |
+ |
if not self._options.build_dir: |
- dir_chrome = os.path.join(self._source_dir, "chrome", "Release") |
- dir_module = os.path.join(module_dir, "Release") |
+ dir_chrome = os.path.join(self._source_dir, "chrome", "Hammer") |
+ dir_module = os.path.join(module_dir, "Hammer") |
if exe: |
exe_chrome = os.path.join(dir_chrome, exe) |
exe_module = os.path.join(dir_module, exe) |
@@ -98,6 +104,8 @@ |
cmd.append("--baseline") |
if self._options.verbose: |
cmd.append("--verbose") |
+ if self._options.generate_suppressions: |
+ cmd.append("--generate_suppressions") |
if exe: |
cmd.append(os.path.join(self._options.build_dir, exe)) |
return cmd |
@@ -139,7 +147,7 @@ |
def ScriptedTest(self, module, exe, name, script, multi=False, cmd_args=None, |
out_dir_extra=None): |
- '''Purify a target exe, which will be executed one or more times via a |
+ '''Valgrind a target binary, which will be executed one or more times via a |
script or driver program. |
Args: |
module - which top level component this test is from (webkit, base, etc.) |
@@ -151,7 +159,7 @@ |
multi - a boolean hint that the exe will be run multiple times, generating |
multiple output files (without this option, only the last run will be |
recorded and analyzed) |
- cmd_args - extra arguments to pass to the purify_test.py script |
+ cmd_args - extra arguments to pass to the valgrind_test.py script |
''' |
cmd = self._DefaultCommand(module) |
exe = os.path.join(self._options.build_dir, exe) |
@@ -178,120 +186,120 @@ |
self._ReadGtestFilterFile(name, cmd) |
return common.RunSubprocess(cmd, 0) |
- def InstrumentDll(self): |
- '''Does a blocking Purify instrumentation of chrome.dll.''' |
- # TODO(paulg): Make this code support any DLL. |
- cmd = self._DefaultCommand("chrome") |
- cmd.append("--instrument_only") |
- cmd.append(os.path.join(self._options.build_dir, "chrome.dll")) |
- result = common.RunSubprocess(cmd, 0) |
- if result: |
- logging.error("Instrumentation error: %d" % result) |
- return result |
+# def InstrumentDll(self): |
+# '''Does a blocking Purify instrumentation of chrome.dll.''' |
+# # TODO(paulg): Make this code support any DLL. |
+# cmd = self._DefaultCommand("chrome") |
+# cmd.append("--instrument_only") |
+# cmd.append(os.path.join(self._options.build_dir, "chrome.dll")) |
+# result = common.RunSubprocess(cmd, 0) |
+# if result: |
+# logging.error("Instrumentation error: %d" % result) |
+# return result |
- def TestDll(self): |
- return self.InstrumentDll() |
+# def TestDll(self): |
+# return self.InstrumentDll() |
def TestBase(self): |
- return self.SimpleTest("base", "base_unittests.exe") |
+ return self.SimpleTest("base", "base_unittests") |
def TestIpc(self): |
- return self.SimpleTest("chrome", "ipc_tests.exe") |
+ return self.SimpleTest("chrome", "ipc_tests") |
def TestNet(self): |
- return self.SimpleTest("net", "net_unittests.exe") |
+ return self.SimpleTest("net", "net_unittests") |
def TestTestShell(self): |
- return self.SimpleTest("webkit", "test_shell_tests.exe") |
+ return self.SimpleTest("webkit", "test_shell_tests") |
def TestUnit(self): |
- return self.SimpleTest("chrome", "unit_tests.exe") |
+ return self.SimpleTest("chrome", "unit_tests") |
- def TestLayoutAll(self): |
- return self.TestLayout(run_all=True) |
+# def TestLayoutAll(self): |
+# return self.TestLayout(run_all=True) |
- def TestLayout(self, run_all=False): |
- # A "chunk file" is maintained in the local directory so that each test |
- # runs a slice of the layout tests of size chunk_size that increments with |
- # each run. Since tests can be added and removed from the layout tests at |
- # any time, this is not going to give exact coverage, but it will allow us |
- # to continuously run small slices of the layout tests under purify rather |
- # than having to run all of them in one shot. |
- chunk_num = 0 |
- # Tests currently seem to take about 20-30s each. |
- chunk_size = 120 # so about 40-60 minutes per run |
- chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt") |
- if not run_all: |
- try: |
- f = open(chunk_file) |
- if f: |
- str = f.read() |
- if len(str): |
- chunk_num = int(str) |
- # This should be enough so that we have a couple of complete runs |
- # of test data stored in the archive (although note that when we loop |
- # that we almost guaranteed won't be at the end of the test list) |
- if chunk_num > 10000: |
- chunk_num = 0 |
- f.close() |
- except IOError, (errno, strerror): |
- logging.error("error reading from file %s (%d, %s)" % (chunk_file, |
- errno, strerror)) |
+# def TestLayout(self, run_all=False): |
+# # A "chunk file" is maintained in the local directory so that each test |
+# # runs a slice of the layout tests of size chunk_size that increments with |
+# # each run. Since tests can be added and removed from the layout tests at |
+# # any time, this is not going to give exact coverage, but it will allow us |
+# # to continuously run small slices of the layout tests under purify rather |
+# # than having to run all of them in one shot. |
+# chunk_num = 0 |
+# # Tests currently seem to take about 20-30s each. |
+# chunk_size = 120 # so about 40-60 minutes per run |
+# chunk_file = os.path.join(os.environ["TEMP"], "purify_layout_chunk.txt") |
+# if not run_all: |
+# try: |
+# f = open(chunk_file) |
+# if f: |
+# str = f.read() |
+# if len(str): |
+# chunk_num = int(str) |
+# # This should be enough so that we have a couple of complete runs |
+# # of test data stored in the archive (although note that when we loop |
+# # that we almost guaranteed won't be at the end of the test list) |
+# if chunk_num > 10000: |
+# chunk_num = 0 |
+# f.close() |
+# except IOError, (errno, strerror): |
+# logging.error("error reading from file %s (%d, %s)" % (chunk_file, |
+# errno, strerror)) |
- script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", |
- "run_webkit_tests.py") |
- script_cmd = ["python.exe", script, "--run-singly", "-v", |
- "--noshow-results", "--time-out-ms=200000", |
- "--nocheck-sys-deps"] |
- if not run_all: |
- script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) |
+# script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", |
+# "run_webkit_tests.py") |
+# script_cmd = ["python.exe", script, "--run-singly", "-v", |
+# "--noshow-results", "--time-out-ms=200000", |
+# "--nocheck-sys-deps"] |
+# if not run_all: |
+# script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) |
- if len(self._args): |
- # if the arg is a txt file, then treat it as a list of tests |
- if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": |
- script_cmd.append("--test-list=%s" % self._args[0]) |
- else: |
- script_cmd.extend(self._args) |
+# if len(self._args): |
+# # if the arg is a txt file, then treat it as a list of tests |
+# if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": |
+# script_cmd.append("--test-list=%s" % self._args[0]) |
+# else: |
+# script_cmd.extend(self._args) |
- if run_all: |
- ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
- script_cmd, multi=True, cmd_args=["--timeout=0"]) |
- return ret |
+# if run_all: |
+# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
+# script_cmd, multi=True, cmd_args=["--timeout=0"]) |
+# return ret |
- # store each chunk in its own directory so that we can find the data later |
- chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) |
- ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
- script_cmd, multi=True, cmd_args=["--timeout=0"], |
- out_dir_extra=chunk_dir) |
+# # store each chunk in its own directory so that we can find the data later |
+# chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) |
+# ret = self.ScriptedTest("webkit", "test_shell.exe", "layout", |
+# script_cmd, multi=True, cmd_args=["--timeout=0"], |
+# out_dir_extra=chunk_dir) |
- # Wait until after the test runs to completion to write out the new chunk |
- # number. This way, if the bot is killed, we'll start running again from |
- # the current chunk rather than skipping it. |
- try: |
- f = open(chunk_file, "w") |
- chunk_num += 1 |
- f.write("%d" % chunk_num) |
- f.close() |
- except IOError, (errno, strerror): |
- logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, |
- strerror)) |
- # Since we're running small chunks of the layout tests, it's important to |
- # mark the ones that have errors in them. These won't be visible in the |
- # summary list for long, but will be useful for someone reviewing this bot. |
- return ret |
+# # Wait until after the test runs to completion to write out the new chunk |
+# # number. This way, if the bot is killed, we'll start running again from |
+# # the current chunk rather than skipping it. |
+# try: |
+# f = open(chunk_file, "w") |
+# chunk_num += 1 |
+# f.write("%d" % chunk_num) |
+# f.close() |
+# except IOError, (errno, strerror): |
+# logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, |
+# strerror)) |
+# # Since we're running small chunks of the layout tests, it's important to |
+# # mark the ones that have errors in them. These won't be visible in the |
+# # summary list for long, but will be useful for someone reviewing this bot. |
+# return ret |
- def TestUI(self): |
- if not self._options.no_reinstrument: |
- instrumentation_error = self.InstrumentDll() |
- if instrumentation_error: |
- return instrumentation_error |
- return self.ScriptedTest("chrome", "chrome.exe", "ui_tests", |
- ["ui_tests.exe", |
- "--single-process", |
- "--ui-test-timeout=180000", |
- "--ui-test-action-timeout=80000", |
- "--ui-test-action-max-timeout=180000"], |
- multi=True) |
+# def TestUI(self): |
+# if not self._options.no_reinstrument: |
+# instrumentation_error = self.InstrumentDll() |
+# if instrumentation_error: |
+# return instrumentation_error |
+# return self.ScriptedTest("chrome", "chrome.exe", "ui_tests", |
+# ["ui_tests.exe", |
+# "--single-process", |
+# "--ui-test-timeout=120000", |
+# "--ui-test-action-timeout=80000", |
+# "--ui-test-action-max-timeout=180000"], |
+# multi=True) |
def _main(argv): |
@@ -310,6 +318,10 @@ |
help="verbose output - enable debug log messages") |
parser.add_option("", "--no-reinstrument", action="store_true", default=False, |
help="Don't force a re-instrumentation for ui_tests") |
+ parser.add_option("", "--generate_suppressions", action="store_true", |
+ default=False, |
+ help="Skip analysis and generate suppressions") |
+ |
options, args = parser.parse_args() |
if options.verbose: |