| Index: tools/test.py |
| diff --git a/tools/test.py b/tools/test.py |
| index 44fcb954303b5a482a2103deb674f2ae1d02bea8..57b0febddf5b3d07def215971f670bb97433d5a1 100755 |
| --- a/tools/test.py |
| +++ b/tools/test.py |
| @@ -5,73 +5,73 @@ |
| # BSD-style license that can be found in the LICENSE file. |
| # |
| +"""Test driver for the Dart project used by continuous build and developers.""" |
| + |
| + |
| import imp |
| import optparse |
| import os |
| -from os.path import join, dirname, abspath, basename, isdir, exists, realpath |
| -import platform |
| +import Queue |
| import re |
| -import select |
| -import signal |
| -import subprocess |
| import sys |
| -import tempfile |
| -import time |
| import threading |
| -import traceback |
| -from Queue import Queue, Empty |
| +import time |
| +import urllib |
| import testing |
| -import testing.test_runner |
| +from testing import test_runner |
| import utils |
| TIMEOUT_SECS = 60 |
| -VERBOSE = False |
| ARCH_GUESS = utils.GuessArchitecture() |
| OS_GUESS = utils.GuessOS() |
| BUILT_IN_TESTS = ['dartc', 'vm', 'dart', 'corelib', 'language', 'co19', |
| 'samples', 'isolate', 'stub-generator', 'client'] |
| # Patterns for matching test options in .dart files. |
| -VM_OPTIONS_PATTERN = re.compile(r"// VMOptions=(.*)") |
| -DART_OPTIONS_PATTERN = re.compile(r"// DartOptions=(.*)") |
| -ISOLATE_STUB_PATTERN = re.compile(r"// IsolateStubs=(.*)") |
| +VM_OPTIONS_PATTERN = re.compile(r'// VMOptions=(.*)') |
| +DART_OPTIONS_PATTERN = re.compile(r'// DartOptions=(.*)') |
| +ISOLATE_STUB_PATTERN = re.compile(r'// IsolateStubs=(.*)') |
| # --------------------------------------------- |
| # --- P r o g r e s s I n d i c a t o r s --- |
| # --------------------------------------------- |
| +class Error(Exception): |
| + pass |
| + |
| + |
| class ProgressIndicator(object): |
| + """Base class for displaying the progress of the test run.""" |
| def __init__(self, cases, context): |
| self.abort = False |
| self.terminate = False |
| self.cases = cases |
| - self.queue = Queue(len(cases)) |
| - self.batch_queues = {}; |
| + self.queue = Queue.Queue(len(cases)) |
| + self.batch_queues = {} |
| self.context = context |
| # Extract batchable cases. |
| found_cmds = {} |
| - batch_cases = [] |
| for case in cases: |
| cmd = case.case.GetCommand()[0] |
| if not utils.IsWindows(): |
| # Diagnostic check for executable (if an absolute pathname) |
| if not cmd in found_cmds: |
| if os.path.isabs(cmd) and not os.path.isfile(cmd): |
| - msg = "Can't find command %s\n" % cmd \ |
| - + "(Did you build first? " \ |
| - + "Are you running in the correct directory?)" |
| + msg = "Can't find command %s\n" % cmd |
| + msg += '(Did you build first? ' |
| + msg += 'Are you running in the correct directory?)' |
| raise Exception(msg) |
| else: |
| found_cmds[cmd] = 1 |
| if case.case.IsBatchable(): |
| - if not self.batch_queues.has_key(cmd): |
| - self.batch_queues[cmd] = Queue(len(cases)) |
| + if not cmd in self.batch_queues: |
| + self.batch_queues[cmd] = Queue.Queue(len(cases)) |
| self.batch_queues[cmd].put(case) |
| else: |
| self.queue.put_nowait(case) |
| @@ -79,7 +79,7 @@ class ProgressIndicator(object): |
| self.succeeded = 0 |
| self.remaining = len(cases) |
| self.total = len(cases) |
| - self.failed = [ ] |
| + self.failed = [] |
| self.crashed = 0 |
| self.lock = threading.Lock() |
| @@ -88,13 +88,14 @@ class ProgressIndicator(object): |
| negative_marker = '[negative] ' |
| else: |
| negative_marker = '' |
| - print "=== %(label)s %(negative)s===" % { |
| - 'label': test.GetLabel(), |
| - 'negative': negative_marker |
| + print '=== %(label)s %(negative)s===' % { |
| + 'label': test.GetLabel(), |
| + 'negative': negative_marker |
| } |
| - print "Path: %s" % "/".join(test.path) |
| + print 'Path: %s' % '/'.join(test.path) |
| def Run(self, tasks): |
| + """Starts tests and keeps running until queues are drained.""" |
| self.Starting() |
| # Scale the number of tasks to the number of CPUs on the machine |
| @@ -110,45 +111,43 @@ class ProgressIndicator(object): |
| # Spawn N-1 threads and then use this thread as the last one. |
| # That way -j1 avoids threading altogether which is a nice fallback |
| # in case of threading problems. |
| - for i in xrange(tasks - 1): |
| + for unused_i in xrange(tasks - 1): |
| thread = threading.Thread(target=self.RunSingle, args=[]) |
| threads.append(thread) |
| thread.start() |
| - |
| # Next, crank up the batchable tasks. Note that this will start |
| # 'tasks' more threads, but the assumption is that if batching is |
| # enabled that almost all tests are batchable. |
| for (cmd, queue) in self.batch_queues.items(): |
| if not queue.empty(): |
| - batch_len = queue.qsize(); |
| batch_tester = None |
| try: |
| - batch_tester = testing.test_runner.BatchRunner(queue, tasks, self, |
| - [cmd, '-batch']) |
| - except Exception, e: |
| - print "Aborting batch test for " + cmd + ". Problem on startup." |
| + batch_tester = test_runner.BatchRunner(queue, tasks, self, |
| + [cmd, '-batch']) |
| + except: |
| + print 'Aborting batch test for ' + cmd + '. Problem on startup.' |
| if batch_tester: batch_tester.Shutdown() |
| raise |
| try: |
| batch_tester.WaitForCompletion() |
| except: |
| - print "Aborting batch cmd " + cmd + "while waiting for completion." |
| + print 'Aborting batch cmd ' + cmd + ' while waiting for completion.' |
| if batch_tester: batch_tester.Shutdown() |
| raise |
| try: |
| self.RunSingle() |
| if self.abort: |
| - raise Exception("Aborted") |
| + raise Error('Aborted') |
| # Wait for the remaining non-batched threads. |
| for thread in threads: |
| # Use a timeout so that signals (ctrl-c) will be processed. |
| thread.join(timeout=10000000) |
| if self.abort: |
| - raise Exception("Aborted") |
| - except Exception, e: |
| + raise Error('Aborted') |
| + except: |
| # If there's an exception we schedule an interruption for any |
| # remaining threads. |
| self.terminate = True |
| @@ -162,7 +161,7 @@ class ProgressIndicator(object): |
| while not self.terminate: |
| try: |
| test = self.queue.get_nowait() |
| - except Empty: |
| + except Queue.Empty: |
| return |
| case = test.case |
| with self.lock: |
| @@ -175,7 +174,7 @@ class ProgressIndicator(object): |
| self.abort = True |
| self.terminate = True |
| raise |
| - except IOError, e: |
| + except IOError: |
| self.abort = True |
| self.terminate = True |
| raise |
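The worker pattern in Run() and RunSingle() above amounts to several threads
draining a shared Queue.Queue until Queue.Empty is raised, with the calling
thread acting as the last worker so -j1 avoids threading entirely. A minimal
self-contained sketch of that pattern (the work items are stand-ins, not part
of this patch):

  import Queue
  import sys
  import threading

  def Drain(queue):
    # Pull items until the queue is exhausted; Queue.Empty ends the worker.
    while True:
      try:
        work = queue.get_nowait()
      except Queue.Empty:
        return
      work()

  queue = Queue.Queue(4)
  for i in xrange(4):
    queue.put_nowait(lambda i=i: sys.stdout.write('ran task %d\n' % i))
  threads = [threading.Thread(target=Drain, args=[queue])
             for unused_i in xrange(2)]
  for t in threads:
    t.start()
  Drain(queue)  # The calling thread serves as the final worker.
  for t in threads:
    t.join()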
| @@ -201,55 +200,61 @@ def EscapeCommand(command): |
| parts.append('"%s"' % part) |
| else: |
| parts.append(part) |
| - return " ".join(parts) |
| + return ' '.join(parts) |
| class SimpleProgressIndicator(ProgressIndicator): |
| + """Base class for printing output of each test separately.""" |
| def Starting(self): |
| + """Called at the beginning before any tests are run.""" |
| print 'Running %i tests' % len(self.cases) |
| def Done(self): |
| + """Called when all tests are complete.""" |
| for failed in self.failed: |
| self.PrintFailureHeader(failed.test) |
| if failed.output.stderr: |
| - print "--- stderr ---" |
| + print '--- stderr ---' |
| print failed.output.stderr.strip() |
| if failed.output.stdout: |
| - print "--- stdout ---" |
| + print '--- stdout ---' |
| print failed.output.stdout.strip() |
| - print "Command: %s" % EscapeCommand(failed.command) |
| + print 'Command: %s' % EscapeCommand(failed.command) |
| if failed.HasCrashed(): |
| - print "--- CRASHED ---" |
| + print '--- CRASHED ---' |
| if failed.HasTimedOut(): |
| - print "--- TIMEOUT ---" |
| - if len(self.failed) == 0: |
| - print "===" |
| - print "=== All tests succeeded" |
| - print "===" |
| + print '--- TIMEOUT ---' |
| + if not self.failed: |
| + print '===' |
| + print '=== All tests succeeded' |
| + print '===' |
| else: |
| - print "===" |
| + print '===' |
| if len(self.failed) == 1: |
| - print "=== 1 test failed" |
| + print '=== 1 test failed' |
| else: |
| - print "=== %i tests failed" % len(self.failed) |
| + print '=== %i tests failed' % len(self.failed) |
| if self.crashed > 0: |
| if self.crashed == 1: |
| - print "=== 1 test CRASHED" |
| + print '=== 1 test CRASHED' |
| else: |
| - print "=== %i tests CRASHED" % self.crashed |
| - print "===" |
| + print '=== %i tests CRASHED' % self.crashed |
| + print '===' |
| class VerboseProgressIndicator(SimpleProgressIndicator): |
| + """Print verbose information about each test that is run.""" |
| def AboutToRun(self, case): |
| + """Called before each test case is run.""" |
| print 'Starting %s...' % case.GetLabel() |
| sys.stdout.flush() |
| def HasRun(self, output): |
| + """Called after each test case is run.""" |
| if output.UnexpectedOutput(): |
| if output.HasCrashed(): |
| outcome = 'CRASH' |
| @@ -261,11 +266,14 @@ class VerboseProgressIndicator(SimpleProgressIndicator): |
| class OneLineProgressIndicator(SimpleProgressIndicator): |
| + """Results of each test is printed like a report, on a line by itself.""" |
| def AboutToRun(self, case): |
| + """Called before each test case is run.""" |
| pass |
| def HasRun(self, output): |
| + """Called after each test case is run.""" |
| if output.UnexpectedOutput(): |
| if output.HasCrashed(): |
| outcome = 'CRASH' |
| @@ -279,6 +287,7 @@ class OneLineProgressIndicator(SimpleProgressIndicator): |
| class OneLineProgressIndicatorForBuildBot(OneLineProgressIndicator): |
| def HasRun(self, output): |
| + """Called after each test case is run.""" |
| super(OneLineProgressIndicatorForBuildBot, self).HasRun(output) |
| percent = (((self.total - self.remaining) * 100) // self.total) |
| print '@@@STEP_CLEAR@@@' |
| @@ -287,6 +296,7 @@ class OneLineProgressIndicatorForBuildBot(OneLineProgressIndicator): |
| class CompactProgressIndicator(ProgressIndicator): |
| + """Continuously updates a single line w/ a summary of progress of the run.""" |
| def __init__(self, cases, context, templates): |
| super(CompactProgressIndicator, self).__init__(cases, context) |
| @@ -295,92 +305,102 @@ class CompactProgressIndicator(ProgressIndicator): |
| self.start_time = time.time() |
| def Starting(self): |
| + """Called at the beginning before any tests are run.""" |
| pass |
| def Done(self): |
| - self.PrintProgress('Done') |
| + """Called when all tests are complete.""" |
| + self._PrintProgress('Done') |
| def AboutToRun(self, case): |
| - self.PrintProgress(case.GetLabel()) |
| + """Called before each test case is run.""" |
| + self._PrintProgress(case.GetLabel()) |
| def HasRun(self, output): |
| + """Called after each test case is run.""" |
| if output.UnexpectedOutput(): |
| self.ClearLine(self.last_status_length) |
| self.PrintFailureHeader(output.test) |
| stdout = output.output.stdout.strip() |
| - if len(stdout): |
| + if stdout: |
| print self.templates['stdout'] % stdout |
| stderr = output.output.stderr.strip() |
| - if len(stderr): |
| + if stderr: |
| print self.templates['stderr'] % stderr |
| - print "Command: %s" % EscapeCommand(output.command) |
| + print 'Command: %s' % EscapeCommand(output.command) |
| if output.HasCrashed(): |
| - print "--- CRASHED ---" |
| + print '--- CRASHED ---' |
| if output.HasTimedOut(): |
| - print "--- TIMEOUT ---" |
| + print '--- TIMEOUT ---' |
| - def Truncate(self, str, length): |
| - if length and (len(str) > (length - 3)): |
| - return str[:(length-3)] + "..." |
| + def _Truncate(self, buf, length): |
| + """Truncate a line if it exceeds length, substituting an ellipsis...""" |
| + if length and (len(buf) > (length - 3)): |
| + return buf[:(length-3)] + '...' |
| else: |
| - return str |
| + return buf |
| - def PrintProgress(self, name): |
| + def _PrintProgress(self, name): |
| + """Refresh the display.""" |
| self.ClearLine(self.last_status_length) |
| elapsed = time.time() - self.start_time |
| status = self.templates['status_line'] % { |
| - 'passed': self.succeeded, |
| - 'percent': (((self.total - self.remaining) * 100) // self.total), |
| - 'failed': len(self.failed), |
| - 'test': name, |
| - 'mins': int(elapsed) / 60, |
| - 'secs': int(elapsed) % 60 |
| + 'passed': self.succeeded, |
| + 'percent': (((self.total - self.remaining) * 100) // self.total), |
| + 'failed': len(self.failed), |
| + 'test': name, |
| + 'mins': int(elapsed) / 60, |
| + 'secs': int(elapsed) % 60 |
| } |
| - status = self.Truncate(status, 78) |
| + status = self._Truncate(status, 78) |
| self.last_status_length = len(status) |
| print status, |
| sys.stdout.flush() |
| + def ClearLine(self, last_line_length): |
| + """Erase the current line w/ a linefeed and overwriting with spaces.""" |
| + print ('\r' + (' ' * last_line_length) + '\r'), |
| + |
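The '\r' plus trailing-comma print in ClearLine above is what lets the compact
indicators redraw a single status line in place. A standalone illustration of
the same trick (the test names are made up):

  import sys
  import time

  last_len = 0
  for name in ['corelib/StringTest', 'language/ScopeTest']:
    print '\r' + (' ' * last_len) + '\r',  # Wipe the previous status.
    status = 'Running %s' % name
    print status,                          # Trailing comma: no newline.
    sys.stdout.flush()
    last_len = len(status)
    time.sleep(0.5)
  print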
| class MonochromeProgressIndicator(CompactProgressIndicator): |
| + """A CompactProgressIndicator with no color.""" |
| def __init__(self, cases, context): |
| templates = { |
| - 'status_line': "[%(mins)02i:%(secs)02i|%%%(percent) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s", |
| - 'stdout': '%s', |
| - 'stderr': '%s', |
| - 'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"), |
| - 'max_length': 78 |
| + 'status_line': '[%(mins)02i:%(secs)02i|%%%(percent) ' |
| + '4d|+%(passed) 4d|-%(failed) 4d]: %(test)s', |
| + 'stdout': '%s', |
| + 'stderr': '%s', |
| + 'clear': lambda last_line_len: self.ClearLine(last_line_len), |
| + 'max_length': 78 |
| } |
| - super(MonochromeProgressIndicator, self).__init__(cases, context, templates) |
| + super(MonochromeProgressIndicator, self).__init__(cases, |
| + context, |
| + templates) |
| - def ClearLine(self, last_line_length): |
| - print ("\r" + (" " * last_line_length) + "\r"), |
| class ColorProgressIndicator(CompactProgressIndicator): |
| + """A CompactProgressIndicator with pretty colors.""" |
| def __init__(self, cases, context): |
| templates = { |
| - 'status_line': ("[%(mins)02i:%(secs)02i|%%%(percent) 4d|" |
| - "\033[32m+%(passed) 4d" |
| - "\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s"), |
| - 'stdout': '%s', |
| - 'stderr': '%s', |
| - 'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"), |
| - 'max_length': 78 |
| + 'status_line': ('[%(mins)02i:%(secs)02i|%%%(percent) 4d|' |
| + '\033[32m+%(passed) 4d' |
| + '\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s'), |
| + 'stdout': '%s', |
| + 'stderr': '%s', |
| + 'clear': lambda last_line_len: self.ClearLine(last_line_len), |
| + 'max_length': 78 |
| } |
| super(ColorProgressIndicator, self).__init__(cases, context, templates) |
| - def ClearLine(self, last_line_length): |
| - print ("\r" + (" " * last_line_length) + "\r"), |
| - |
| PROGRESS_INDICATORS = { |
| - 'verbose': VerboseProgressIndicator, |
| - 'mono': MonochromeProgressIndicator, |
| - 'color': ColorProgressIndicator, |
| - 'line': OneLineProgressIndicator, |
| - 'buildbot': OneLineProgressIndicatorForBuildBot |
| + 'verbose': VerboseProgressIndicator, |
| + 'mono': MonochromeProgressIndicator, |
| + 'color': ColorProgressIndicator, |
| + 'line': OneLineProgressIndicator, |
| + 'buildbot': OneLineProgressIndicatorForBuildBot |
| } |
| @@ -389,18 +409,8 @@ PROGRESS_INDICATORS = { |
| # ------------------------- |
| -class CommandOutput(object): |
| - |
| - def __init__(self, pid, exit_code, timed_out, stdout, stderr): |
| - self.pid = pid |
| - self.exit_code = exit_code |
| - self.timed_out = timed_out |
| - self.stdout = stdout |
| - self.stderr = stderr |
| - self.failed = None |
| - |
| - |
| class TestCase(object): |
| + """A single test case, like running 'dart' on a single .dart file.""" |
| def __init__(self, context, path): |
| self.path = path |
| @@ -431,10 +441,11 @@ class TestCase(object): |
| def RunCommand(self, command, cwd=None, cleanup=True): |
| full_command = self.context.processor(command) |
| try: |
| - output = Execute(full_command, self.context, self.context.timeout, cwd) |
| + output = test_runner.Execute(full_command, self.context, |
| + self.context.timeout, cwd) |
| except OSError as e: |
| - raise utils.ToolError("%s: %s" % (full_command[0], e.strerror)) |
| - test_output = TestOutput(self, full_command, output) |
| + raise utils.ToolError('%s: %s' % (full_command[0], e.strerror)) |
| + test_output = test_runner.TestOutput(self, full_command, output) |
| if cleanup: self.Cleanup() |
| return test_output |
| @@ -457,165 +468,27 @@ class TestCase(object): |
| return |
| -class TestOutput(object): |
| - |
| - def __init__(self, test, command, output): |
| - self.test = test |
| - self.command = command |
| - self.output = output |
| - |
| - def UnexpectedOutput(self): |
| - if self.HasCrashed(): |
| - outcome = testing.CRASH |
| - elif self.HasTimedOut(): |
| - outcome = testing.TIMEOUT |
| - elif self.HasFailed(): |
| - outcome = testing.FAIL |
| - else: |
| - outcome = testing.PASS |
| - return not outcome in self.test.outcomes |
| - |
| - def HasCrashed(self): |
| - if utils.IsWindows(): |
| - if self.output.exit_code == 3: |
| - # The VM uses std::abort to terminate on asserts. |
| - # std::abort terminates with exit code 3 on Windows. |
| - return True |
| - return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code) |
| - else: |
| - # Timed out tests will have exit_code -signal.SIGTERM. |
| - if self.output.timed_out: |
| - return False |
| - if self.output.exit_code == 253: |
| - # The Java dartc runners exit 253 in case of unhandled exceptions. |
| - return True |
| - return self.output.exit_code < 0 |
| - |
| - def HasTimedOut(self): |
| - return self.output.timed_out; |
| - |
| - def HasFailed(self): |
| - execution_failed = self.test.DidFail(self.output) |
| - if self.test.IsNegative(): |
| - return not execution_failed |
| - else: |
| - return execution_failed |
| - |
| - |
| -def KillProcessWithID(pid): |
| - if utils.IsWindows(): |
| - os.popen('taskkill /T /F /PID %d' % pid) |
| - else: |
| - os.kill(pid, signal.SIGTERM) |
| - |
| - |
| -MAX_SLEEP_TIME = 0.1 |
| -INITIAL_SLEEP_TIME = 0.0001 |
| -SLEEP_TIME_FACTOR = 1.25 |
| - |
| -SEM_INVALID_VALUE = -1 |
| -SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h |
| - |
| -def Win32SetErrorMode(mode): |
| - prev_error_mode = SEM_INVALID_VALUE |
| - try: |
| - import ctypes |
| - prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode); |
| - except ImportError: |
| - pass |
| - return prev_error_mode |
| - |
| -def RunProcess(context, timeout, args, **rest): |
| - if context.verbose: print "#", " ".join(args) |
| - popen_args = args |
| - prev_error_mode = SEM_INVALID_VALUE; |
| - if utils.IsWindows(): |
| - popen_args = '"' + subprocess.list2cmdline(args) + '"' |
| - if context.suppress_dialogs: |
| - # Try to change the error mode to avoid dialogs on fatal errors. Don't |
| - # touch any existing error mode flags by merging the existing error mode. |
| - # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx. |
| - error_mode = SEM_NOGPFAULTERRORBOX; |
| - prev_error_mode = Win32SetErrorMode(error_mode); |
| - Win32SetErrorMode(error_mode | prev_error_mode); |
| - process = subprocess.Popen( |
| - shell = utils.IsWindows(), |
| - args = popen_args, |
| - **rest |
| - ) |
| - if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE: |
| - Win32SetErrorMode(prev_error_mode) |
| - # Compute the end time - if the process crosses this limit we |
| - # consider it timed out. |
| - if timeout is None: end_time = None |
| - else: end_time = time.time() + timeout |
| - timed_out = False |
| - # Repeatedly check the exit code from the process in a |
| - # loop and keep track of whether or not it times out. |
| - exit_code = None |
| - sleep_time = INITIAL_SLEEP_TIME |
| - while exit_code is None: |
| - if (not end_time is None) and (time.time() >= end_time): |
| - # Kill the process and wait for it to exit. |
| - KillProcessWithID(process.pid) |
| - # Drain the output pipe from the process to avoid deadlock |
| - process.communicate() |
| - exit_code = process.wait() |
| - timed_out = True |
| - else: |
| - exit_code = process.poll() |
| - time.sleep(sleep_time) |
| - sleep_time = sleep_time * SLEEP_TIME_FACTOR |
| - if sleep_time > MAX_SLEEP_TIME: |
| - sleep_time = MAX_SLEEP_TIME |
| - return (process, exit_code, timed_out) |
| - |
| - |
| -def PrintError(str): |
| - sys.stderr.write(str) |
| - sys.stderr.write('\n') |
| - |
| - |
| -def CheckedUnlink(name): |
| - try: |
| - os.unlink(name) |
| - except OSError, e: |
| - PrintError("os.unlink() " + str(e)) |
| - |
| - |
| -def Execute(args, context, timeout=None, cwd=None): |
| - (fd_out, outname) = tempfile.mkstemp() |
| - (fd_err, errname) = tempfile.mkstemp() |
| - (process, exit_code, timed_out) = RunProcess( |
| - context, |
| - timeout, |
| - args = args, |
| - stdout = fd_out, |
| - stderr = fd_err, |
| - cwd = cwd |
| - ) |
| - os.close(fd_out) |
| - os.close(fd_err) |
| - output = file(outname).read() |
| - errors = file(errname).read() |
| - CheckedUnlink(outname) |
| - CheckedUnlink(errname) |
| - result = CommandOutput(process.pid, exit_code, timed_out, output, errors) |
| - return result |
| - |
| - |
| class TestConfiguration(object): |
| + """Test configurations give test.py the list of tests, e.g. listing a dir.""" |
| def __init__(self, context, root): |
| self.context = context |
| self.root = root |
| - def Contains(self, path, file): |
| - if len(path) > len(file): |
| + def Contains(self, path, filename): |
| + """Returns True if the given path regexp matches the passed filename.""" |
| + |
| + if len(path) > len(filename): |
| return False |
| for i in xrange(len(path)): |
| - if not path[i].match(file[i]): |
| - return False |
| + try: |
| + if not path[i].match(filename[i]): |
| + return False |
| + except: |
| + print 'Invalid regexp %s in .status file.' % '/'.join(map(str, path)) |
| + print 'Try escaping special characters with \\' |
| + raise |
| + |
| return True |
| def GetTestStatus(self, sections, defs): |
| @@ -632,26 +505,29 @@ class TestSuite(object): |
| class TestRepository(TestSuite): |
| + """A collection of test configurations.""" |
| def __init__(self, path): |
| - normalized_path = abspath(path) |
| - super(TestRepository, self).__init__(basename(normalized_path)) |
| + normalized_path = os.path.abspath(path) |
| + super(TestRepository, self).__init__(os.path.basename(normalized_path)) |
| self.path = normalized_path |
| self.is_loaded = False |
| self.config = None |
| def GetConfiguration(self, context): |
| + """Retrieve a TestConfiguration subclass for this set of tests.""" |
| if self.is_loaded: |
| return self.config |
| self.is_loaded = True |
| - file = None |
| + filename = None |
| try: |
| - (file, pathname, description) = imp.find_module('testcfg', [ self.path ]) |
| - module = imp.load_module('testcfg', file, pathname, description) |
| + (filename, pathname, description) = imp.find_module( |
| + 'testcfg', [self.path]) |
| + module = imp.load_module('testcfg', filename, pathname, description) |
| self.config = module.GetConfiguration(context, self.path) |
| finally: |
| - if file: |
| - file.close() |
| + if filename: |
| + filename.close() |
| return self.config |
| def ListTests(self, current_path, path, context, mode, arch): |
| @@ -665,14 +541,15 @@ class TestRepository(TestSuite): |
| class LiteralTestSuite(TestSuite): |
| + """Represents one set of tests.""" |
| def __init__(self, tests): |
| super(LiteralTestSuite, self).__init__('root') |
| self.tests = tests |
| def ListTests(self, current_path, path, context, mode, arch): |
| - name = path[0] |
| - result = [ ] |
| + name = path[0] |
| + result = [] |
| for test in self.tests: |
| test_name = test.GetName() |
| if name.match(test_name): |
| @@ -684,23 +561,26 @@ class LiteralTestSuite(TestSuite): |
| for test in self.tests: |
| test.GetTestStatus(context, sections, defs) |
| + |
| class Context(object): |
| + """A way to send global context for the test run to each test case.""" |
| - def __init__(self, workspace, verbose, os, timeout, |
| + def __init__(self, workspace, verbose, os_def, timeout, |
|
ahe 2011/10/11 07:55:34: I'm not sure what os_def means. Would you mind not
zundel 2011/10/11 22:45:44: os conflicts with import os, so I renamed it. I'l
|
| processor, suppress_dialogs, executable, flags, |
| keep_temporary_files, use_batch): |
| self.workspace = workspace |
| self.verbose = verbose |
| - self.os = os |
| + self.os = os_def |
| self.timeout = timeout |
| self.processor = processor |
| self.suppress_dialogs = suppress_dialogs |
| self.executable = executable |
| self.flags = flags |
| self.keep_temporary_files = keep_temporary_files |
| - self.use_batch = use_batch == "true" |
| + self.use_batch = use_batch == 'true' |
| def GetBuildRoot(self, mode, arch): |
| + """The top level directory containing compiler, runtime, tools...""" |
| result = utils.GetBuildRoot(self.os, mode, arch) |
| return result |
| @@ -709,37 +589,43 @@ class Context(object): |
| return result |
| def GetExecutable(self, mode, arch, name): |
| + """Returns the name of the executable used to run the test.""" |
| if self.executable is not None: |
| return self.executable |
| - path = abspath(join(self.GetBuildRoot(mode, arch), name)) |
| + path = os.path.abspath(os.path.join(self.GetBuildRoot(mode, arch), name)) |
| if utils.IsWindows() and not path.endswith('.exe'): |
| return path + '.exe' |
| else: |
| return path |
| def GetDart(self, mode, arch): |
| + """Returns the path to the Dart test runner (executes the .dart file).""" |
| if arch == 'dartc': |
| - command = [ abspath(join(self.GetBuildRoot(mode, arch), |
| - 'compiler', 'bin', 'dartc_test')) ] |
| + command = [os.path.abspath( |
| + os.path.join(self.GetBuildRoot(mode, arch), |
| + 'compiler', 'bin', 'dartc_test'))] |
| else: |
| - command = [ self.GetExecutable(mode, arch, 'dart_bin') ] |
| + command = [self.GetExecutable(mode, arch, 'dart_bin')] |
| return command |
| def GetDartC(self, mode, arch): |
| - dartc = abspath(os.path.join(self.GetBuildRoot(mode, arch), |
| - 'compiler', 'bin', 'dartc')) |
| + """Returns the path to the Dart --> JS compiler.""" |
| + dartc = os.path.abspath(os.path.join( |
| + self.GetBuildRoot(mode, arch), 'compiler', 'bin', 'dartc')) |
| if utils.IsWindows(): dartc += '.exe' |
| - command = [ dartc ] |
| + command = [dartc] |
| # Add the flags from the context to the command line. |
| command += self.flags |
| return command |
| def GetRunTests(self, mode, arch): |
| - return [ self.GetExecutable(mode, arch, 'run_vm_tests') ] |
| + return [self.GetExecutable(mode, arch, 'run_vm_tests')] |
| + |
| def RunTestCases(cases_to_run, progress, tasks, context): |
| + """Chooses a progress indicator and then starts the tests.""" |
| progress = PROGRESS_INDICATORS[progress](cases_to_run, context) |
| return progress.Run(tasks) |
| @@ -748,6 +634,7 @@ def RunTestCases(cases_to_run, progress, tasks, context): |
| # --- T e s t C o n f i g u r a t i o n --- |
| # ------------------------------------------- |
| + |
| class Expression(object): |
| pass |
| @@ -755,25 +642,29 @@ class Expression(object): |
| class Constant(Expression): |
| def __init__(self, value): |
| + super(Constant, self).__init__() |
| self.value = value |
| - def Evaluate(self, env, defs): |
| + def Evaluate(self, unused_env, unused_defs): |
| return self.value |
| class Variable(Expression): |
| def __init__(self, name): |
| + super(Variable, self).__init__() |
| self.name = name |
| - def GetOutcomes(self, env, defs): |
| - if self.name in env: return ListSet([env[self.name]]) |
| + def GetOutcomes(self, env, unused_defs): |
| + if self.name in env: |
| + return ListSet([env[self.name]]) |
| else: return Nothing() |
| class Outcome(Expression): |
| def __init__(self, name): |
| + super(Outcome, self).__init__() |
| self.name = name |
| def GetOutcomes(self, env, defs): |
| @@ -784,37 +675,42 @@ class Outcome(Expression): |
| class Set(object): |
| + """An abstract set class used to hold Rules.""" |
| pass |
| class ListSet(Set): |
| + """A set that uses lists for storage.""" |
| def __init__(self, elms): |
| + super(ListSet, self).__init__() |
| self.elms = elms |
| def __str__(self): |
| - return "ListSet%s" % str(self.elms) |
| + return 'ListSet%s' % str(self.elms) |
| def Intersect(self, that): |
| if not isinstance(that, ListSet): |
| return that.Intersect(self) |
| - return ListSet([ x for x in self.elms if x in that.elms ]) |
| + return ListSet([x for x in self.elms if x in that.elms]) |
| def Union(self, that): |
| if not isinstance(that, ListSet): |
| return that.Union(self) |
| - return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ]) |
| + return ListSet(self.elms + |
| + [x for x in that.elms if x not in self.elms]) |
| def IsEmpty(self): |
| - return len(self.elms) == 0 |
| + return not self.elms |
| class Everything(Set): |
| + """A set that represents all possible values.""" |
| def Intersect(self, that): |
| return that |
| - def Union(self, that): |
| + def Union(self, unused_that): |
| return self |
| def IsEmpty(self): |
| @@ -823,7 +719,7 @@ class Everything(Set): |
| class Nothing(Set): |
| - def Intersect(self, that): |
| + def Intersect(self, unused_that): |
| return self |
| def Union(self, that): |
| @@ -834,19 +730,24 @@ class Nothing(Set): |
| class Operation(Expression): |
| + """A conditional expression. e.g. ($arch == ia32).""" |
| def __init__(self, left, op, right): |
| + super(Operation, self).__init__() |
| self.left = left |
| self.op = op |
| self.right = right |
| def Evaluate(self, env, defs): |
| + """Evaluates expression in the .status file. e.g. ($arch == ia32).""" |
| + |
| if self.op == '||' or self.op == ',': |
| return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs) |
| elif self.op == 'if': |
| return False |
| elif self.op == '==': |
| - inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs)) |
| + outcomes = self.left.GetOutcomes(env, defs) |
| + inter = outcomes.Intersect(self.right.GetOutcomes(env, defs)) |
| return not inter.IsEmpty() |
| else: |
| assert self.op == '&&' |
| @@ -854,25 +755,28 @@ class Operation(Expression): |
| def GetOutcomes(self, env, defs): |
| if self.op == '||' or self.op == ',': |
| - return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs)) |
| + outcomes = self.left.GetOutcomes(env, defs) |
| + return outcomes.Union(self.right.GetOutcomes(env, defs)) |
| elif self.op == 'if': |
| - if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs) |
| + if self.right.Evaluate(env, defs): |
| + return self.left.GetOutcomes(env, defs) |
| else: return Nothing() |
| else: |
| assert self.op == '&&' |
| - return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs)) |
| + outcomes = self.left.GetOutcomes(env, defs) |
| + return outcomes.Intersect(self.right.GetOutcomes(env, defs)) |
| -def IsAlpha(str): |
| - for char in str: |
| +def IsAlpha(buf): |
| + """Returns True if the entire string is alphanumeric.""" |
| + for char in buf: |
| if not (char.isalpha() or char.isdigit() or char == '_'): |
| return False |
| return True |
| class Tokenizer(object): |
| - """A simple string tokenizer that chops expressions into variables, |
| - parens and operators""" |
| + """Tokenizer that chops expressions into variables, parens and operators.""" |
| def __init__(self, expr): |
| self.index = 0 |
| @@ -880,15 +784,15 @@ class Tokenizer(object): |
| self.length = len(expr) |
| self.tokens = None |
| - def Current(self, length = 1): |
| - if not self.HasMore(length): return "" |
| + def Current(self, length=1): |
| + if not self.HasMore(length): return '' |
| return self.expr[self.index:self.index+length] |
| - def HasMore(self, length = 1): |
| + def HasMore(self, length=1): |
| return self.index < self.length + (length - 1) |
| - def Advance(self, count = 1): |
| - self.index = self.index + count |
| + def Advance(self, count=1): |
| + self.index += count |
| def AddToken(self, token): |
| self.tokens.append(token) |
| @@ -898,7 +802,19 @@ class Tokenizer(object): |
| self.Advance() |
| def Tokenize(self): |
| - self.tokens = [ ] |
| + """Lexical analysis of an expression in a .status file. |
| + |
| + Example: |
| + [ $mode == debug && ($arch == chromium || $arch == dartc) ] |
| + |
| + Args: |
| + None. |
| + |
| + Returns: |
| + A list of tokens on success, None on failure. |
| + """ |
| + |
| + self.tokens = [] |
| while self.HasMore(): |
| self.SkipSpaces() |
| if not self.HasMore(): |
| @@ -916,7 +832,7 @@ class Tokenizer(object): |
| self.AddToken(',') |
| self.Advance() |
| elif IsAlpha(self.Current()): |
| - buf = "" |
| + buf = '' |
| while self.HasMore() and IsAlpha(self.Current()): |
| buf += self.Current() |
| self.Advance() |
| @@ -936,7 +852,7 @@ class Tokenizer(object): |
| class Scanner(object): |
| - """A simple scanner that can serve out tokens from a given list""" |
| + """A simple scanner that can serve out tokens from a given list.""" |
| def __init__(self, tokens): |
| self.tokens = tokens |
| @@ -950,14 +866,16 @@ class Scanner(object): |
| return self.tokens[self.index] |
| def Advance(self): |
| - self.index = self.index + 1 |
| + self.index += 1 |
| def ParseAtomicExpression(scan): |
| - if scan.Current() == "true": |
| + """Parse an single (non recursive) expression in a .status file.""" |
| + |
| + if scan.Current() == 'true': |
| scan.Advance() |
| return Constant(True) |
| - elif scan.Current() == "false": |
| + elif scan.Current() == 'false': |
| scan.Advance() |
| return Constant(False) |
| elif IsAlpha(scan.Current()): |
| @@ -982,11 +900,11 @@ def ParseAtomicExpression(scan): |
| return None |
| -BINARIES = ['=='] |
| def ParseOperatorExpression(scan): |
| + """Parse an expression that has operators.""" |
| left = ParseAtomicExpression(scan) |
| if not left: return None |
| - while scan.HasMore() and (scan.Current() in BINARIES): |
| + while scan.HasMore() and (scan.Current() in ['==']): |
| op = scan.Current() |
| scan.Advance() |
| right = ParseOperatorExpression(scan) |
| @@ -1004,15 +922,15 @@ def ParseConditionalExpression(scan): |
| right = ParseOperatorExpression(scan) |
| if not right: |
| return None |
| - left= Operation(left, 'if', right) |
| + left = Operation(left, 'if', right) |
| return left |
| -LOGICALS = ["&&", "||", ","] |
| def ParseLogicalExpression(scan): |
| + """Parse a binary expression separated by boolean operators.""" |
| left = ParseConditionalExpression(scan) |
| if not left: return None |
| - while scan.HasMore() and (scan.Current() in LOGICALS): |
| + while scan.HasMore() and (scan.Current() in ['&&', '||', ',']): |
| op = scan.Current() |
| scan.Advance() |
| right = ParseConditionalExpression(scan) |
| @@ -1023,18 +941,18 @@ def ParseLogicalExpression(scan): |
| def ParseCondition(expr): |
| - """Parses a logical expression into an Expression object""" |
| + """Parses a boolean expression into an Expression object.""" |
| tokens = Tokenizer(expr).Tokenize() |
| if not tokens: |
| - print "Malformed expression: '%s'" % expr |
| + print 'Malformed expression: "%s"' % expr |
| return None |
| scan = Scanner(tokens) |
| ast = ParseLogicalExpression(scan) |
| if not ast: |
| - print "Malformed expression: '%s'" % expr |
| + print 'Malformed expression: "%s"' % expr |
| return None |
| if scan.HasMore(): |
| - print "Malformed expression: '%s'" % expr |
| + print 'Malformed expression: "%s"' % expr |
| return None |
| return ast |
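Taken together, the tokenizer and the recursive-descent parser above turn a
.status condition into an Expression tree that Main() can evaluate against its
env dictionary. A sketch of the round trip (values illustrative; the exact
'==' semantics depend on GetOutcomes, parts of which are elided above):

  ast = ParseCondition('$mode == debug && ($arch == ia32 || $arch == dartc)')
  env = {'mode': 'debug', 'system': 'linux', 'arch': 'ia32'}
  if ast and ast.Evaluate(env, {}):
    print 'section applies to this configuration'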
| @@ -1047,20 +965,34 @@ class ClassifiedTest(object): |
| class Configuration(object): |
| - """The parsed contents of a configuration file""" |
| + """The parsed contents of a configuration file.""" |
| def __init__(self, sections, defs): |
| self.sections = sections |
| self.defs = defs |
| def ClassifyTests(self, cases, env): |
| - sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)] |
| + """Matches a test case with the test prefixes requested on the cmdline? |
|
ahe 2011/10/11 07:55:34: ? -> .
zundel 2011/10/11 22:45:44: Done.
|
| + |
| + This 'wraps' each TestCase object with some meta information |
| + about the test. |
| + |
| + Args: |
| + cases: list of TestCase objects to classify. |
| + env: dictionary containing values for 'mode': mode, |
| + 'system' and 'arch'. |
| + |
| + Returns: |
| + A triplet of (result, rules, expected_outcomes). |
| + """ |
| + sections = [s for s in self.sections |
| + if s.condition.Evaluate(env, self.defs)] |
| all_rules = reduce(list.__add__, [s.rules for s in sections], []) |
| unused_rules = set(all_rules) |
| - result = [ ] |
| + result = [] |
| all_outcomes = set([]) |
| for case in cases: |
| - matches = [ r for r in all_rules if r.Contains(case.path) ] |
| + matches = [r for r in all_rules if r.Contains(case.path)] |
| outcomes = set([]) |
| for rule in matches: |
| outcomes = outcomes.union(rule.GetOutcomes(env, self.defs)) |
| @@ -1074,20 +1006,22 @@ class Configuration(object): |
| class Section(object): |
| - """A section of the configuration file. Sections are enabled or |
| - disabled prior to running the tests, based on their conditions""" |
| + """A section of the configuration file. |
| + |
| + Sections are enabled or disabled prior to running the tests, |
| + based on their conditions. |
| + """ |
| def __init__(self, condition): |
| self.condition = condition |
| - self.rules = [ ] |
| + self.rules = [] |
| def AddRule(self, rule): |
| self.rules.append(rule) |
| class Rule(object): |
| - """A single rule that specifies the expected outcome for a single |
| - test.""" |
| + """A single rule that specifies the expected outcome for a single test.""" |
| def __init__(self, raw_path, path, value): |
| self.raw_path = raw_path |
| @@ -1095,20 +1029,21 @@ class Rule(object): |
| self.value = value |
| def GetOutcomes(self, env, defs): |
| - set = self.value.GetOutcomes(env, defs) |
| - assert isinstance(set, ListSet) |
| - return set.elms |
| + outcomes = self.value.GetOutcomes(env, defs) |
| + assert isinstance(outcomes, ListSet) |
| + return outcomes.elms |
| def Contains(self, path): |
| + """Returns True if the specified path matches this rule (regexp).""" |
| if len(self.path) > len(path): |
| return False |
| for i in xrange(len(self.path)): |
| try: |
| if not self.path[i].match(path[i]): |
| return False |
| - except: |
| - print "Invalid reguar expression %s in .status file. " % '/'.join(path) |
| - print "Try escaping special characters with \\" |
| + except: |
| + print 'Invalid regexp %s in .status file. ' % '/'.join(path) |
| + print 'Try escaping special characters with \\' |
| raise |
| return True |
| @@ -1120,6 +1055,7 @@ PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$') |
| def ReadConfigurationInto(path, sections, defs): |
| + """Parses a .status file into specified sections and defs arguments.""" |
| current_section = Section(Constant(True)) |
| sections.append(current_section) |
| prefix = [] |
| @@ -1134,7 +1070,7 @@ def ReadConfigurationInto(path, sections, defs): |
| continue |
| rule_match = RULE_PATTERN.match(line) |
| if rule_match: |
| - path = prefix + SplitPath(rule_match.group(1).strip()) |
| + path = prefix + _SplitPath(rule_match.group(1).strip()) |
| value_str = rule_match.group(2).strip() |
| value = ParseCondition(value_str) |
| if not value: |
| @@ -1151,9 +1087,9 @@ def ReadConfigurationInto(path, sections, defs): |
| continue |
| prefix_match = PREFIX_PATTERN.match(line) |
| if prefix_match: |
| - prefix = SplitPath(prefix_match.group(1).strip()) |
| + prefix = _SplitPath(prefix_match.group(1).strip()) |
| continue |
| - print "Malformed line: '%s'." % line |
| + print 'Malformed line: "%s".' % line |
| return False |
| return True |
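For reference, the shape of .status input this function parses looks roughly
like the following; the prefix, test paths, and outcomes here are made up for
illustration:

  prefix corelib
  [ $mode == debug && $arch == dartc ]
  StringTest: Fail, Okay
  Isolate*Test: Skip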
| @@ -1164,86 +1100,103 @@ def ReadConfigurationInto(path, sections, defs): |
| def BuildOptions(): |
| + """Confiigures the Python optparse library with the cmdline for test.py.""" |
|
ahe 2011/10/11 07:55:34: Confiigures -> Configures
zundel 2011/10/11 22:45:44: Done.
|
| result = optparse.OptionParser() |
| - result.add_option("-m", "--mode", |
| - help="The test modes in which to run (comma-separated)", |
| + result.add_option( |
| + '-m', '--mode', |
| + help='The test modes in which to run (comma-separated)', |
| metavar='[all,debug,release]', |
| default='debug') |
| - result.add_option("-v", "--verbose", |
| - help="Verbose output", |
| + result.add_option( |
| + '-v', '--verbose', |
| + help='Verbose output', |
| default=False, |
| - action="store_true") |
| - result.add_option("-p", "--progress", |
| - help="The style of progress indicator (verbose, line, color, mono)", |
| + action='store_true') |
| + result.add_option( |
| + '-p', '--progress', |
| + help='The style of progress indicator (verbose, line, color, mono)', |
| choices=PROGRESS_INDICATORS.keys(), |
| default=None) |
| - result.add_option("--report", |
| - help="Print a summary of the tests to be run", |
| + result.add_option( |
| + '--report', |
| + help='Print a summary of the tests to be run', |
| default=False, |
| - action="store_true") |
| - result.add_option("--list", |
| - help="List all the tests, but don't run them", |
| + action='store_true') |
| + result.add_option( |
| + '--list', |
| + help='List all the tests, but don\'t run them', |
| default=False, |
| - action="store_true") |
| - result.add_option("-s", "--suite", |
| - help="A test suite", |
| + action='store_true') |
| + result.add_option( |
| + '-s', '--suite', |
| + help='A test suite', |
| default=[], |
| - action="append") |
| - result.add_option("-t", "--timeout", |
| - help="Timeout in seconds", |
| + action='append') |
| + result.add_option( |
| + '-t', '--timeout', |
| + help='Timeout in seconds', |
| default=None, |
| - type="int") |
| - result.add_option("--checked", |
| - help="Run tests in checked mode", |
| + type='int') |
| + result.add_option( |
| + '--checked', |
| + help='Run tests in checked mode', |
| default=False, |
| - action="store_true") |
| - result.add_option("--flag", |
| - help="Pass this additional flag to the VM", |
| + action='store_true') |
| + result.add_option( |
| + '--flag', |
| + help='Pass this additional flag to the VM', |
| default=[], |
| - action="append") |
| - result.add_option("--arch", |
| - help="The architecture to run tests for", |
| - metavar="[all,ia32,x64,simarm,arm,dartc]", |
| + action='append') |
| + result.add_option( |
| + '--arch', |
| + help='The architecture to run tests for', |
| + metavar='[all,ia32,x64,simarm,arm,dartc]', |
| default=ARCH_GUESS) |
| - result.add_option("--os", |
| - help="The OS to run tests on", |
| + result.add_option( |
| + '--os', |
| + help='The OS to run tests on', |
| default=OS_GUESS) |
| - result.add_option("--valgrind", |
| - help="Run tests through valgrind", |
| + result.add_option( |
| + '--valgrind', |
| + help='Run tests through valgrind', |
| default=False, |
| - action="store_true") |
| - result.add_option("-j", "--tasks", |
| - help="The number of parallel tasks to run", |
| + action='store_true') |
| + result.add_option( |
| + '-j', '--tasks', |
| + help='The number of parallel tasks to run', |
| metavar=testing.HOST_CPUS, |
| default=testing.USE_DEFAULT_CPUS, |
| - type="int") |
| - result.add_option("--time", |
| - help="Print timing information after running", |
| + type='int') |
| + result.add_option( |
| + '--time', |
| + help='Print timing information after running', |
| default=False, |
| - action="store_true") |
| - result.add_option("--executable", |
| - help="The executable with which to run the tests", |
| + action='store_true') |
| + result.add_option( |
| + '--executable', |
| + help='The executable with which to run the tests', |
| default=None) |
| - result.add_option("--keep_temporary_files", |
| - help="Do not delete temporary files after running the tests", |
| + result.add_option( |
| + '--keep_temporary_files', |
| + help='Do not delete temporary files after running the tests', |
| default=False, |
| - action="store_true") |
| - result.add_option("--batch", |
| - help="Run multiple tests for dartc architecture in a single vm", |
| - choices=["true","false"], |
| - default="true", |
| - type="choice"); |
| - result.add_option("--optimize", |
| - help="Invoke dart compiler with --optimize flag", |
| + action='store_true') |
| + result.add_option( |
| + '--batch', |
| + help='Run multiple tests for dartc architecture in a single vm', |
| + choices=['true', 'false'], |
| + default='true', |
| + type='choice') |
| + result.add_option( |
| + '--optimize', |
| + help='Invoke dart compiler with --optimize flag', |
| default=False, |
| - action="store_true") |
| - |
| + action='store_true') |
| return result |
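With these options, a typical developer invocation looks something like the
following (the suite names come from BUILT_IN_TESTS above):

  python tools/test.py --mode=release --arch=ia32 --progress=color -j4 \
      corelib language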
| def ProcessOptions(options): |
| - global VERBOSE |
| - VERBOSE = options.verbose |
| + """Process command line options.""" |
| if options.arch == 'all': |
| options.arch = 'ia32,x64,simarm' |
| if options.mode == 'all': |
| @@ -1252,22 +1205,24 @@ def ProcessOptions(options): |
| # a simulated architecture and in debug mode. |
| if not options.timeout: |
| options.timeout = TIMEOUT_SECS |
| - if 'dartc' in options.arch: options.timeout *= 4 |
| - elif 'chromium' in options.arch: options.timeout *= 4 |
| - elif 'dartium' in options.arch: options.timeout *= 4 |
| - elif 'debug' in options.mode: options.timeout *= 2 |
| - # TODO(zundel): is arch 'sim' out of date? |
| - if 'sim' in options.arch: options.timeout *= 4 |
| + if 'dartc' in options.arch: |
| + options.timeout *= 4 |
| + elif 'chromium' in options.arch: |
| + options.timeout *= 4 |
| + elif 'dartium' in options.arch: |
| + options.timeout *= 4 |
| + elif 'debug' in options.mode: |
| + options.timeout *= 2 |
| options.mode = options.mode.split(',') |
| options.arch = options.arch.split(',') |
| for mode in options.mode: |
| if not mode in ['debug', 'release']: |
| - print "Unknown mode %s" % mode |
| + print 'Unknown mode %s' % mode |
| return False |
| for arch in options.arch: |
| if not arch in ['ia32', 'x64', 'simarm', 'arm', 'dartc', 'dartium', |
| 'chromium']: |
| - print "Unknown arch %s" % arch |
| + print 'Unknown arch %s' % arch |
| return False |
| options.flags = [] |
| if (arch == 'dartc' or arch == 'chromium') and mode == 'release': |
| @@ -1281,7 +1236,7 @@ def ProcessOptions(options): |
| for flag in options.flag: |
| options.flags.append(flag) |
| if options.verbose: |
| - print "Flags on the command line:" |
| + print 'Flags on the command line:' |
| for x in options.flags: |
| print x |
| # If the user hasn't specified the progress indicator, we pick |
| @@ -1306,91 +1261,109 @@ Total: %(total)i tests |
| * %(batched)4d tests are running in batch mode\ |
| """ |
| + |
| def PrintReport(cases): |
| - """Print a breakdown of which tests are marked pass/skip/fail """ |
| + """Print a breakdown of which tests are marked pass/skip/fail.""" |
| + |
| def IsFlaky(o): |
| return ((testing.PASS in o) and (testing.FAIL in o) |
| and (not testing.CRASH in o) and (not testing.OKAY in o)) |
| + |
| def IsFailOk(o): |
| return (len(o) == 2) and (testing.FAIL in o) and (testing.OKAY in o) |
|
ahe 2011/10/11 07:55:34: Add newline.
zundel 2011/10/11 22:45:44: Done.
|
| unskipped = [c for c in cases if not testing.SKIP in c.outcomes] |
| print REPORT_TEMPLATE % { |
| - 'total': len(cases), |
| - 'skipped': len(cases) - len(unskipped), |
| - 'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]), |
| - 'pass': len([t for t in unskipped if list(t.outcomes) == [testing.PASS]]), |
| - 'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]), |
| - 'fail': len([t for t in unskipped if list(t.outcomes) == [testing.FAIL]]), |
| - 'crash': len([t for t in unskipped if list(t.outcomes) == [testing.CRASH]]), |
| - 'batched' : len([t for t in unskipped if t.case.IsBatchable()]) |
| + 'total': len(cases), |
| + 'skipped': len(cases) - len(unskipped), |
| + 'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]), |
| + 'pass': len([t for t in unskipped |
| + if list(t.outcomes) == [testing.PASS]]), |
| + 'fail_ok': len([t for t in unskipped |
| + if IsFailOk(t.outcomes)]), |
| + 'fail': len([t for t in unskipped |
| + if list(t.outcomes) == [testing.FAIL]]), |
| + 'crash': len([t for t in unskipped |
| + if list(t.outcomes) == [testing.CRASH]]), |
| + 'batched': len([t for t in unskipped if t.case.IsBatchable()]) |
| } |
| def PrintTests(cases): |
| + """Print a table of the tests to be run (--list cmdline option).""" |
| has_errors = False |
| for case in cases: |
| try: |
| case.case.GetCommand() |
| except: |
| + # Python can throw an exception while parsing the .dart file. |
| + # We don't want to end the program. |
|
ahe 2011/10/11 07:55:34: We should probably improve this. It is a bit of a
zundel 2011/10/11 22:45:44: Added TODO
|
| sys.stderr.write(case.case.filename + '\n') |
| has_errors = True |
| if has_errors: |
| raise Exception('Errors in above files') |
| for case in [c for c in cases if not testing.SKIP in c.outcomes]: |
| - print "%s\t%s\t%s\t%s" %('/'.join(case.case.path), |
| + print '%s\t%s\t%s\t%s' %('/'.join(case.case.path), |
| ','.join(case.outcomes), |
| case.case.IsNegative(), |
| '\t'.join(case.case.GetCommand()[1:])) |
| class Pattern(object): |
| + """Convenience class to hold a compiled re pattern.""" |
| def __init__(self, pattern): |
| self.pattern = pattern |
| self.compiled = None |
| - def match(self, str): |
| + def match(self, buf): |
| if not self.compiled: |
| - pattern = "^" + self.pattern.replace('*', '.*') + "$" |
| + pattern = '^' + self.pattern.replace('*', '.*') + '$' |
|
ahe 2011/10/11 07:55:34: I think the style guide prefer: '^%s$' % self.patt
zundel 2011/10/11 22:45:44: Done.
|
| self.compiled = re.compile(pattern) |
| - return self.compiled.match(str) |
| + return self.compiled.match(buf) |
| def __str__(self): |
| return self.pattern |
| -def SplitPath(s): |
| - stripped = [ c.strip() for c in s.split('/') ] |
| - return [ Pattern(s) for s in stripped if len(s) > 0 ] |
| +def _SplitPath(s): |
| + """Split a path into directories - opposite of os.path.join()?""" |
| + stripped = [c.strip() for c in s.split('/')] |
| + return [Pattern(s) for s in stripped if s] |
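Pattern and _SplitPath turn a command line test selector into a list of glob
matchers, for instance (selector illustrative):

  parts = _SplitPath('corelib/String*')
  print [str(p) for p in parts]             # ['corelib', 'String*']
  print bool(parts[1].match('StringTest'))  # True: '*' is rewritten to '.*'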
| def GetSpecialCommandProcessor(value): |
| if (not value) or (value.find('@') == -1): |
| + |
| def ExpandCommand(args): |
| return args |
| + |
| return ExpandCommand |
| else: |
| pos = value.find('@') |
| - import urllib |
| prefix = urllib.unquote(value[:pos]).split() |
| suffix = urllib.unquote(value[pos+1:]).split() |
| + |
| def ExpandCommand(args): |
| return prefix + args + suffix |
| + |
| return ExpandCommand |
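With the --valgrind wiring in Main() below, this wraps every test command;
roughly (paths illustrative):

  expand = GetSpecialCommandProcessor('python -u tools/valgrind.py @')
  print expand(['out/Debug/dart_bin', 'test.dart'])
  # ['python', '-u', 'tools/valgrind.py', 'out/Debug/dart_bin', 'test.dart']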
| def GetSuites(test_root): |
| def IsSuite(path): |
| - return isdir(path) and exists(join(path, 'testcfg.py')) |
| - return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ] |
| + return os.path.isdir(path) and os.path.exists( |
| + os.path.join(path, 'testcfg.py')) |
| + return [f for f in os.listdir(test_root) if IsSuite( |
| + os.path.join(test_root, f))] |
| def FormatTime(d): |
| millis = round(d * 1000) % 1000 |
| - return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis) |
| + return time.strftime('%M:%S.', time.gmtime(d)) + ('%03i' % millis) |
| def Main(): |
| + """Main loop.""" |
| utils.ConfigureJava() |
| parser = BuildOptions() |
| (options, args) = parser.parse_args() |
| @@ -1398,32 +1371,33 @@ def Main(): |
| parser.print_help() |
| return 1 |
| - client = abspath(join(dirname(sys.argv[0]), '..')) |
| + client = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..')) |
| repositories = [] |
| for component in os.listdir(client) + ['.']: |
| - test_path = join(client, component, 'tests') |
| - if exists(test_path) and isdir(test_path): |
| + test_path = os.path.join(client, component, 'tests') |
| + if os.path.exists(test_path) and os.path.isdir(test_path): |
| suites = GetSuites(test_path) |
| - repositories += [TestRepository(join(test_path, name)) for name in suites] |
| + repositories += [TestRepository(os.path.join(test_path, name)) |
| + for name in suites] |
| repositories += [TestRepository(a) for a in options.suite] |
| root = LiteralTestSuite(repositories) |
| - if len(args) == 0: |
| - paths = [SplitPath(t) for t in BUILT_IN_TESTS] |
| - else: |
| - paths = [ ] |
| + if args: |
| + paths = [] |
| for arg in args: |
| - path = SplitPath(arg) |
| + path = _SplitPath(arg) |
| paths.append(path) |
| + else: |
| + paths = [_SplitPath(t) for t in BUILT_IN_TESTS] |
| # Check for --valgrind option. If enabled, we overwrite the special |
| # command flag with a command that uses the tools/valgrind.py script. |
| if options.valgrind: |
| - run_valgrind = join(client, 'runtime', 'tools', 'valgrind.py') |
| - options.special_command = "python -u " + run_valgrind + " @" |
| + run_valgrind = os.path.join(client, 'runtime', 'tools', 'valgrind.py') |
| + options.special_command = 'python -u ' + run_valgrind + ' @' |
| context = Context(client, |
| - VERBOSE, |
| + options.verbose, |
| options.os, |
| options.timeout, |
| GetSpecialCommandProcessor(options.special_command), |
| @@ -1434,31 +1408,31 @@ def Main(): |
| options.batch) |
| # Get status for tests |
| - sections = [ ] |
| - defs = { } |
| + sections = [] |
| + defs = {} |
| root.GetTestStatus(context, sections, defs) |
| config = Configuration(sections, defs) |
| # List the tests |
| - all_cases = [ ] |
| - all_unused = [ ] |
| - unclassified_tests = [ ] |
| + all_cases = [] |
| + all_unused = [] |
| globally_unused_rules = None |
| for path in paths: |
| for mode in options.mode: |
| for arch in options.arch: |
| env = { |
| - 'mode': mode, |
| - 'system': utils.GuessOS(), |
| - 'arch': arch, |
| + 'mode': mode, |
| + 'system': utils.GuessOS(), |
| + 'arch': arch, |
| } |
| test_list = root.ListTests([], path, context, mode, arch) |
| - unclassified_tests += test_list |
| - (cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env) |
| + (cases, unused_rules, unused_outcomes) = config.ClassifyTests( |
| + test_list, env) |
| if globally_unused_rules is None: |
| globally_unused_rules = set(unused_rules) |
| else: |
| - globally_unused_rules = globally_unused_rules.intersection(unused_rules) |
| + globally_unused_rules = ( |
| + globally_unused_rules.intersection(unused_rules)) |
| all_cases += cases |
| all_unused.append(unused_rules) |
| @@ -1467,21 +1441,20 @@ def Main(): |
| if options.list: |
| PrintTests(all_cases) |
| - return 0; |
| + return 0 |
| result = None |
| + |
| def DoSkip(case): |
| return testing.SKIP in case.outcomes or testing.SLOW in case.outcomes |
| - cases_to_run = [ c for c in all_cases if not DoSkip(c) ] |
| + |
| + cases_to_run = [c for c in all_cases if not DoSkip(c)] |
| # Creating test cases may generate temporary files. Make sure |
| # Skipped tests clean up these files. |
| for c in all_cases: |
| if DoSkip(c): c.case.Cleanup() |
| - if len(cases_to_run) == 0: |
| - print "No tests to run." |
| - return 0 |
| - else: |
| + if cases_to_run: |
| try: |
| start = time.time() |
| if RunTestCases(cases_to_run, options.progress, options.tasks, |
| @@ -1491,18 +1464,21 @@ def Main(): |
| result = 1 |
| duration = time.time() - start |
| except KeyboardInterrupt: |
| - print "Exiting on KeyboardInterrupt" |
| + print 'Exiting on KeyboardInterrupt' |
| return 1 |
| + else: |
| + print 'No tests to run.' |
| + return 0 |
| if options.time: |
| - print "--- Total time: %s ---" % FormatTime(duration) |
| - timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ] |
| + print '--- Total time: %s ---' % FormatTime(duration) |
| + timed_tests = [t.case for t in cases_to_run if not t.case.duration is None] |
| timed_tests.sort(lambda a, b: a.CompareTime(b)) |
| index = 1 |
| for entry in timed_tests[:20]: |
| t = FormatTime(entry.duration) |
| - print "%4i (%s) %s" % (index, t, entry.GetLabel()) |
| + print '%4i (%s) %s' % (index, t, entry.GetLabel()) |
| index += 1 |
| return result |