Index: scripts/slave/unittests/expect_tests.py |
diff --git a/scripts/slave/unittests/expect_tests.py b/scripts/slave/unittests/expect_tests.py |
new file mode 100755 |
index 0000000000000000000000000000000000000000..5ae71f237e6e15fa5e3fbf386d73a7c1dcd15484 |
--- /dev/null |
+++ b/scripts/slave/unittests/expect_tests.py |
@@ -0,0 +1,785 @@ |
+#!/usr/bin/python |
+# Copyright 2014 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+ |
+ |
+import Queue |
+import argparse |
+import collections |
+import contextlib |
+import difflib |
+import glob |
+import json |
+import multiprocessing |
+import os |
+import pdb |
+import pprint |
+import re |
+import signal |
+import sys |
+import time |
+ |
+from collections import namedtuple |
+ |
+from cStringIO import StringIO |
+ |
+import test_env # pylint: disable=unused-import |
+ |
+import coverage |
+ |
+try: |
+ import yaml |
+except ImportError: |
+ yaml = None |
+ |
+# Private types (must be module-level to be pickleable) |
+WriteAction = namedtuple('WriteAction', 'test why') |
+NoAction = namedtuple('NoAction', 'test') |
+DirSeen = namedtuple('DirSeen', 'dir') |
+Missing = namedtuple('Missing', 'test') |
+Fail = namedtuple('Fail', 'test diff') |
+Pass = namedtuple('Pass', 'test') |
+ |
+ |
+UnknownError = namedtuple('UnknownError', 'message') |
+TestError = namedtuple('TestError', 'test message') |
+_NonExistant = object() |
+ |
+# Serialization |
+SUPPORTED_SERIALIZERS = {'json', 'yaml'} |
+SERIALIZERS = {} |
+ |
+def re_encode(obj): |
+ if isinstance(obj, dict): |
+ return {re_encode(k): re_encode(v) for k, v in obj.iteritems()} |
+ elif isinstance(obj, list): |
+ return [re_encode(i) for i in obj] |
+ elif isinstance(obj, unicode): |
+ return obj.encode('utf-8') |
+ else: |
+ return obj |
+ |
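A quick illustrative note on re_encode (annotation, not part of the patch; the sample dict below is made up): under Python 2, json.load()/json.loads() return unicode objects, while test results are built from plain str values, so loaded expectations are re-encoded to UTF-8 byte strings before they are compared.

import json

# Illustrative only; assumes the re_encode() helper defined above.
raw = json.loads('{"key": ["value", 7]}')
print type(raw.keys()[0])             # <type 'unicode'>
print type(re_encode(raw).keys()[0])  # <type 'str'>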
+SERIALIZERS['json'] = ( |
+ lambda s: re_encode(json.load(s)), |
+ lambda data, stream: json.dump( |
+ data, stream, sort_keys=True, indent=2, separators=(',', ': '))) |
+ |
+if yaml: |
+ _YAMLSafeLoader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader) |
+ _YAMLSafeDumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper) |
+ SERIALIZERS['yaml'] = ( |
+ lambda stream: yaml.load(stream, _YAMLSafeLoader), |
+ lambda data, stream: yaml.dump( |
+ data, stream, _YAMLSafeDumper, default_flow_style=False, |
+ encoding='utf-8')) |
+ |
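For readers skimming the review, a minimal sketch (annotation only, not part of the patch; the sample data is made up) of how the (loader, dumper) pairs registered in SERIALIZERS round-trip expectation data through a stream:

from cStringIO import StringIO

# Illustrative only; assumes the SERIALIZERS table defined above.
load, dump = SERIALIZERS['json']   # the 'yaml' entry has the same shape
buf = StringIO()
dump({'b': 2, 'a': [1, 2]}, buf)   # sorted keys, two-space indent
buf.seek(0)
assert load(buf) == {'a': [1, 2], 'b': 2}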
+ |
+# Result Queue Handlers |
+class _Handler(object): |
+ """Handler object. |
+ |
+ Defines 3 nested classes for each stage of the test pipeline. The pipeline |
+ looks like: |
+ |
+                                 jobs |
+  GenStage -> test_queue -> [RunStage x N] -> result_queue -> ResultStage (main) |
+ |
+ Each process will have an instance of one of the nested handler classes, which |
+ will be called on each test / result. |
+ |
+ You can skip the RunStage phase by setting SKIP_RUNLOOP to True on your |
+ implementation class. |
+ |
+ Tips: |
+ * Only do printing in ResultStage, since it's running on the main process. |
+ """ |
+ SKIP_RUNLOOP = False |
Vadim Sh.
2014/04/01 18:27:50
SKIP_RUNSTAGE?
|
+ |
+ @staticmethod |
+ def add_options(parser): |
+ """ |
+ @type parser: argparse.ArgumentParser() |
+ """ |
+ pass |
+ |
+ class GenStage(object): |
Vadim Sh.
2014/04/01 18:27:50
It occurred to me after sending the previous revie
|
+ def __init__(self, opts): |
+ self.opts = opts |
+ |
+ def __call__(self, test, put_result): |
+ """Called for each |test| generated which matches the test_globs. |
+ |
+ @param test: The generated Test object. |
+ @type test: Test() |
+ @param put_result: Call with any object to pass it to the ResultStage. |
+ @return: True iff the RunStage should run |test| |
+ @rtype: bool |
+ """ |
+ return True |
+ |
+ class RunStage(object): |
+ def __init__(self, opts): |
+ self.opts = opts |
+ |
+ def __call__(self, test, result, put_result): |
+ """Called for each |test| which ran and generated |result|. |
+ |
+ @param test: The generated Test object which was run. |
+ @type test: Test() |
+ @param result: The result of running |test| |
+ @type result: Result() |
+ @param put_result: Call this with any object to pass it to the ResultStage |
+ phase. |
+ """ |
+ put_result(result) |
+ |
+ class ResultStage(object): |
+ def __init__(self, opts): |
+ self.opts = opts |
+ |
+ def __call__(self, obj): |
+ """Called for each obj result from GenStage or RunStage. |
+ |
+ @type obj: Anything passed to put_result in GenStage or RunStage. |
+ |
+ Works similarly to a SAX XML parser by dispatching to |
+ self.handle_{type(obj).__name__} |
+ |
+ So if |obj| is a Test, this would call self.handle_Test(obj). |
+ |
+ If you implement handle(obj), then it will be called for any |
+ otherwise unhandled obj. |
+ |
+ @return: False iff the program should ultimately terminate with an error |
+ code. Note that None does NOT count as an error condition. |
+ """ |
+ return getattr(self, 'handle_%s' % type(obj).__name__, self.handle)(obj) |
+ |
+ def handle(self, obj): |
+ if self.opts.verbose: |
+ print 'UNHANDLED:', obj |
+ return False |
+ |
+ def finalize(self, aborted): |
+ """Called after __call__() has been called for all results. |
+ |
+ @param aborted: True if the user aborted the run. |
+ @type aborted: bool |
+ """ |
+ pass |
+ |
+ |
+class _ListHandler(_Handler): |
+ """List all of the tests instead of running them.""" |
+ SKIP_RUNLOOP = True |
+ |
+ class GenStage(_Handler.GenStage): |
+ def __call__(self, test, put_result): |
+ put_result(test) |
+ |
+ class ResultStage(_Handler.ResultStage): |
+ @staticmethod |
+ def handle_Test(test): |
+ print test.name |
+ |
+ # TODO(iannucci): group tests by dir? |
+ # TODO(iannucci): print more data about the test in verbose mode? |
+ |
+ |
+class _DebugHandler(_Handler): |
+ """Execute each test under the pdb debugger.""" |
+ SKIP_RUNLOOP = True |
+ |
+ class GenStage(_Handler.GenStage): |
+ def __call__(self, test, put_result): |
+ put_result(test) |
+ |
+ class ResultStage(_Handler.ResultStage): |
+ @staticmethod |
+ def handle_Test(test): |
+ dbg = pdb.Pdb() |
+ for path, line, funcname in test.breakpoints: |
+ dbg.set_break(path, line, funcname=funcname) |
+ |
+ dbg.reset() |
+ |
+ def dispatch_thunk(*args): |
+ """Allows us to continue until the actual breakpoint.""" |
+ val = dbg.trace_dispatch(*args) |
+ dbg.set_continue() |
+ sys.settrace(dbg.trace_dispatch) |
+ return val |
+ sys.settrace(dispatch_thunk) |
+ try: |
+ test.run() |
+ except pdb.bdb.BdbQuit: |
+ pass |
+ finally: |
+ dbg.quitting = 1 |
+ sys.settrace(None) |
+ |
+ |
+class _TrainHandler(_Handler): |
+ """Write test expectations to disk.""" |
+ @staticmethod |
+ def add_options(parser): |
+ assert isinstance(parser, argparse.ArgumentParser) |
+ parser.add_argument( |
+ '--force', action='store_true', help=( |
+ 'Immediately write expectations to disk instead of determining if ' |
+ 'they contain a diff from the current expectations.' |
+ )) |
+ |
+ class GenStage(_Handler.GenStage): |
+ def __init__(self, *args): |
+ super(_TrainHandler.GenStage, self).__init__(*args) |
+ self.dirs_seen = set() |
+ |
+ def __call__(self, test, put_result): |
+ if test.expect_dir not in self.dirs_seen: |
+ try: |
+ os.makedirs(test.expect_dir) |
+ except OSError: |
+ pass |
+ put_result(DirSeen(test.expect_dir)) |
+ self.dirs_seen.add(test.expect_dir) |
+ return True |
+ |
+ class RunStage(_Handler.RunStage): |
+ def __call__(self, test, result, put_result): |
+ if self.opts.force: |
+ _WriteNewData(test, result.data) |
+ put_result(WriteAction(test, 'forced')) |
+ return |
+ |
+ current, same_schema = _GetCurrentData(test) |
+ diff = _DiffData(current, result.data) |
+ if diff or not same_schema: |
+ _WriteNewData(test, result.data) |
+ if current is _NonExistant: |
+ why = 'missing' |
+ elif diff: |
+ why = 'diff' |
+ else: |
+ why = 'schema changed' |
+ put_result(WriteAction(test, why)) |
+ else: |
+ put_result(NoAction(test)) |
+ |
+ class ResultStage(_Handler.ResultStage): |
+ def __init__(self, opts): |
+ super(_TrainHandler.ResultStage, self).__init__(opts) |
+ self.dirs_seen = set() |
+ self.files_expected = collections.defaultdict(set) |
+ self.start = time.time() |
+ self.num_tests = 0 |
+ |
+ def _record_expected(self, test): |
+ head, tail = os.path.split(test.expect_path()) |
+ self.files_expected[head].add(tail) |
+ |
+ def handle_DirSeen(self, dirseen): |
+ self.dirs_seen.add(dirseen.dir) |
+ |
+ def handle_NoAction(self, result): |
+ self._record_expected(result.test) |
+ if self.opts.verbose: |
+ print '%s did not change' % result.test.name |
+ |
+ def handle_WriteAction(self, result): |
+ self._record_expected(result.test) |
+ if not self.opts.quiet: |
+ test = result.test |
+ name = test.expect_path() if self.opts.verbose else test.name |
+ print 'Wrote %s: %s' % (name, result.why) |
+ |
+ def finalize(self, aborted): |
+ if not aborted and not self.opts.test_glob: |
+ for d in self.dirs_seen: |
+ expected = self.files_expected[d] |
+ for f in os.listdir(d): |
+ if f == 'OWNERS': |
+ continue |
+ if f not in expected: |
+ path = os.path.join(d, f) |
+ os.unlink(path) |
+ if self.opts.verbose: |
+ print 'Removed unexpected file', path |
+ if not self.opts.quiet: |
+ num_tests = sum(len(x) for x in self.files_expected.itervalues()) |
+ print 'Trained %d tests in %0.3fs' % ( |
+ num_tests, time.time() - self.start) |
+ |
+ |
+class _TestHandler(_Handler): |
+ """Run the tests.""" |
+ class RunStage(_Handler.RunStage): |
+ def __call__(self, test, result, put_result): |
+ current, _ = _GetCurrentData(test) |
+ if current is _NonExistant: |
+ put_result(Missing(test)) |
+ else: |
+ diff = _DiffData(current, result.data) |
+ if not diff: |
+ put_result(Pass(test)) |
+ else: |
+ put_result(Fail(test, diff)) |
+ |
+ class ResultStage(_Handler.ResultStage): |
+ def __init__(self, *args): |
+ super(_TestHandler.ResultStage, self).__init__(*args) |
+ self.err_out = StringIO() |
+ self.start = time.time() |
+ self.errors = collections.defaultdict(int) |
+ self.num_tests = 0 |
+ |
+ def emit(self, short, test, verbose): |
+ if self.opts.verbose: |
+ print >> sys.stdout, '%s ... %s' % (test.name if test else '????', |
+ verbose) |
+ else: |
+ sys.stdout.write(short) |
+ sys.stdout.flush() |
+ |
+ def add_result(self, msg_lines, test, header, category): |
+ print >> self.err_out |
+ print >> self.err_out, '=' * 70 |
+ if test is not None: |
+ print >> self.err_out, '%s: %s (%s)' % ( |
+ header, test.name, test.expect_path()) |
+ print >> self.err_out, '-' * 70 |
+ if msg_lines: |
+ print >> self.err_out, '\n'.join(msg_lines) |
+ self.errors[category] += 1 |
+ self.num_tests += 1 |
+ |
+ def handle_Pass(self, p): |
+ if not self.opts.quiet: |
+ self.emit('.', p.test, 'ok') |
+ self.num_tests += 1 |
+ |
+ def handle_Fail(self, fail): |
+ self.emit('F', fail.test, 'FAIL') |
+ self.add_result(fail.diff, fail.test, 'FAIL', 'failures') |
+ return False |
+ |
+ def handle_TestError(self, test_error): |
+ self.emit('E', test_error.test, 'ERROR') |
+ self.add_result([test_error.message], test_error.test, 'ERROR', 'errors') |
+ return False |
+ |
+ def handle_UnknownError(self, error): |
+ self.emit('U', None, 'UNKNOWN ERROR') |
+ self.add_result([error.message], None, 'UNKNOWN ERROR', 'unknown_errors') |
+ return False |
+ |
+ def handle_Missing(self, missing): |
+ self.emit('M', missing.test, 'MISSING') |
+ self.add_result([], missing.test, 'MISSING', 'missing') |
+ return False |
+ |
+ def finalize(self, aborted): |
+ # TODO(iannucci): print summary stats (and timing info?) |
+ buf = self.err_out.getvalue() |
+ if buf: |
+ print buf |
+ if not self.opts.quiet: |
+ print '-' * 70 |
+ print 'Ran %d tests in %0.3fs' % ( |
+ self.num_tests, time.time() - self.start) |
+ if aborted: |
+ print 'ABORTED' |
+ elif self.errors: |
+ print 'FAILED (%s)' % (', '.join('%s=%d' % i |
+ for i in self.errors.iteritems())) |
+ elif not self.opts.quiet: |
+ print 'OK' |
+ |
+ |
+HANDLERS = { |
+ 'list': _ListHandler, |
+ 'debug': _DebugHandler, |
+ 'train': _TrainHandler, |
+ 'test': _TestHandler, |
+} |
+ |
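To make the handler contract concrete, here is a minimal sketch of a hypothetical extra mode (annotation only; the 'count' mode name, _CountHandler class, and the external registration are illustrative and not part of this patch). It mirrors _ListHandler above: it skips the run loop and simply counts the tests the generator yields.

import expect_tests

# Illustrative sketch only; assumes this module is importable as expect_tests.
class _CountHandler(expect_tests._Handler):
  """Count the matching tests without running them."""
  SKIP_RUNLOOP = True

  class GenStage(expect_tests._Handler.GenStage):
    def __call__(self, test, put_result):
      put_result(test)  # forward the Test itself to the ResultStage

  class ResultStage(expect_tests._Handler.ResultStage):
    def __init__(self, opts):
      super(_CountHandler.ResultStage, self).__init__(opts)
      self.count = 0

    def handle_Test(self, test):
      self.count += 1

    def finalize(self, aborted):
      print 'Counted %d matching tests' % self.count

# If registered before main() is invoked, this would show up as an extra mode,
# since parse_args builds its subparsers from HANDLERS.
expect_tests.HANDLERS['count'] = _CountHandler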
+ |
+# Private engine helpers |
+@contextlib.contextmanager |
+def _cover(opts): |
+ if opts is not None: |
+ c = coverage.coverage(**opts) |
+ c._warn_no_data = False # pylint: disable=protected-access |
+ c.start() |
+ try: |
+ yield |
+ finally: |
+ if opts is not None: |
+ c.stop() |
+ c.save() |
+ |
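A sketch of the per-process coverage pattern that _cover() supports (annotation only; the include glob is made up): each process records and saves its own suffixed data file, and the parent merges them afterwards, which is what main() below does with c.combine().

import coverage

# Illustrative only; the include pattern is hypothetical and _cover() is the
# context manager defined above.
cov_opts = {'include': ['*/my_module/*'], 'data_suffix': True}
with _cover(cov_opts):
  pass  # run some tests; on exit this process saves '.coverage.<suffix>'
coverage.coverage(**cov_opts).combine()  # merge all suffixed data files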
+ |
+# Private engine implementation |
+def _GetCurrentData(test): |
+ """ |
+ @type test: Test() |
+ @returns: The deserialized data (or _NonExistant), and a boolean indicating |
+            whether the serialized data on disk is in the same format that |
+            |test| requested. |
+ @rtype: (dict, bool) |
+ """ |
+ for ext in sorted(SUPPORTED_SERIALIZERS, key=lambda s: s != test.ext): |
+ path = test.expect_path(ext) |
+ if ext not in SERIALIZERS: |
+ raise Exception('The package to support %s is not installed.' % ext) |
+ if os.path.exists(path): |
+ with open(path, 'rb') as f: |
+ data = SERIALIZERS[ext][0](f) |
+ return data, ext == test.ext |
+ return _NonExistant, True |
+ |
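The sorted() call in _GetCurrentData leans on False sorting before True, so the extension the test asked for is probed first and any other installed serializer acts as a fallback; a tiny illustration (annotation only, not part of the patch):

# For a test that asked for ext='yaml', the key maps 'yaml' -> False and
# 'json' -> True, so 'yaml' is checked first.
assert sorted({'json', 'yaml'}, key=lambda s: s != 'yaml') == ['yaml', 'json']
assert sorted({'json', 'yaml'}, key=lambda s: s != 'json') == ['json', 'yaml']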
+ |
+def _WriteNewData(test, data): |
+ """ |
+ @type test: Test() |
+ """ |
+ if test.ext not in SUPPORTED_SERIALIZERS: |
+ raise Exception('%s is not a supported serializer.' % test.ext) |
+ if test.ext not in SERIALIZERS: |
+ raise Exception('The package to support %s is not installed.' % test.ext) |
+ with open(test.expect_path(), 'wb') as f: |
+ SERIALIZERS[test.ext][1](data, f) |
+ |
+ |
+def _DiffData(old, new): |
+ """ |
+ Takes old data and new data, then returns a textual diff as a list of lines. |
+ @type old: dict |
+ @type new: dict |
+ @rtype: [str] |
+ """ |
+  if old is _NonExistant: |
+    # No expectation file on disk yet; return the new data itself (truthy |
+    # whenever there is anything to record). |
+    return new |
+ if old == new: |
+ return [] |
+ else: |
+ return list(difflib.context_diff( |
+ pprint.pformat(old).splitlines(), |
+ pprint.pformat(new).splitlines(), |
+ fromfile='expected', tofile='current', |
+ n=4, lineterm='' |
+ )) |
+ |
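A small usage sketch for _DiffData (annotation only; the expectation data is made up):

# Illustrative only.
old = {'status': 'ok', 'steps': ['configure', 'compile']}
new = {'status': 'ok', 'steps': ['configure', 'compile', 'link']}
for line in _DiffData(old, new):
  print line
# Prints a context diff of the pprint'ed structures ('expected' vs 'current'),
# with changed lines marked by '!'. An empty list means the data matched.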
+ |
+def _GenLoopProcess(gen, test_queue, result_queue, num_procs, kill_switch, |
+ match_globs, cover_ctx, handler): |
+ """ |
+  Generate `Test` instances from |gen| and feed them into |test_queue|. |
+ |
+ Non-Test instances will be translated into `UnknownError` objects. |
+ |
+ On completion, feed |num_procs| None objects into |test_queue|. |
+ |
+ @param gen: generator yielding Test() instances. |
+ @type test_queue: multiprocessing.Queue() |
+ @type result_queue: multiprocessing.Queue() |
+ @type num_procs: int |
+ @type kill_switch: multiprocessing.Event() |
+ @type match_globs: [str] |
+  @type cover_ctx: context manager, as returned by _cover() |
+ @type handler: _Handler.GenStage() |
+ """ |
+ try: |
+ matcher = re.compile( |
+ '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g) |
+ for g in match_globs if g[0] != '-')) |
+ if matcher.pattern == '^$': |
+ matcher = re.compile('^.*$') |
+ |
+ neg_matcher = re.compile( |
+ '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g[1:]) |
+ for g in match_globs if g[0] == '-')) |
+ |
+ with cover_ctx: |
+ for test in gen(): |
+ if kill_switch.is_set(): |
+ break |
+ |
+ if not isinstance(test, Test): |
+ result_queue.put_nowait( |
+            UnknownError('Got non-Test instance from generator: %r' % test)) |
+ continue |
+ |
+ if not neg_matcher.match(test.name) and matcher.match(test.name): |
+ if handler(test, result_queue.put_nowait): |
+ test_queue.put_nowait(test) |
+ |
+ except KeyboardInterrupt: |
+ pass |
+ finally: |
+ for _ in xrange(num_procs): |
+ test_queue.put_nowait(None) |
+ |
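The matcher construction above folds shell-style globs into one alternation regex, with a leading '-' marking a negation glob; a standalone sketch of the resulting filtering (annotation only; the glob list and test names are made up):

import fnmatch
import re

# Illustrative only; mirrors the matcher construction in _GenLoopProcess.
match_globs = ['builder_*', '-builder_experimental*']
pos = re.compile('^%s$' % '|'.join(
    '(?:%s)' % fnmatch.translate(g) for g in match_globs if g[0] != '-'))
neg = re.compile('^%s$' % '|'.join(
    '(?:%s)' % fnmatch.translate(g[1:]) for g in match_globs if g[0] == '-'))

for name in ('builder_linux', 'builder_experimental_arm', 'tester_win'):
  print name, bool(pos.match(name)) and not neg.match(name)
# builder_linux True
# builder_experimental_arm False
# tester_win False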
+ |
+def _RunLoopProcess(test_queue, result_queue, kill_switch, cover_ctx, |
+ handler): |
+ """ |
+ Consume `Test` instances from |test_queue|, run them, and push the `Result`s |
+ into |result_queue|. |
+ |
+ Generates coverage data as a side-effect. |
+ @type test_queue: multiprocessing.Queue() |
+ @type result_queue: multiprocessing.Queue() |
+ @type kill_switch: multiprocessing.Event() |
+ @type handler: _Handler.RunStage() |
+ """ |
+ try: |
+ with cover_ctx: |
+ while not kill_switch.is_set(): |
+ try: |
+ test = test_queue.get(timeout=0.1) |
+ if test is None: |
+ break |
+ except Queue.Empty: |
+ continue |
+ |
+ try: |
+ rslt = test.run() |
+ if not isinstance(rslt, Result): |
+ result_queue.put_nowait( |
+ TestError(test, 'Got non-Result instance from test: %r' % rslt)) |
+ continue |
+ |
+ handler(test, rslt, result_queue.put_nowait) |
+ except Exception as e: |
+ # TODO(iannucci): include stacktrace |
+ result_queue.put_nowait(TestError(test, str(e))) |
+ except KeyboardInterrupt: |
+ pass |
+ |
+ |
+# Private CLI implementation |
+def parse_args(args): |
+ args = args or sys.argv[1:] |
+ |
+ # Set the default mode if not specified and not passing --help |
+ search_names = set(HANDLERS.keys() + ['-h', '--help']) |
+ if not any(arg in search_names for arg in args): |
+ args.insert(0, 'test') |
+ |
+ parser = argparse.ArgumentParser() |
+ subparsers = parser.add_subparsers( |
+ title='Mode (default "test")', dest='mode', |
+ help='See `[mode] --help` for more options.') |
+ |
+ for k, h in HANDLERS.iteritems(): |
+ sp = subparsers.add_parser(k, help=h.__doc__.lower()) |
+ h.add_options(sp) |
+ |
+ mg = sp.add_mutually_exclusive_group() |
+ mg.add_argument( |
+ '--quiet', action='store_true', |
+ help='be quiet (only print failures)') |
+ mg.add_argument( |
+ '--verbose', action='store_true', help='be verbose') |
+ |
+ if not h.SKIP_RUNLOOP: |
+ sp.add_argument( |
+ '--jobs', metavar='N', type=int, |
+ default=multiprocessing.cpu_count(), |
+ help='run N jobs in parallel (default %(default)s)') |
+ |
+ sp.add_argument( |
+ '--test_list', metavar='FILE', |
+ help='take the list of test globs from the FILE (use "-" for stdin)') |
+ |
+ sp.add_argument( |
+ 'test_glob', nargs='*', help=( |
+ 'glob to filter the tests acted on. If the glob begins with "-" ' |
+ 'then it acts as a negation glob and anything which matches it ' |
+ 'will be skipped.')) |
+ |
+ opts = parser.parse_args(args) |
+ |
+ if not hasattr(opts, 'jobs'): |
+ opts.jobs = 0 |
+ elif opts.jobs < 1: |
+ parser.error('--jobs was less than 1') |
+ |
+ if opts.test_list: |
+ fh = sys.stdin if opts.test_list == '-' else open(opts.test_list, 'rb') |
+ with fh as tl: |
+ opts.test_glob += [l.strip() for l in tl.readlines()] |
+ |
+ test_globs = opts.test_glob |
+ handler = HANDLERS[opts.mode] |
+ |
+ del opts.test_list |
+ del opts.mode |
+ |
+ return opts, handler, test_globs |
+ |
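The mode-insertion trick above lets callers omit the subcommand entirely; a self-contained sketch of the same idea (annotation only; this simplified parser is hypothetical, not the real one):

import argparse

def _parse(argv):
  # Simplified, hypothetical re-creation of the default-mode insertion used
  # in parse_args() above.
  modes = {'list', 'debug', 'train', 'test'}
  if not any(a in modes | {'-h', '--help'} for a in argv):
    argv = ['test'] + argv
  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers(dest='mode')
  for m in modes:
    subparsers.add_parser(m).add_argument('test_glob', nargs='*')
  return parser.parse_args(argv)

print _parse(['foo_*']).mode           # test  ('test' was inserted)
print _parse(['train', 'foo_*']).mode  # train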
+ |
+# Public |
+Result = namedtuple('Result', 'data') |
+ |
+ |
+_Test = namedtuple( |
+ 'Test', 'name func args kwargs expect_dir expect_base ext breakpoints') |
+ |
+class Test(_Test): |
+ def __new__(cls, name, func, args=(), kwargs=None, expect_dir=None, |
+ expect_base=None, ext='json', breakpoints=None, break_funcs=()): |
+ """Create a new test. |
+ |
+ @param name: The name of the test. Will be used as the default expect_base |
+ |
+ @param func: The function to execute to run this test. Must be pickleable. |
+ @param args: *args for |func| |
+ @param kwargs: **kwargs for |func| |
+ |
+ @param expect_dir: The directory which holds the expectation file for this |
+ Test. |
+ @param expect_base: The basename (without extension) of the expectation |
+ file. Defaults to |name|. |
+ @param ext: The extension of the expectation file. Affects the serializer |
+ used to write the expectations to disk. Valid values are |
+ 'json' and 'yaml' (Keys in SERIALIZERS). |
+ |
+ @param breakpoints: A list of (path, lineno, func_name) tuples. These will |
+ turn into breakpoints when the tests are run in 'debug' |
+ mode. See |break_funcs| for an easier way to set this. |
+ @param break_funcs: A list of functions for which to set breakpoints. |
+ """ |
+ kwargs = kwargs or {} |
+ |
+ breakpoints = breakpoints or [] |
+ if not breakpoints or break_funcs: |
+ for f in (break_funcs or (func,)): |
+ if hasattr(f, 'im_func'): |
+ f = f.im_func |
+ breakpoints.append((f.func_code.co_filename, |
+ f.func_code.co_firstlineno, |
+ f.func_code.co_name)) |
+ |
+ return super(Test, cls).__new__(cls, name, func, args, kwargs, expect_dir, |
+ expect_base, ext, breakpoints) |
+ |
+ def expect_path(self, ext=None): |
+ name = self.expect_base or self.name |
+ name = ''.join('_' if c in '<>:"\\/|?*\0' else c for c in name) |
+ return os.path.join(self.expect_dir, name + ('.%s' % (ext or self.ext))) |
+ |
+ def run(self): |
+ return self.func(*self.args, **self.kwargs) |
+ |
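A quick illustration of Test.expect_path() (annotation only; the test name, directory, and placeholder function are made up, and a real |func| must be pickleable): filesystem-unsafe characters in the name are replaced with underscores and the serializer extension is appended.

# Illustrative only.
t = Test('api/platform:win?', lambda: Result({}),
         expect_dir='/tmp/expectations')
print t.expect_path()        # /tmp/expectations/api_platform_win_.json
print t.expect_path('yaml')  # /tmp/expectations/api_platform_win_.yaml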
+ |
+def main(test_gen, coverage_includes=None, coverage_omits=None, args=None): |
+ """Entry point for tests using expect_tests. |
+ |
+  Example: |
+    import expect_tests |
+ |
+    def happy_fn(val): |
+      # Usually you would return data which is the result of some deterministic |
+      # computation. |
+      return expect_tests.Result({'neet': '%s string value' % val}) |
+ |
+    def Gen(): |
+      yield expect_tests.Test('happy', happy_fn, args=('happy',)) |
+ |
+    if __name__ == '__main__': |
+      expect_tests.main(Gen) |
+ |
+ @param test_gen: A Generator which yields Test objects. |
+ @param coverage_includes: A list of path globs to include under coverage. |
+ @param coverage_omits: A list of path globs to exclude under coverage. |
+ @param args: Commandline args (starting at argv[1]) |
+ """ |
+ opts, handler, test_globs = parse_args(args) |
+ result_handler = handler.ResultStage(opts) |
+ |
+ kill_switch = multiprocessing.Event() |
+ signal.signal(signal.SIGINT, lambda *_: kill_switch.set()) |
+ signal.signal(signal.SIGTERM, lambda *_: kill_switch.set()) |
+ |
+ if handler.SKIP_RUNLOOP: |
+ coverage_opts = None |
+ else: |
+ coverage_opts = { |
+ 'include': coverage_includes, |
+ 'omit': coverage_omits, |
+ 'data_suffix': True |
+ } |
+ c = coverage.coverage(**coverage_opts) |
+ c.erase() |
+ cover_ctx = _cover(coverage_opts) |
+ |
+ test_queue = multiprocessing.Queue() |
+ result_queue = multiprocessing.Queue() |
+ |
+ test_gen_args = ( |
+ test_gen, test_queue, result_queue, opts.jobs, kill_switch, |
+ test_globs, cover_ctx, handler.GenStage(opts)) |
+ |
+ procs = [] |
+ if handler.SKIP_RUNLOOP: |
+ _GenLoopProcess(*test_gen_args) |
+ else: |
+ procs = [multiprocessing.Process( |
+ target=_GenLoopProcess, args=test_gen_args)] |
+ |
+ procs += [ |
+ multiprocessing.Process( |
+ target=_RunLoopProcess, args=( |
+ test_queue, result_queue, kill_switch, cover_ctx, |
+ handler.RunStage(opts))) |
+ for _ in xrange(opts.jobs) |
+ ] |
+ |
+ for p in procs: |
+ p.daemon = True |
+ p.start() |
+ |
+ error = False |
+ while not kill_switch.is_set(): |
+ while not kill_switch.is_set(): |
+ try: |
+ error |= result_handler(result_queue.get(timeout=0.1)) is False |
+ except Queue.Empty: |
+ break |
+ |
+ if not any(p.is_alive() for p in procs): |
+ break |
+ |
+ result_handler.finalize(kill_switch.is_set()) |
+ |
+ assert kill_switch.is_set() or result_queue.empty() |
+ |
+ if not handler.SKIP_RUNLOOP: |
+ c.combine() |
+ if not kill_switch.is_set() and not opts.test_glob: |
+ outf = StringIO() |
+ total_covered = c.report(file=outf) |
+ summary = outf.getvalue().replace('%- 15s' % 'Name', 'Coverage Report', 1) |
+ if opts.verbose: |
+ print summary |
+ elif total_covered != 100.0: |
+ lines = summary.splitlines() |
+ lines[2:-2] = [l for l in lines[2:-2] |
+ if not l.strip().endswith('100%')] |
+ print '\n'.join(lines) |
+ print 'FATAL: Recipes configs are not at 100% coverage.' |
+ sys.exit(2) |
+ |
+ sys.exit(error or kill_switch.is_set()) |