#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import Queue
import argparse
import collections
import contextlib
import difflib
import glob
import json
import multiprocessing
import os
import pdb
import pprint
import re
import signal
import sys
import time

from collections import namedtuple

from cStringIO import StringIO

import test_env  # pylint: disable=unused-import

import coverage

try:
  import yaml
except ImportError:
  yaml = None

# Private types (must be module-level to be pickleable)
WriteAction = namedtuple('WriteAction', 'test why')
NoAction = namedtuple('NoAction', 'test')
DirSeen = namedtuple('DirSeen', 'dir')
Missing = namedtuple('Missing', 'test')
Fail = namedtuple('Fail', 'test diff')
Pass = namedtuple('Pass', 'test')


UnknownError = namedtuple('UnknownError', 'message')
TestError = namedtuple('TestError', 'test message')
_NonExistant = object()

# Serialization
SUPPORTED_SERIALIZERS = {'json', 'yaml'}
SERIALIZERS = {}

def re_encode(obj):
  if isinstance(obj, dict):
    return {re_encode(k): re_encode(v) for k, v in obj.iteritems()}
  elif isinstance(obj, list):
    return [re_encode(i) for i in obj]
  elif isinstance(obj, unicode):
    return obj.encode('utf-8')
  else:
    return obj
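
# Illustrative sketch (not part of the original CL): json.load() returns
# unicode strings under Python 2, so re_encode normalizes decoded expectation
# data back to UTF-8 byte strings before it is compared or re-dumped:
#
#   decoded = json.loads(u'{"greeting": "caf\u00e9"}')
#   assert decoded == {u'greeting': u'caf\xe9'}
#   assert re_encode(decoded) == {'greeting': 'caf\xc3\xa9'}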
SERIALIZERS['json'] = (
    lambda s: re_encode(json.load(s)),
    lambda data, stream: json.dump(
        data, stream, sort_keys=True, indent=2, separators=(',', ': ')))

if yaml:
  _YAMLSafeLoader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
  _YAMLSafeDumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
  SERIALIZERS['yaml'] = (
      lambda stream: yaml.load(stream, _YAMLSafeLoader),
      lambda data, stream: yaml.dump(
          data, stream, _YAMLSafeDumper, default_flow_style=False,
          encoding='utf-8'))
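
# Illustrative sketch (not part of the original CL): each SERIALIZERS entry
# is a (load, dump) pair keyed by file extension, so round-tripping
# expectation data looks like this ('foo.json' is a made-up path):
#
#   load, dump = SERIALIZERS['json']
#   with open('foo.json', 'wb') as f:
#     dump({'key': ['a', 'b']}, f)
#   with open('foo.json', 'rb') as f:
#     assert load(f) == {'key': ['a', 'b']}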

# Result Queue Handlers
class _Handler(object):
  """Handler object.

  Defines 3 nested classes for each stage of the test pipeline. The pipeline
  looks like:

                             ->            ->
                      jobs   ->            ->   (main)
        GenStage -> test_queue -> RunStage -> result_queue -> ResultStage
                             ->            ->
                             ->            ->

  Each process will have an instance of one of the nested handler classes,
  which will be called on each test / result.

  You can skip the RunStage phase by setting SKIP_RUNLOOP to True on your
  implementation class.

  Tips:
    * Only do printing in ResultStage, since it's running on the main process.
  """
  SKIP_RUNLOOP = False
  # Review comment (Vadim Sh., 2014/04/01 18:27:50): SKIP_RUNSTAGE?

  @staticmethod
  def add_options(parser):
    """
    @type parser: argparse.ArgumentParser()
    """
    pass

  class GenStage(object):
    # Review comment (Vadim Sh., 2014/04/01 18:27:50): It occurred to me
    # after sending the previous review…
    def __init__(self, opts):
      self.opts = opts

    def __call__(self, test, put_result):
      """Called for each |test| generated which matches the test_globs.

      @param test: The generated Test object.
      @type test: Test()
      @param put_result: Call with any object to pass it to the ResultStage.
      @return: True iff the RunStage should run |test|.
      @rtype: bool
      """
      return True

  class RunStage(object):
    def __init__(self, opts):
      self.opts = opts

    def __call__(self, test, result, put_result):
      """Called for each |test| which ran and generated |result|.

      @param test: The generated Test object which was run.
      @type test: Test()
      @param result: The result of running |test|.
      @type result: Result()
      @param put_result: Call this with any object to pass it to the
                         ResultStage phase.
      """
      put_result(result)

  class ResultStage(object):
    def __init__(self, opts):
      self.opts = opts

    def __call__(self, obj):
      """Called for each obj result from GenStage or RunStage.

      @type obj: Anything passed to put_result in GenStage or RunStage.

      Works similarly to a SAX XML parser by dispatching to
      self.handle_{type(obj).__name__}.

      So if |obj| is a Test, this would call self.handle_Test(obj).

      If you implement handle(obj), then it will be called for any
      otherwise unhandled obj.

      @return: False iff the program should ultimately terminate with an
               error code. Note that None does NOT count as an error
               condition.
      """
      return getattr(self, 'handle_%s' % type(obj).__name__, self.handle)(obj)

    def handle(self, obj):
      if self.opts.verbose:
        print 'UNHANDLED:', obj
      return False

    def finalize(self, aborted):
      """Called after __call__() has been called for all results.

      @param aborted: True if the user aborted the run.
      @type aborted: bool
      """
      pass
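
# Illustrative sketch (not part of the original CL): a minimal ResultStage
# subclass showing the handle_<TypeName> dispatch described above. The class
# name and counting behavior are made up; a Fail namedtuple routes to
# handle_Fail, and anything unhandled falls back to handle().
#
#   class _CountingResultStage(_Handler.ResultStage):
#     def __init__(self, opts):
#       super(_CountingResultStage, self).__init__(opts)
#       self.fails = 0
#
#     def handle_Fail(self, fail):
#       self.fails += 1
#       return False  # False marks the overall run as failed.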

class _ListHandler(_Handler):
  """List all of the tests instead of running them."""
  SKIP_RUNLOOP = True

  class GenStage(_Handler.GenStage):
    def __call__(self, test, put_result):
      put_result(test)

  class ResultStage(_Handler.ResultStage):
    @staticmethod
    def handle_Test(test):
      print test.name

    # TODO(iannucci): group tests by dir?
    # TODO(iannucci): print more data about the test in verbose mode?


class _DebugHandler(_Handler):
  """Execute each test under the pdb debugger."""
  SKIP_RUNLOOP = True

  class GenStage(_Handler.GenStage):
    def __call__(self, test, put_result):
      put_result(test)

  class ResultStage(_Handler.ResultStage):
    @staticmethod
    def handle_Test(test):
      dbg = pdb.Pdb()
      for path, line, funcname in test.breakpoints:
        dbg.set_break(path, line, funcname=funcname)

      dbg.reset()

      def dispatch_thunk(*args):
        """Allows us to continue until the actual breakpoint."""
        val = dbg.trace_dispatch(*args)
        dbg.set_continue()
        sys.settrace(dbg.trace_dispatch)
        return val
      sys.settrace(dispatch_thunk)
      try:
        test.run()
      except pdb.bdb.BdbQuit:
        pass
      finally:
        dbg.quitting = 1
        sys.settrace(None)


class _TrainHandler(_Handler):
  """Write test expectations to disk."""
  @staticmethod
  def add_options(parser):
    assert isinstance(parser, argparse.ArgumentParser)
    parser.add_argument(
        '--force', action='store_true', help=(
            'Immediately write expectations to disk instead of determining '
            'if they contain a diff from the current expectations.'
        ))

  class GenStage(_Handler.GenStage):
    def __init__(self, *args):
      super(_TrainHandler.GenStage, self).__init__(*args)
      self.dirs_seen = set()

    def __call__(self, test, put_result):
      if test.expect_dir not in self.dirs_seen:
        try:
          os.makedirs(test.expect_dir)
        except OSError:
          pass
        put_result(DirSeen(test.expect_dir))
        self.dirs_seen.add(test.expect_dir)
      return True

  class RunStage(_Handler.RunStage):
    def __call__(self, test, result, put_result):
      if self.opts.force:
        _WriteNewData(test, result.data)
        put_result(WriteAction(test, 'forced'))
        return

      current, same_schema = _GetCurrentData(test)
      diff = _DiffData(current, result.data)
      if diff or not same_schema:
        _WriteNewData(test, result.data)
        if current is _NonExistant:
          why = 'missing'
        elif diff:
          why = 'diff'
        else:
          why = 'schema changed'
        put_result(WriteAction(test, why))
      else:
        put_result(NoAction(test))

  class ResultStage(_Handler.ResultStage):
    def __init__(self, opts):
      super(_TrainHandler.ResultStage, self).__init__(opts)
      self.dirs_seen = set()
      self.files_expected = collections.defaultdict(set)
      self.start = time.time()
      self.num_tests = 0

    def _record_expected(self, test):
      head, tail = os.path.split(test.expect_path())
      self.files_expected[head].add(tail)

    def handle_DirSeen(self, dirseen):
      self.dirs_seen.add(dirseen.dir)

    def handle_NoAction(self, result):
      self._record_expected(result.test)
      if self.opts.verbose:
        print '%s did not change' % result.test.name

    def handle_WriteAction(self, result):
      self._record_expected(result.test)
      if not self.opts.quiet:
        test = result.test
        name = test.expect_path() if self.opts.verbose else test.name
        print 'Wrote %s: %s' % (name, result.why)

    def finalize(self, aborted):
      if not aborted and not self.opts.test_glob:
        for d in self.dirs_seen:
          expected = self.files_expected[d]
          for f in os.listdir(d):
            if f == 'OWNERS':
              continue
            if f not in expected:
              path = os.path.join(d, f)
              os.unlink(path)
              if self.opts.verbose:
                print 'Removed unexpected file', path
      if not self.opts.quiet:
        num_tests = sum(len(x) for x in self.files_expected.itervalues())
        print 'Trained %d tests in %0.3fs' % (
            num_tests, time.time() - self.start)


class _TestHandler(_Handler):
  """Run the tests."""
  class RunStage(_Handler.RunStage):
    def __call__(self, test, result, put_result):
      current, _ = _GetCurrentData(test)
      if current is _NonExistant:
        put_result(Missing(test))
      else:
        diff = _DiffData(current, result.data)
        if not diff:
          put_result(Pass(test))
        else:
          put_result(Fail(test, diff))

  class ResultStage(_Handler.ResultStage):
    def __init__(self, *args):
      super(_TestHandler.ResultStage, self).__init__(*args)
      self.err_out = StringIO()
      self.start = time.time()
      self.errors = collections.defaultdict(int)
      self.num_tests = 0

    def emit(self, short, test, verbose):
      if self.opts.verbose:
        print >> sys.stdout, '%s ... %s' % (test.name if test else '????',
                                            verbose)
      else:
        sys.stdout.write(short)
        sys.stdout.flush()

    def add_result(self, msg_lines, test, header, category):
      print >> self.err_out
      print >> self.err_out, '=' * 70
      if test is not None:
        print >> self.err_out, '%s: %s (%s)' % (
            header, test.name, test.expect_path())
      print >> self.err_out, '-' * 70
      if msg_lines:
        print >> self.err_out, '\n'.join(msg_lines)
      self.errors[category] += 1
      self.num_tests += 1

    def handle_Pass(self, p):
      if not self.opts.quiet:
        self.emit('.', p.test, 'ok')
      self.num_tests += 1

    def handle_Fail(self, fail):
      self.emit('F', fail.test, 'FAIL')
      self.add_result(fail.diff, fail.test, 'FAIL', 'failures')
      return False

    def handle_TestError(self, test_error):
      self.emit('E', test_error.test, 'ERROR')
      self.add_result([test_error.message], test_error.test, 'ERROR',
                      'errors')
      return False

    def handle_UnknownError(self, error):
      self.emit('U', None, 'UNKNOWN ERROR')
      self.add_result([error.message], None, 'UNKNOWN ERROR',
                      'unknown_errors')
      return False

    def handle_Missing(self, missing):
      self.emit('M', missing.test, 'MISSING')
      self.add_result([], missing.test, 'MISSING', 'missing')
      return False

    def finalize(self, aborted):
      # TODO(iannucci): print summary stats (and timing info?)
      buf = self.err_out.getvalue()
      if buf:
        print
        print buf
      if not self.opts.quiet:
        print
        print '-' * 70
        print 'Ran %d tests in %0.3fs' % (
            self.num_tests, time.time() - self.start)
        print
      if aborted:
        print 'ABORTED'
      elif self.errors:
        print 'FAILED (%s)' % (', '.join('%s=%d' % i
                                         for i in self.errors.iteritems()))
      elif not self.opts.quiet:
        print 'OK'


HANDLERS = {
  'list': _ListHandler,
  'debug': _DebugHandler,
  'train': _TrainHandler,
  'test': _TestHandler,
}


# Private engine helpers
@contextlib.contextmanager
def _cover(opts):
  if opts is not None:
    c = coverage.coverage(**opts)
    c._warn_no_data = False  # pylint: disable=protected-access
    c.start()
  try:
    yield
  finally:
    if opts is not None:
      c.stop()
      c.save()

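# Illustrative sketch (not part of the original CL): _cover() is a no-op when
# opts is None (the SKIP_RUNLOOP modes); otherwise it records coverage for
# the body and saves a per-process data file (data_suffix=True). The include
# glob and the callable below are made up:
#
#   with _cover({'include': ['*/my_module/*'], 'data_suffix': True}):
#     do_covered_work()  # hypothetical callable measured by coverage
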
# Private engine implementation
def _GetCurrentData(test):
  """
  @type test: Test()
  @return: The deserialized data (or _NonExistant), and a boolean indicating
           whether the current serialized data is in the same format which
           was requested by |test|.
  @rtype: (dict, bool)
  """
  for ext in sorted(SUPPORTED_SERIALIZERS, key=lambda s: s != test.ext):
    path = test.expect_path(ext)
    if ext not in SERIALIZERS:
      raise Exception('The package to support %s is not installed.' % ext)
    if os.path.exists(path):
      with open(path, 'rb') as f:
        data = SERIALIZERS[ext][0](f)
      return data, ext == test.ext
  return _NonExistant, True

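# Illustrative note (not part of the original CL): the sort key above orders
# the test's own extension first, since False sorts before True. For a test
# with ext='json' the probe order is ['json', 'yaml']; if only the .yaml
# expectation exists on disk, the call returns (data, False), which the train
# handler reports as 'schema changed'.
#
#   sorted({'json', 'yaml'}, key=lambda s: s != 'json')  # ['json', 'yaml']
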

def _WriteNewData(test, data):
  """
  @type test: Test()
  """
  if test.ext not in SUPPORTED_SERIALIZERS:
    raise Exception('%s is not a supported serializer.' % test.ext)
  if test.ext not in SERIALIZERS:
    raise Exception('The package to support %s is not installed.' % test.ext)
  with open(test.expect_path(), 'wb') as f:
    SERIALIZERS[test.ext][1](data, f)


def _DiffData(old, new):
  """
  Takes old data and new data, then returns a textual diff as a list of lines.
  @type old: dict
  @type new: dict
  @rtype: [str]
  """
  if old is _NonExistant:
    # No expectation file yet: return the new data itself, which is truthy
    # whenever there is anything to write (callers only check truthiness in
    # this case).
    return new
  if old == new:
    return []
  else:
    return list(difflib.context_diff(
        pprint.pformat(old).splitlines(),
        pprint.pformat(new).splitlines(),
        fromfile='expected', tofile='current',
        n=4, lineterm=''
    ))

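# Illustrative sketch (not part of the original CL): for two differing dicts,
# _DiffData yields difflib.context_diff lines over their pprint renderings:
#
#   _DiffData({'a': 1}, {'a': 2})
#   # -> ['*** expected', '--- current', '***************',
#   #     '*** 1 ****', "! {'a': 1}", '--- 1 ----', "! {'a': 2}"]
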

def _GenLoopProcess(gen, test_queue, result_queue, num_procs, kill_switch,
                    match_globs, cover_ctx, handler):
  """
  Generate `Test`s from |gen|, and feed them into |test_queue|.

  Non-Test instances will be translated into `UnknownError` objects.

  On completion, feed |num_procs| None objects into |test_queue|.

  @param gen: generator yielding Test() instances.
  @type test_queue: multiprocessing.Queue()
  @type result_queue: multiprocessing.Queue()
  @type num_procs: int
  @type kill_switch: multiprocessing.Event()
  @type match_globs: [str]
  @type cover_ctx: context manager from _cover()
  @type handler: _Handler.GenStage()
  """
  try:
    matcher = re.compile(
        '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g)
                          for g in match_globs if g[0] != '-'))
    if matcher.pattern == '^$':
      matcher = re.compile('^.*$')

    neg_matcher = re.compile(
        '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g[1:])
                          for g in match_globs if g[0] == '-'))

    with cover_ctx:
      for test in gen():
        if kill_switch.is_set():
          break

        if not isinstance(test, Test):
          result_queue.put_nowait(
              UnknownError('Got non-Test instance from generator: %r' % test))
          continue

        if not neg_matcher.match(test.name) and matcher.match(test.name):
          if handler(test, result_queue.put_nowait):
            test_queue.put_nowait(test)

  except KeyboardInterrupt:
    pass
  finally:
    for _ in xrange(num_procs):
      test_queue.put_nowait(None)

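# Illustrative note (not part of the original CL): globs are compiled via
# glob.fnmatch.translate into a single alternation; a leading '-' marks a
# negation glob. With match_globs=['foo*', '-foo_skip*'], 'foo_bar' is fed to
# the pipeline but 'foo_skip_1' is filtered out, and an empty positive
# pattern ('^$') is replaced by the match-everything '^.*$'.
#
#   # e.g. (Python 2): glob.fnmatch.translate('foo*') == 'foo.*\\Z(?s)'
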

def _RunLoopProcess(test_queue, result_queue, kill_switch, cover_ctx,
                    handler):
  """
  Consume `Test` instances from |test_queue|, run them, and push the
  `Result`s into |result_queue|.

  Generates coverage data as a side-effect.

  @type test_queue: multiprocessing.Queue()
  @type result_queue: multiprocessing.Queue()
  @type kill_switch: multiprocessing.Event()
  @type cover_ctx: context manager from _cover()
  @type handler: _Handler.RunStage()
  """
  try:
    with cover_ctx:
      while not kill_switch.is_set():
        try:
          test = test_queue.get(timeout=0.1)
          if test is None:
            break
        except Queue.Empty:
          continue

        try:
          rslt = test.run()
          if not isinstance(rslt, Result):
            result_queue.put_nowait(
                TestError(test, 'Got non-Result instance from test: %r'
                          % rslt))
            continue

          handler(test, rslt, result_queue.put_nowait)
        except Exception as e:
          # TODO(iannucci): include stacktrace
          result_queue.put_nowait(TestError(test, str(e)))
  except KeyboardInterrupt:
    pass

# Private CLI implementation
def parse_args(args):
  args = args or sys.argv[1:]

  # Set the default mode if not specified and not passing --help
  search_names = set(HANDLERS.keys() + ['-h', '--help'])
  if not any(arg in search_names for arg in args):
    args.insert(0, 'test')

  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers(
      title='Mode (default "test")', dest='mode',
      help='See `[mode] --help` for more options.')

  for k, h in HANDLERS.iteritems():
    sp = subparsers.add_parser(k, help=h.__doc__.lower())
    h.add_options(sp)

    mg = sp.add_mutually_exclusive_group()
    mg.add_argument(
        '--quiet', action='store_true',
        help='be quiet (only print failures)')
    mg.add_argument(
        '--verbose', action='store_true', help='be verbose')

    if not h.SKIP_RUNLOOP:
      sp.add_argument(
          '--jobs', metavar='N', type=int,
          default=multiprocessing.cpu_count(),
          help='run N jobs in parallel (default %(default)s)')

    sp.add_argument(
        '--test_list', metavar='FILE',
        help='take the list of test globs from the FILE (use "-" for stdin)')

    sp.add_argument(
        'test_glob', nargs='*', help=(
            'glob to filter the tests acted on. If the glob begins with "-" '
            'then it acts as a negation glob and anything which matches it '
            'will be skipped.'))

  opts = parser.parse_args(args)

  if not hasattr(opts, 'jobs'):
    opts.jobs = 0
  elif opts.jobs < 1:
    parser.error('--jobs was less than 1')

  if opts.test_list:
    fh = sys.stdin if opts.test_list == '-' else open(opts.test_list, 'rb')
    with fh as tl:
      opts.test_glob += [l.strip() for l in tl.readlines()]

  test_globs = opts.test_glob
  handler = HANDLERS[opts.mode]

  del opts.test_list
  del opts.mode

  return opts, handler, test_globs

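# Illustrative usage (not part of the original CL): because parse_args()
# injects the 'test' mode when neither a mode nor --help is present, the
# following invocations are equivalent (the script name is hypothetical):
#
#   ./run_expect_tests.py 'foo*' '-foo_skip*'
#   ./run_expect_tests.py test 'foo*' '-foo_skip*'
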

# Public
Result = namedtuple('Result', 'data')


_Test = namedtuple(
    'Test', 'name func args kwargs expect_dir expect_base ext breakpoints')

class Test(_Test):
  def __new__(cls, name, func, args=(), kwargs=None, expect_dir=None,
              expect_base=None, ext='json', breakpoints=None, break_funcs=()):
    """Create a new test.

    @param name: The name of the test. Will be used as the default
                 expect_base.

    @param func: The function to execute to run this test. Must be
                 pickleable.
    @param args: *args for |func|.
    @param kwargs: **kwargs for |func|.

    @param expect_dir: The directory which holds the expectation file for
                       this Test.
    @param expect_base: The basename (without extension) of the expectation
                        file. Defaults to |name|.
    @param ext: The extension of the expectation file. Affects the serializer
                used to write the expectations to disk. Valid values are
                'json' and 'yaml' (keys in SERIALIZERS).

    @param breakpoints: A list of (path, lineno, func_name) tuples. These
                        will turn into breakpoints when the tests are run in
                        'debug' mode. See |break_funcs| for an easier way to
                        set this.
    @param break_funcs: A list of functions for which to set breakpoints.
    """
    kwargs = kwargs or {}

    breakpoints = breakpoints or []
    if (not breakpoints) or break_funcs:
      for f in (break_funcs or (func,)):
        if hasattr(f, 'im_func'):
          f = f.im_func
        breakpoints.append((f.func_code.co_filename,
                            f.func_code.co_firstlineno,
                            f.func_code.co_name))

    return super(Test, cls).__new__(cls, name, func, args, kwargs, expect_dir,
                                    expect_base, ext, breakpoints)

  def expect_path(self, ext=None):
    name = self.expect_base or self.name
    name = ''.join('_' if c in '<>:"\\/|?*\0' else c for c in name)
    return os.path.join(self.expect_dir, name + ('.%s' % (ext or self.ext)))

  def run(self):
    return self.func(*self.args, **self.kwargs)

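# Illustrative sketch (not part of the original CL): expect_path() joins
# expect_dir with a sanitized expect_base (defaulting to name) plus the
# serializer extension. The values below are made up:
#
#   t = Test('platform/win?', some_func, expect_dir='expectations')
#   t.expect_path()        # -> 'expectations/platform_win_.json'
#   t.expect_path('yaml')  # -> 'expectations/platform_win_.yaml'
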
def main(test_gen, coverage_includes=None, coverage_omits=None, args=None):
  """Entry point for tests using expect_tests.

  Example:
    import expect_tests

    def happy_fn(val):
      # Usually you would return data which is the result of some
      # deterministic computation.
      return expect_tests.Result({'neet': '%s string value' % val})

    def Gen():
      yield expect_tests.Test('happy', happy_fn, args=('happy',))

    if __name__ == '__main__':
      expect_tests.main(Gen)

  @param test_gen: A Generator which yields Test objects.
  @param coverage_includes: A list of path globs to include under coverage.
  @param coverage_omits: A list of path globs to exclude under coverage.
  @param args: Commandline args (starting at argv[1]).
  """
  opts, handler, test_globs = parse_args(args)
  result_handler = handler.ResultStage(opts)

  kill_switch = multiprocessing.Event()
  signal.signal(signal.SIGINT, lambda *_: kill_switch.set())
  signal.signal(signal.SIGTERM, lambda *_: kill_switch.set())

  if handler.SKIP_RUNLOOP:
    coverage_opts = None
  else:
    coverage_opts = {
      'include': coverage_includes,
      'omit': coverage_omits,
      'data_suffix': True
    }
    c = coverage.coverage(**coverage_opts)
    c.erase()
  cover_ctx = _cover(coverage_opts)

  test_queue = multiprocessing.Queue()
  result_queue = multiprocessing.Queue()

  test_gen_args = (
      test_gen, test_queue, result_queue, opts.jobs, kill_switch,
      test_globs, cover_ctx, handler.GenStage(opts))

  procs = []
  if handler.SKIP_RUNLOOP:
    _GenLoopProcess(*test_gen_args)
  else:
    procs = [multiprocessing.Process(
        target=_GenLoopProcess, args=test_gen_args)]

  procs += [
    multiprocessing.Process(
        target=_RunLoopProcess, args=(
            test_queue, result_queue, kill_switch, cover_ctx,
            handler.RunStage(opts)))
    for _ in xrange(opts.jobs)
  ]

  for p in procs:
    p.daemon = True
    p.start()

  error = False
  while not kill_switch.is_set():
    while not kill_switch.is_set():
      try:
        error |= result_handler(result_queue.get(timeout=0.1)) is False
      except Queue.Empty:
        break

    if not any(p.is_alive() for p in procs):
      break

  result_handler.finalize(kill_switch.is_set())

  assert kill_switch.is_set() or result_queue.empty()

  if not handler.SKIP_RUNLOOP:
    c.combine()
    if not kill_switch.is_set() and not opts.test_glob:
      outf = StringIO()
      total_covered = c.report(file=outf)
      summary = outf.getvalue().replace('%- 15s' % 'Name',
                                        'Coverage Report', 1)
      if opts.verbose:
        print
        print summary
      elif total_covered != 100.0:
        print
        lines = summary.splitlines()
        lines[2:-2] = [l for l in lines[2:-2]
                       if not l.strip().endswith('100%')]
        print '\n'.join(lines)
        print
        print 'FATAL: Recipes configs are not at 100% coverage.'
        sys.exit(2)

  sys.exit(error or kill_switch.is_set())