Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(62)

Side by Side Diff: third_party/logilab/logilab/common/pytest.py

Issue 1920403002: [content/test/gpu] Run pylint check of gpu tests in unittest instead of PRESUBMIT (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Update path to LICENSE.txt of logilab/README.chromium Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
2 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
3 #
4 # This file is part of logilab-common.
5 #
6 # logilab-common is free software: you can redistribute it and/or modify it under
7 # the terms of the GNU Lesser General Public License as published by the Free
8 # Software Foundation, either version 2.1 of the License, or (at your option) any
9 # later version.
10 #
11 # logilab-common is distributed in the hope that it will be useful, but WITHOUT
12 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
14 # details.
15 #
16 # You should have received a copy of the GNU Lesser General Public License along
17 # with logilab-common. If not, see <http://www.gnu.org/licenses/>.
18 """pytest is a tool that eases test running and debugging.
19
20 To be able to use pytest, you should either write tests using
21 the logilab.common.testlib's framework or the unittest module of the
22 Python's standard library.
23
24 You can customize pytest's behaviour by defining a ``pytestconf.py`` file
25 somewhere in your test directory. In this file, you can add options or
26 change the way tests are run.
27
28 To add command line options, you must define a ``update_parser`` function in
29 your ``pytestconf.py`` file. The function must accept a single parameter
30 that will be the OptionParser's instance to customize.
31
32 If you wish to customize the tester, you'll have to define a class named
33 ``CustomPyTester``. This class should extend the default `PyTester` class
34 defined in the pytest module. Take a look at the `PyTester` and `DjangoTester`
35 classes for more information about what can be done.
36
37 For instance, if you wish to add a custom -l option to specify a loglevel, you
38 could define the following ``pytestconf.py`` file ::
39
40 import logging
41 from logilab.common.pytest import PyTester
42
43 def update_parser(parser):
44 parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
45 choices=('debug', 'info', 'warning', 'error', 'critica l'),
46 default='critical', help="the default log level possib le choices are "
47 "('debug', 'info', 'warning', 'error', 'critical')")
48 return parser
49
50
51 class CustomPyTester(PyTester):
52 def __init__(self, cvg, options):
53 super(CustomPyTester, self).__init__(cvg, options)
54 loglevel = options.loglevel.upper()
55 logger = logging.getLogger('erudi')
56 logger.setLevel(logging.getLevelName(loglevel))
57
58
59 In your TestCase class you can then get the value of a specific option with
60 the ``optval`` method::
61
62 class MyTestCase(TestCase):
63 def test_foo(self):
64 loglevel = self.optval('loglevel')
65 # ...
66
67
68 You can also tag your tests for fine filtering
69
70 With those tag::
71
72 from logilab.common.testlib import tag, TestCase
73
74 class Exemple(TestCase):
75
76 @tag('rouge', 'carre')
77 def toto(self):
78 pass
79
80 @tag('carre', 'vert')
81 def tata(self):
82 pass
83
84 @tag('rouge')
85 def titi(test):
86 pass
87
88 you can filter the function with a simple python expression
89
90 * ``toto`` and ``titi`` match ``rouge``
91 * ``toto``, ``tata`` and ``titi``, match ``rouge or carre``
92 * ``tata`` and ``titi`` match ``rouge ^ carre``
93 * ``titi`` match ``rouge and not carre``
94 """
95
96 from __future__ import print_function
97
98 __docformat__ = "restructuredtext en"
99
# usage string displayed by the OptionParser built in make_parser()
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]

examples:

pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
pytest path/to/mytests.py -m '(not long and database) or regr'

pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py -s not (will skip test_notthisone)
"""

# whether design-by-contract support is enabled (toggled by the -d option)
ENABLE_DBC = False
# file remembering succeeded tests across runs (used by -x/-R options)
FILE_RESTART = ".pytest.restart"
115
116 import os, sys, re
117 import os.path as osp
118 from time import time, clock
119 import warnings
120 import types
121 from inspect import isgeneratorfunction, isclass
122 from contextlib import contextmanager
123
124 from logilab.common.fileutils import abspath_listdir
125 from logilab.common import textutils
126 from logilab.common import testlib, STD_BLACKLIST
127 # use the same unittest module as testlib
128 from logilab.common.testlib import unittest, start_interactive_mode
129 from logilab.common.deprecation import deprecated
130 import doctest
131
132 import unittest as unittest_legacy
133 if not getattr(unittest_legacy, "__package__", None):
134 try:
135 import unittest2.suite as unittest_suite
136 except ImportError:
137 sys.exit("You have to install python-unittest2 to use this module")
138 else:
139 import unittest.suite as unittest_suite
140
141 try:
142 import django
143 from logilab.common.modutils import modpath_from_file, load_module_from_modp ath
144 DJANGO_FOUND = True
145 except ImportError:
146 DJANGO_FOUND = False
147
148 CONF_FILE = 'pytestconf.py'
149
150 ## coverage pausing tools
151
@contextmanager
def replace_trace(trace=None):
    """Temporarily install *trace* as the global trace function.

    The previously installed trace function is restored on exit.
    """
    previous = sys.gettrace()
    sys.settrace(trace)
    try:
        yield
    finally:
        # Work around a pycoverage bug: the saved trace object may be a
        # non-callable wrapper exposing the real function as `.pytrace`, see
        # https://bitbucket.org/ned/coveragepy/issue/123
        restore = previous
        if restore is not None and not callable(restore) \
                and hasattr(restore, 'pytrace'):
            restore = restore.pytrace
        sys.settrace(restore)
166
167
def pause_trace():
    """Return a context manager suspending any tracing while active."""
    return replace_trace(None)
171
class TraceController(object):
    """Deprecated push/pop style interface over the pause_trace() manager."""

    # stack of currently entered pause_trace() context managers
    ctx_stack = []

    @classmethod
    @deprecated('[lgc 0.63.1] Use the pause_trace() context manager')
    def pause_tracing(cls):
        manager = pause_trace()
        cls.ctx_stack.append(manager)
        manager.__enter__()

    @classmethod
    @deprecated('[lgc 0.63.1] Use the pause_trace() context manager')
    def resume_tracing(cls):
        manager = cls.ctx_stack.pop()
        manager.__exit__(None, None, None)


pause_tracing = TraceController.pause_tracing
resume_tracing = TraceController.resume_tracing
189
190
def nocoverage(func):
    """Decorator that disables tracing (hence coverage) while *func* runs."""
    # already wrapped (or explicitly marked): hand it back unchanged
    if hasattr(func, 'uncovered'):
        return func
    func.uncovered = True

    def not_covered(*args, **kwargs):
        with pause_trace():
            return func(*args, **kwargs)
    # mark the wrapper too, so double decoration is a no-op
    not_covered.uncovered = True
    return not_covered
202
203 ## end of coverage pausing tools
204
205
# Match unittest-style file names: "test*.py", "unittest*.py" or
# "smoketest.py".  Raw string: the previous "...\.py$" literal relied on an
# invalid escape sequence ("\." in a non-raw string), which is a
# DeprecationWarning / SyntaxWarning on modern Pythons.
TESTFILE_RE = re.compile(r"^((unit)?test.*|smoketest)\.py$")
def this_is_a_testfile(filename):
    """Return a truthy match object when `filename` seems to be a test file.

    :param filename: path (absolute or relative) of the candidate file;
        only its basename is considered
    :return: a match object when the basename matches, else None
    """
    return TESTFILE_RE.match(osp.basename(filename))
210
# directory names that conventionally hold tests
TESTDIR_RE = re.compile("^(unit)?tests?$")
def this_is_a_testdir(dirpath):
    """Return a truthy match object when `dirpath` seems to be a test
    directory (its basename is "test", "tests", "unittest" or "unittests")."""
    basename = osp.basename(dirpath)
    return TESTDIR_RE.match(basename)
215
216
def load_pytest_conf(path, parser):
    """loads a ``pytestconf.py`` file and update default parser
    and / or tester.

    :param path: path of the ``pytestconf.py`` file
    :param parser: OptionParser instance handed to the file's
        ``update_parser`` hook, when defined
    :return: the ``CustomPyTester`` class defined by the configuration
        file, or the default `PyTester` class
    """
    namespace = {}
    # close the configuration file deterministically instead of leaking the
    # open file object until garbage collection
    with open(path, 'rb') as conf_file:
        exec(conf_file.read(), namespace)
    if 'update_parser' in namespace:
        namespace['update_parser'](parser)
    return namespace.get('CustomPyTester', PyTester)
226
227
def project_root(parser, projdir=None):
    """try to find project's root and add it to sys.path

    Walks up from `projdir` while the current directory looks like a test
    directory or a python package, loading any ``pytestconf.py`` met on the
    way.

    :param parser: OptionParser instance passed to configuration files
    :param projdir: directory to start from; defaults to the *current*
        working directory, evaluated at call time.  The previous
        ``projdir=os.getcwd()`` default was computed once at import time
        and went stale when the process later changed directory.
    :return: a ``(rootdir, testercls)`` tuple
    """
    if projdir is None:
        projdir = os.getcwd()
    previousdir = curdir = osp.abspath(projdir)
    testercls = PyTester
    conf_file_path = osp.join(curdir, CONF_FILE)
    if osp.isfile(conf_file_path):
        testercls = load_pytest_conf(conf_file_path, parser)
    while this_is_a_testdir(curdir) or \
              osp.isfile(osp.join(curdir, '__init__.py')):
        newdir = osp.normpath(osp.join(curdir, os.pardir))
        if newdir == curdir:
            break
        previousdir = curdir
        curdir = newdir
        conf_file_path = osp.join(curdir, CONF_FILE)
        if osp.isfile(conf_file_path):
            testercls = load_pytest_conf(conf_file_path, parser)
    return previousdir, testercls
246
247
class GlobalTestReport(object):
    """Accumulates global statistics over every executed test module."""

    def __init__(self):
        # counters for individual tests
        self.ran = 0
        self.skipped = 0
        self.failures = 0
        self.errors = 0
        # cumulated wall-clock and CPU time, in seconds
        self.ttime = 0
        self.ctime = 0
        # per-module bookkeeping: count and (name, problems, ran) triples
        self.modulescount = 0
        self.errmodules = []

    def feed(self, filename, testresult, ttime, ctime):
        """Integrate the outcome of one test module into the statistics."""
        testcount = testresult.testsRun
        self.ran += testcount
        self.skipped += len(getattr(testresult, 'skipped', ()))
        self.failures += len(testresult.failures)
        self.errors += len(testresult.errors)
        self.ttime += ttime
        self.ctime += ctime
        self.modulescount += 1
        if testresult.wasSuccessful():
            return
        nb_problems = len(testresult.failures) + len(testresult.errors)
        self.errmodules.append((filename[:-3], nb_problems, testcount))

    def failed_to_test_module(self, filename):
        """Record a module that unittest could not even import."""
        self.errors += 1
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 1, 1))

    def skip_module(self, filename):
        """Record a module that was skipped as a whole."""
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 0, 0))

    def __str__(self):
        """Render the accumulated statistics as a short human-readable
        report (presentation only)."""
        summary = ['Ran %s test cases in %.2fs (%.2fs CPU)'
                   % (self.ran, self.ttime, self.ctime)]
        for count, label in ((self.errors, 'errors'),
                             (self.failures, 'failures'),
                             (self.skipped, 'skipped')):
            if count:
                summary.append('%s %s' % (count, label))
        modulesok = self.modulescount - len(self.errmodules)
        if self.errors or self.failures:
            line2 = '%s modules OK (%s failed)' % (modulesok,
                                                   len(self.errmodules))
            descr = ', '.join(['%s [%s/%s]' % info
                               for info in self.errmodules])
            line3 = '\nfailures: %s' % descr
        elif modulesok:
            line2 = 'All %s modules OK' % modulesok
            line3 = ''
        else:
            return ''
        return '%s\n%s%s' % (', '.join(summary), line2, line3)
309
310
311
def remove_local_modules_from_sys(testdir):
    """remove all modules from cache that come from `testdir`

    This is used to avoid strange side-effects when using the
    testall() mode of pytest.
    For instance, if we run pytest on this tree::

        A/test/test_utils.py
        B/test/test_utils.py

    we **have** to clean sys.modules to make sure the correct test_utils
    module is ran in B

    :param testdir: absolute path of the test directory being left
    """
    for modname, mod in list(sys.modules.items()):
        if mod is None:
            continue
        modfile = getattr(mod, '__file__', None)
        if modfile is None:
            # built-in modules (sys, imp, marshal, ...) have no __file__ at
            # all, and namespace packages may expose __file__ = None; the
            # previous code only checked hasattr and crashed with TypeError
            # on osp.isabs(None) in the latter case
            continue
        # if modfile is not an absolute path, it was probably loaded locally
        # during the tests
        if not osp.isabs(modfile) or modfile.startswith(testdir):
            del sys.modules[modname]
336
337
338
class PyTester(object):
    """encapsulates testrun logic: walks test directories, runs each test
    file through SkipAwareTestProgram and accumulates a GlobalTestReport"""

    def __init__(self, cvg, options):
        # global statistics accumulated over every test module
        self.report = GlobalTestReport()
        # coverage measurement object, may be None
        self.cvg = cvg
        # optparse Values produced by make_parser()/parseargs()
        self.options = options
        # True while the FILE_RESTART file still has to be overwritten
        self.firstwrite = True
        # explicit exit code, overriding the failures+errors count when set
        self._errcode = None

    def show_report(self):
        """prints the global report on stdout"""
        # everything has been ran, print report
        print("*" * 79)
        print(self.report)

    def get_errcode(self):
        """exit code for the run: the explicitly set code when any,
        else the number of failures plus errors"""
        # errcode set explicitly
        if self._errcode is not None:
            return self._errcode
        return self.report.failures + self.report.errors

    def set_errcode(self, errcode):
        """force the exit code returned by get_errcode()"""
        self._errcode = errcode
    errcode = property(get_errcode, set_errcode)

    def testall(self, exitfirst=False):
        """walks through current working directory, finds something
        which can be considered as a testdir and runs every test there
        """
        here = os.getcwd()
        for dirname, dirs, _ in os.walk(here):
            # prune blacklisted directories in place so os.walk does not
            # descend into them
            for skipped in STD_BLACKLIST:
                if skipped in dirs:
                    dirs.remove(skipped)
            basename = osp.basename(dirname)
            if this_is_a_testdir(basename):
                print("going into", dirname)
                # we found a testdir, let's explore it !
                if not self.testonedir(dirname, exitfirst):
                    break
                # do not recurse below an already tested directory
                dirs[:] = []
        if self.report.ran == 0:
            print("no test dir found testing here:", here)
            # if no test was found during the visit, consider
            # the local directory as a test directory even if
            # it doesn't have a traditional test directory name
            self.testonedir(here)

    def testonedir(self, testdir, exitfirst=False):
        """finds each testfile in the `testdir` and runs it

        return true when all tests has been executed, false if exitfirst and
        some test has failed.
        """
        for filename in abspath_listdir(testdir):
            if this_is_a_testfile(filename):
                if self.options.exitfirst and not self.options.restart:
                    # overwrite restart file
                    try:
                        restartfile = open(FILE_RESTART, "w")
                        restartfile.close()
                    except Exception:
                        print("Error while overwriting succeeded test file :",
                              osp.join(os.getcwd(), FILE_RESTART),
                              file=sys.__stderr__)
                        raise
                # run test and collect information
                prog = self.testfile(filename, batchmode=True)
                if exitfirst and (prog is None or not prog.result.wasSuccessful()):
                    return False
                self.firstwrite = True
        # clean local modules
        remove_local_modules_from_sys(testdir)
        return True

    def testfile(self, filename, batchmode=False):
        """runs every test in `filename`

        :param filename: an absolute path pointing to a unittest file
        :param batchmode: when True the test program does not sys.exit()
        :return: the SkipAwareTestProgram instance, or None when the module
            was skipped or could not be loaded
        """
        here = os.getcwd()
        dirname = osp.dirname(filename)
        # run the test file from its own directory
        if dirname:
            os.chdir(dirname)
        # overwrite restart file if it has not been done already
        if self.options.exitfirst and not self.options.restart and self.firstwrite:
            try:
                restartfile = open(FILE_RESTART, "w")
                restartfile.close()
            except Exception:
                print("Error while overwriting succeeded test file :",
                      osp.join(os.getcwd(), FILE_RESTART), file=sys.__stderr__)
                raise
        modname = osp.basename(filename)[:-3]
        print((' %s ' % osp.basename(filename)).center(70, '='),
              file=sys.__stderr__)
        try:
            # NOTE(review): time.clock() was removed in Python 3.8; this
            # module predates that -- confirm before running on 3.8+
            tstart, cstart = time(), clock()
            try:
                testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg,
                                                options=self.options, outstream=sys.stderr)
            except KeyboardInterrupt:
                raise
            except SystemExit as exc:
                # remember the requested exit code, then propagate
                self.errcode = exc.code
                raise
            except testlib.SkipTest:
                print("Module skipped:", filename)
                self.report.skip_module(filename)
                return None
            except Exception:
                # module could not even be imported/loaded: count as error
                self.report.failed_to_test_module(filename)
                print('unhandled exception occurred while testing', modname,
                      file=sys.stderr)
                import traceback
                traceback.print_exc(file=sys.stderr)
                return None

            tend, cend = time(), clock()
            ttime, ctime = (tend - tstart), (cend - cstart)
            self.report.feed(filename, testprog.result, ttime, ctime)
            return testprog
        finally:
            # always restore the original working directory
            if dirname:
                os.chdir(here)
465
466
467
class DjangoTester(PyTester):
    """PyTester variant wrapping each test file in django test-environment
    setup / teardown (test database creation and destruction)"""

    def load_django_settings(self, dirname):
        """try to find project's setting and load it"""
        curdir = osp.abspath(dirname)
        previousdir = curdir
        # walk up while no settings.py is found but we are still inside a
        # python package
        while not osp.isfile(osp.join(curdir, 'settings.py')) and \
                  osp.isfile(osp.join(curdir, '__init__.py')):
            newdir = osp.normpath(osp.join(curdir, os.pardir))
            if newdir == curdir:
                raise AssertionError('could not find settings.py')
            previousdir = curdir
            curdir = newdir
        # late django initialization
        # NOTE(review): setup_environ was dropped in later django versions;
        # this presumably supports only old django releases -- confirm
        settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
        from django.core.management import setup_environ
        setup_environ(settings)
        settings.DEBUG = False
        self.settings = settings
        # add settings dir to pythonpath since it's the project's root
        if curdir not in sys.path:
            sys.path.insert(1, curdir)

    def before_testfile(self):
        """set up the django test environment and create the test db"""
        # Those imports must be done **after** setup_environ was called
        from django.test.utils import setup_test_environment
        from django.test.utils import create_test_db
        setup_test_environment()
        create_test_db(verbosity=0)
        self.dbname = self.settings.TEST_DATABASE_NAME

    def after_testfile(self):
        """tear down the django test environment and destroy the test db"""
        # Those imports must be done **after** setup_environ was called
        from django.test.utils import teardown_test_environment
        from django.test.utils import destroy_test_db
        teardown_test_environment()
        print('destroying', self.dbname)
        destroy_test_db(self.dbname, verbosity=0)

    def testall(self, exitfirst=False):
        """walks through current working directory, finds something
        which can be considered as a testdir and runs every test there
        """
        for dirname, dirs, files in os.walk(os.getcwd()):
            # prune version control directories
            for skipped in ('CVS', '.svn', '.hg'):
                if skipped in dirs:
                    dirs.remove(skipped)
            if 'tests.py' in files:
                if not self.testonedir(dirname, exitfirst):
                    break
                dirs[:] = []
            else:
                basename = osp.basename(dirname)
                if basename in ('test', 'tests'):
                    print("going into", dirname)
                    # we found a testdir, let's explore it !
                    if not self.testonedir(dirname, exitfirst):
                        break
                    dirs[:] = []

    def testonedir(self, testdir, exitfirst=False):
        """finds each testfile in the `testdir` and runs it

        return true when all tests has been executed, false if exitfirst and
        some test has failed.
        """
        # special django behaviour : if tests are splitted in several files,
        # remove the main tests.py file and tests each test file separately
        testfiles = [fpath for fpath in abspath_listdir(testdir)
                     if this_is_a_testfile(fpath)]
        if len(testfiles) > 1:
            try:
                testfiles.remove(osp.join(testdir, 'tests.py'))
            except ValueError:
                pass
        for filename in testfiles:
            # run test and collect information
            prog = self.testfile(filename, batchmode=True)
            if exitfirst and (prog is None or not prog.result.wasSuccessful()):
                return False
        # clean local modules
        remove_local_modules_from_sys(testdir)
        return True

    def testfile(self, filename, batchmode=False):
        """runs every test in `filename`

        :param filename: an absolute path pointing to a unittest file
        :param batchmode: when True the test program does not sys.exit()
        :return: the SkipAwareTestProgram instance, or None on error
        """
        here = os.getcwd()
        dirname = osp.dirname(filename)
        if dirname:
            os.chdir(dirname)
        self.load_django_settings(dirname)
        modname = osp.basename(filename)[:-3]
        print((' %s ' % osp.basename(filename)).center(70, '='),
              file=sys.stderr)
        try:
            try:
                tstart, cstart = time(), clock()
                self.before_testfile()
                testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg)
                tend, cend = time(), clock()
                ttime, ctime = (tend - tstart), (cend - cstart)
                self.report.feed(filename, testprog.result, ttime, ctime)
                return testprog
            except SystemExit:
                raise
            except Exception as exc:
                import traceback
                traceback.print_exc()
                self.report.failed_to_test_module(filename)
                print('unhandled exception occurred while testing', modname)
                print('error: %s' % exc)
                return None
        finally:
            # always destroy the test db and restore the working directory
            self.after_testfile()
            if dirname:
                os.chdir(here)
587
588
def make_parser():
    """creates the OptionParser instance used by the pytest command line
    """
    from optparse import OptionParser
    parser = OptionParser(usage=PYTEST_DOC)

    # options accumulated here are later replayed on sys.argv so that
    # unittest_main sees them (see run())
    parser.newargs = []
    def rebuild_cmdline(option, opt, value, parser):
        """carry the option to unittest_main"""
        parser.newargs.append(opt)

    def rebuild_and_store(option, opt, value, parser):
        """carry the option to unittest_main and store
        the value on current parser
        """
        parser.newargs.append(opt)
        setattr(parser.values, option.dest, True)

    def capture_and_rebuild(option, opt, value, parser):
        """silence DeprecationWarning then carry the option to unittest_main"""
        # NOTE(review): not referenced by any option below -- confirm it is
        # still needed
        warnings.simplefilter('ignore', DeprecationWarning)
        rebuild_cmdline(option, opt, value, parser)

    # pytest options
    parser.add_option('-t', dest='testdir', default=None,
                      help="directory where the tests will be found")
    parser.add_option('-d', dest='dbc', default=False,
                      action="store_true", help="enable design-by-contract")
    # unittest_main options provided and passed through pytest
    parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
                      action="callback", help="Verbose output")
    parser.add_option('-i', '--pdb', callback=rebuild_and_store,
                      dest="pdb", action="callback",
                      help="Enable test failure inspection")
    parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
                      dest="exitfirst", default=False,
                      action="callback", help="Exit on first failure "
                      "(only make sense when pytest run one test file)")
    parser.add_option('-R', '--restart', callback=rebuild_and_store,
                      dest="restart", default=False,
                      action="callback",
                      help="Restart tests from where it failed (implies exitfirst) "
                      "(only make sense if tests previously ran with exitfirst only)")
    parser.add_option('--color', callback=rebuild_cmdline,
                      action="callback",
                      help="colorize tracebacks")
    parser.add_option('-s', '--skip',
                      # XXX: I wish I could use the callback action but it
                      # doesn't seem to be able to get the value
                      # associated to the option
                      action="store", dest="skipped", default=None,
                      help="test names matching this name will be skipped "
                      "to skip several patterns, use commas")
    parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
                      action="callback", help="Minimal output")
    parser.add_option('-P', '--profile', default=None, dest='profile',
                      help="Profile execution and store data in the given file")
    parser.add_option('-m', '--match', default=None, dest='tags_pattern',
                      help="only execute test whose tag match the current pattern")

    if DJANGO_FOUND:
        parser.add_option('-J', '--django', dest='django', default=False,
                          action="store_true",
                          help='use pytest for django test cases')
    return parser
653
654
def parseargs(parser):
    """Parse the command line.

    Returns the processed options and the explicitly given test file (or
    None); the arguments intended for unittest_main() are accumulated on
    ``parser.newargs``.
    """
    options, args = parser.parse_args()
    # a single .py argument names the test file to run explicitly
    filenames = [arg for arg in args if arg.endswith('.py')]
    explicitfile = None
    if filenames:
        if len(filenames) > 1:
            parser.error("only one filename is acceptable")
        explicitfile = filenames[0]
        args.remove(explicitfile)
    # someone wants design-by-contract checks
    testlib.ENABLE_DBC = options.dbc
    newargs = parser.newargs
    if options.skipped:
        newargs.extend(['--skip', options.skipped])
    # restart implies exitfirst
    if options.restart:
        options.exitfirst = True
    # append additional args to the new sys.argv and let unittest_main
    # do the rest
    newargs += args
    return options, explicitfile
681
682
683
def run():
    """pytest entry point: parse the command line, pick the appropriate
    tester class and run the requested tests, exiting with the error count
    """
    parser = make_parser()
    rootdir, testercls = project_root(parser)
    options, explicitfile = parseargs(parser)
    # mock a new command line
    sys.argv[1:] = parser.newargs
    cvg = None
    if not '' in sys.path:
        # presumably so that local test modules are importable -- confirm
        sys.path.insert(0, '')
    if DJANGO_FOUND and options.django:
        tester = DjangoTester(cvg, options)
    else:
        tester = testercls(cvg, options)
    # decide what to run: a single file, one directory, or everything
    if explicitfile:
        cmd, args = tester.testfile, (explicitfile,)
    elif options.testdir:
        cmd, args = tester.testonedir, (options.testdir, options.exitfirst)
    else:
        cmd, args = tester.testall, (options.exitfirst,)
    try:
        try:
            if options.profile:
                # NOTE(review): hotshot was removed in Python 3; the -P
                # option only works on Python 2 -- confirm before relying on it
                import hotshot
                prof = hotshot.Profile(options.profile)
                prof.runcall(cmd, *args)
                prof.close()
                print('profile data saved in', options.profile)
            else:
                cmd(*args)
        except SystemExit:
            raise
        except:
            import traceback
            traceback.print_exc()
    finally:
        # always print the report and exit with the accumulated error count
        tester.show_report()
        sys.exit(tester.errcode)
721
class SkipAwareTestProgram(unittest.TestProgram):
    """TestProgram variant supporting skip/match patterns, pdb inspection,
    exit-on-first-failure and restart of previously failed runs."""
    # XXX: don't try to stay close to unittest.py, use optparse
    USAGE = """\
Usage: %(progName)s [options] [test] [...]

Options:
  -h, --help       Show this message
  -v, --verbose    Verbose output
  -i, --pdb        Enable test failure inspection
  -x, --exitfirst  Exit on first failure
  -s, --skip       skip test matching this pattern (no regexp for now)
  -q, --quiet      Minimal output
  --color          colorize tracebacks

  -m, --match      Run only test whose tag match this pattern

  -P, --profile    FILE: Run the tests using cProfile and saving results
                   in FILE

Examples:
  %(progName)s                               - run default set of tests
  %(progName)s MyTestSuite                   - run suite 'MyTestSuite'
  %(progName)s MyTestCase.testSomething      - run MyTestCase.testSomething
  %(progName)s MyTestCase                    - run all 'test*' test methods
                                               in MyTestCase
"""
    def __init__(self, module='__main__', defaultTest=None, batchmode=False,
                 cvg=None, options=None, outstream=sys.stderr):
        # batchmode: when True, do not sys.exit() after the run
        self.batchmode = batchmode
        # coverage object, may be None
        self.cvg = cvg
        # optparse Values shared with pytest, may be None
        self.options = options
        self.outstream = outstream
        super(SkipAwareTestProgram, self).__init__(
            module=module, defaultTest=defaultTest,
            testLoader=NonStrictTestLoader())

    def parseArgs(self, argv):
        """parse command line arguments (see USAGE) and build self.test"""
        self.pdbmode = False
        self.exitfirst = False
        self.skipped_patterns = []
        self.test_pattern = None
        self.tags_pattern = None
        self.colorize = False
        self.profile_name = None
        import getopt
        try:
            options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:',
                                          ['help', 'verbose', 'quiet', 'pdb',
                                           'exitfirst', 'restart',
                                           'skip=', 'color', 'match=', 'profile='])
            for opt, value in options:
                if opt in ('-h', '-H', '--help'):
                    self.usageExit()
                if opt in ('-i', '--pdb'):
                    self.pdbmode = True
                if opt in ('-x', '--exitfirst'):
                    self.exitfirst = True
                if opt in ('-r', '--restart'):
                    self.restart = True
                    self.exitfirst = True
                if opt in ('-q', '--quiet'):
                    self.verbosity = 0
                if opt in ('-v', '--verbose'):
                    self.verbosity = 2
                if opt in ('-s', '--skip'):
                    # NOTE(review): splits on ", " (comma-space), not ","
                    # as the help text suggests -- confirm intended
                    self.skipped_patterns = [pat.strip() for pat in
                                             value.split(', ')]
                if opt == '--color':
                    self.colorize = True
                if opt in ('-m', '--match'):
                    # Record the tag pattern both locally and on the shared
                    # options object, where SkipAwareTextTestRunner reads it
                    # back as `tags_pattern`.  The previous code did
                    # ``self.options["tag_pattern"] = value``, which raised
                    # TypeError (optparse Values is not subscriptable) and
                    # used a key name that nothing ever read.
                    self.tags_pattern = value
                    if self.options is not None:
                        self.options.tags_pattern = value
                if opt in ('-P', '--profile'):
                    self.profile_name = value
            self.testLoader.skipped_patterns = self.skipped_patterns
            if len(args) == 0 and self.defaultTest is None:
                # no test named on the command line: prefer an explicit
                # suite() function, else load every test of the module
                suitefunc = getattr(self.module, 'suite', None)
                if isinstance(suitefunc, (types.FunctionType,
                                          types.MethodType)):
                    self.test = self.module.suite()
                else:
                    self.test = self.testLoader.loadTestsFromModule(self.module)
                return
            if len(args) > 0:
                self.test_pattern = args[0]
                self.testNames = args
            else:
                self.testNames = (self.defaultTest, )
            self.createTests()
        except getopt.error as msg:
            self.usageExit(msg)

    def runTests(self):
        """run the tests, under cProfile when -P was given"""
        if self.profile_name:
            import cProfile
            cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name)
        else:
            return self._runTests()

    def _runTests(self):
        """build the runner, prune already-succeeded tests when restarting,
        then run and post-process the result"""
        self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity,
                                                  stream=self.outstream,
                                                  exitfirst=self.exitfirst,
                                                  pdbmode=self.pdbmode,
                                                  cvg=self.cvg,
                                                  test_pattern=self.test_pattern,
                                                  skipped_patterns=self.skipped_patterns,
                                                  colorize=self.colorize,
                                                  batchmode=self.batchmode,
                                                  options=self.options)

        def removeSucceededTests(obj, succTests):
            """ Recursive function that removes succTests from
            a TestSuite or TestCase
            """
            if isinstance(obj, unittest.TestSuite):
                removeSucceededTests(obj._tests, succTests)
            if isinstance(obj, list):
                for el in obj[:]:
                    if isinstance(el, unittest.TestSuite):
                        removeSucceededTests(el, succTests)
                    elif isinstance(el, unittest.TestCase):
                        descr = '.'.join((el.__class__.__module__,
                                          el.__class__.__name__,
                                          el._testMethodName))
                        if descr in succTests:
                            obj.remove(el)
        # take care, self.options may be None
        if getattr(self.options, 'restart', False):
            # retrieve succeeded tests from FILE_RESTART
            try:
                restartfile = open(FILE_RESTART, 'r')
                try:
                    succeededtests = list(elem.rstrip('\n\r') for elem in
                                          restartfile.readlines())
                    removeSucceededTests(self.test, succeededtests)
                finally:
                    restartfile.close()
            except Exception as ex:
                raise Exception("Error while reading succeeded tests into %s: %s"
                                % (osp.join(os.getcwd(), FILE_RESTART), ex))

        result = self.testRunner.run(self.test)
        # help garbage collection: we want TestSuite, which hold refs to every
        # executed TestCase, to be gc'ed
        del self.test
        if getattr(result, "debuggers", None) and \
               getattr(self, "pdbmode", None):
            start_interactive_mode(result)
        if not getattr(self, "batchmode", None):
            sys.exit(not result.wasSuccessful())
        self.result = result
875
class SkipAwareTextTestRunner(unittest.TextTestRunner):
    """TextTestRunner honouring skip patterns, test-name patterns and tag
    patterns, with optional colorized FAILED/OK status output."""

    def __init__(self, stream=sys.stderr, verbosity=1,
                 exitfirst=False, pdbmode=False, cvg=None, test_pattern=None,
                 skipped_patterns=(), colorize=False, batchmode=False,
                 options=None):
        super(SkipAwareTextTestRunner, self).__init__(stream=stream,
                                                      verbosity=verbosity)
        self.exitfirst = exitfirst
        self.pdbmode = pdbmode
        self.cvg = cvg
        self.test_pattern = test_pattern
        self.skipped_patterns = skipped_patterns
        self.colorize = colorize
        self.batchmode = batchmode
        self.options = options

    def _this_is_skipped(self, testedname):
        """return True when `testedname` contains one of the skip patterns"""
        return any([(pat in testedname) for pat in self.skipped_patterns])

    def _runcondition(self, test, skipgenerator=True):
        """decide whether `test` (TestCase, function or method) should run,
        according to skip patterns, the test pattern and tag patterns"""
        if isinstance(test, testlib.InnerTest):
            testname = test.name
        else:
            if isinstance(test, testlib.TestCase):
                meth = test._get_test_method()
                testname = '%s.%s' % (test.__name__, meth.__name__)
            elif isinstance(test, types.FunctionType):
                func = test
                testname = func.__name__
            elif isinstance(test, types.MethodType):
                cls = test.__self__.__class__
                testname = '%s.%s' % (cls.__name__, test.__name__)
            else:
                return True # Not sure when this happens
            if isgeneratorfunction(test) and skipgenerator:
                return self.does_match_tags(test) # Let inner tests decide at run time
        if self._this_is_skipped(testname):
            return False # this was explicitly skipped
        if self.test_pattern is not None:
            try:
                classpattern, testpattern = self.test_pattern.split('.')
                klass, name = testname.split('.')
                if classpattern not in klass or testpattern not in name:
                    return False
            except ValueError:
                # pattern or name is not dotted: plain substring match
                if self.test_pattern not in testname:
                    return False

        return self.does_match_tags(test)

    def does_match_tags(self, test):
        """return True when `test`'s tags match the configured tag pattern
        (or when no pattern is configured)"""
        if self.options is not None:
            tags_pattern = getattr(self.options, 'tags_pattern', None)
            if tags_pattern is not None:
                tags = getattr(test, 'tags', testlib.Tags())
                if tags.inherit and isinstance(test, types.MethodType):
                    # Python 3 bound methods have no `im_class`: derive the
                    # class from __self__, as _runcondition() already does
                    # (the previous `test.im_class` raised AttributeError)
                    tags = tags | getattr(test.__self__.__class__, 'tags',
                                          testlib.Tags())
                return tags.match(tags_pattern)
        return True # no pattern

    def _makeResult(self):
        return testlib.SkipAwareTestResult(self.stream, self.descriptions,
                                           self.verbosity, self.exitfirst,
                                           self.pdbmode, self.cvg, self.colorize)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time()
        test(result, runcondition=self._runcondition, options=self.options)
        stopTime = time()
        timeTaken = stopTime - startTime
        result.printErrors()
        if not self.batchmode:
            self.stream.writeln(result.separator2)
            run = result.testsRun
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
            self.stream.writeln()
            if not result.wasSuccessful():
                if self.colorize:
                    self.stream.write(textutils.colorize_ansi("FAILED", color='red'))
                else:
                    self.stream.write("FAILED")
            else:
                if self.colorize:
                    self.stream.write(textutils.colorize_ansi("OK", color='green'))
                else:
                    self.stream.write("OK")
            failed, errored, skipped = map(len, (result.failures,
                                                 result.errors,
                                                 result.skipped))

            det_results = []
            for name, value in (("failures", result.failures),
                                ("errors", result.errors),
                                ("skipped", result.skipped)):
                if value:
                    det_results.append("%s=%i" % (name, len(value)))
            if det_results:
                self.stream.write(" (")
                self.stream.write(', '.join(det_results))
                self.stream.write(")")
            self.stream.writeln("")
        return result
982
983 class NonStrictTestLoader(unittest.TestLoader):
984 """
985 Overrides default testloader to be able to omit classname when
986 specifying tests to run on command line.
987
988 For example, if the file test_foo.py contains ::
989
990 class FooTC(TestCase):
991 def test_foo1(self): # ...
992 def test_foo2(self): # ...
993 def test_bar1(self): # ...
994
995 class BarTC(TestCase):
996 def test_bar2(self): # ...
997
998 'python test_foo.py' will run the 3 tests in FooTC
999 'python test_foo.py FooTC' will run the 3 tests in FooTC
1000 'python test_foo.py test_foo' will run test_foo1 and test_foo2
1001 'python test_foo.py test_foo1' will run test_foo1
1002 'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2
1003 """
1004
1005 def __init__(self):
1006 self.skipped_patterns = ()
1007
1008 # some magic here to accept empty list by extending
1009 # and to provide callable capability
1010 def loadTestsFromNames(self, names, module=None):
1011 suites = []
1012 for name in names:
1013 suites.extend(self.loadTestsFromName(name, module))
1014 return self.suiteClass(suites)
1015
1016 def _collect_tests(self, module):
1017 tests = {}
1018 for obj in vars(module).values():
1019 if isclass(obj) and issubclass(obj, unittest.TestCase):
1020 classname = obj.__name__
1021 if classname[0] == '_' or self._this_is_skipped(classname):
1022 continue
1023 methodnames = []
1024 # obj is a TestCase class
1025 for attrname in dir(obj):
1026 if attrname.startswith(self.testMethodPrefix):
1027 attr = getattr(obj, attrname)
1028 if callable(attr):
1029 methodnames.append(attrname)
1030 # keep track of class (obj) for convenience
1031 tests[classname] = (obj, methodnames)
1032 return tests
1033
1034 def loadTestsFromSuite(self, module, suitename):
1035 try:
1036 suite = getattr(module, suitename)()
1037 except AttributeError:
1038 return []
1039 assert hasattr(suite, '_tests'), \
1040 "%s.%s is not a valid TestSuite" % (module.__name__, suitename)
1041 # python2.3 does not implement __iter__ on suites, we need to return
1042 # _tests explicitly
1043 return suite._tests
1044
1045 def loadTestsFromName(self, name, module=None):
1046 parts = name.split('.')
1047 if module is None or len(parts) > 2:
1048 # let the base class do its job here
1049 return [super(NonStrictTestLoader, self).loadTestsFromName(name)]
1050 tests = self._collect_tests(module)
1051 collected = []
1052 if len(parts) == 1:
1053 pattern = parts[0]
1054 if callable(getattr(module, pattern, None)
1055 ) and pattern not in tests:
1056 # consider it as a suite
1057 return self.loadTestsFromSuite(module, pattern)
1058 if pattern in tests:
1059 # case python unittest_foo.py MyTestTC
1060 klass, methodnames = tests[pattern]
1061 for methodname in methodnames:
1062 collected = [klass(methodname)
1063 for methodname in methodnames]
1064 else:
1065 # case python unittest_foo.py something
1066 for klass, methodnames in tests.values():
1067 # skip methodname if matched by skipped_patterns
1068 for skip_pattern in self.skipped_patterns:
1069 methodnames = [methodname
1070 for methodname in methodnames
1071 if skip_pattern not in methodname]
1072 collected += [klass(methodname)
1073 for methodname in methodnames
1074 if pattern in methodname]
1075 elif len(parts) == 2:
1076 # case "MyClass.test_1"
1077 classname, pattern = parts
1078 klass, methodnames = tests.get(classname, (None, []))
1079 for methodname in methodnames:
1080 collected = [klass(methodname) for methodname in methodnames
1081 if pattern in methodname]
1082 return collected
1083
1084 def _this_is_skipped(self, testedname):
1085 return any([(pat in testedname) for pat in self.skipped_patterns])
1086
1087 def getTestCaseNames(self, testCaseClass):
1088 """Return a sorted sequence of method names found within testCaseClass
1089 """
1090 is_skipped = self._this_is_skipped
1091 classname = testCaseClass.__name__
1092 if classname[0] == '_' or is_skipped(classname):
1093 return []
1094 testnames = super(NonStrictTestLoader, self).getTestCaseNames(
1095 testCaseClass)
1096 return [testname for testname in testnames if not is_skipped(testname)]
1097
1098
1099 # The 2 functions below are modified versions of the TestSuite.run method
1100 # that is provided with unittest2 for python 2.6, in unittest2/suite.py
1101 # It is used to monkeypatch the original implementation to support
1102 # extra runcondition and options arguments (see in testlib.py)
1103
def _ts_run(self, result, runcondition=None, options=None):
    """Monkeypatch replacement for TestSuite.run (unittest2 / python < 2.7).

    Forwards the extra *runcondition* and *options* arguments down to
    _wrapped_run (see testlib.py), then flushes the class- and module-level
    teardowns once the whole suite has been walked, as the stock
    implementation does.
    """
    self._wrapped_run(result, runcondition=runcondition, options=options)
    self._tearDownPreviousClass(None, result)
    self._handleModuleTearDown(result)
    return result
1109
def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None):
    """Monkeypatch replacement for the TestSuite inner run loop.

    Identical to the unittest2 version except for the dispatch section
    marked below, which passes *runcondition* and *options* through to
    nested suites/tests, falling back to the stock call signature (via
    TypeError) for objects that do not understand them.
    """
    for test in self:
        if result.shouldStop:
            break
        if unittest_suite._isnotsuite(test):
            # plain test case: run class/module fixtures exactly as the
            # stock implementation does before executing the test
            self._tearDownPreviousClass(test, result)
            self._handleModuleFixture(test, result)
            self._handleClassSetUp(test, result)
            result._previousTestClass = test.__class__
            if (getattr(test.__class__, '_classSetupFailed', False) or
                getattr(result, '_moduleSetUpFailed', False)):
                continue

        # --- modifications to deal with _wrapped_run ---
        # original code is:
        #
        # if not debug:
        #     test(result)
        # else:
        #     test.debug()
        if hasattr(test, '_wrapped_run'):
            try:
                test._wrapped_run(result, debug, runcondition=runcondition, options=options)
            except TypeError:
                # nested suite with the unpatched signature
                test._wrapped_run(result, debug)
        elif not debug:
            try:
                test(result, runcondition, options)
            except TypeError:
                # stock TestCase.__call__ accepts only the result object
                test(result)
        else:
            test.debug()
        # --- end of modifications to deal with _wrapped_run ---
    return result
1144
if sys.version_info >= (2, 7):
    # The function below implements a modified version of the
    # TestSuite.run method that is provided with python 2.7, in
    # unittest/suite.py.  It rebinds the module-level _ts_run defined
    # above, so this 2.7-style signature (with *debug*) wins here.
    def _ts_run(self, result, debug=False, runcondition=None, options=None):
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            # outermost invocation: this call owns the final teardown
            result._testRunEntered = topLevel = True

        self._wrapped_run(result, debug, runcondition, options)

        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
1161
1162
def enable_dbc(*args):
    """
    Without arguments, return True if contracts can be enabled and should be
    enabled (see option -d), return False otherwise.

    With arguments, return False if contracts can't or shouldn't be enabled,
    otherwise weave ContractAspect with items passed as arguments.
    """
    if not ENABLE_DBC:
        return False
    # the aspects package is optional: degrade gracefully when missing
    try:
        from logilab.aspects.weaver import weaver
        from logilab.aspects.lib.contracts import ContractAspect
    except ImportError:
        sys.stderr.write('Warning: logilab.aspects is not available. '
                         'Contracts disabled.')
        return False
    for module in args:
        weaver.weave_module(module, ContractAspect)
    return True
1183
1184
# monkeypatch unittest and doctest (ouch !)
# Replace the stock result/runner/loader/program classes with the
# skip-aware variants defined above in this module.
unittest._TextTestResult = testlib.SkipAwareTestResult
unittest.TextTestRunner = SkipAwareTextTestRunner
unittest.TestLoader = NonStrictTestLoader
unittest.TestProgram = SkipAwareTestProgram

if sys.version_info >= (2, 4):
    # rebase doctest cases on testlib's TestCase so they become skip-aware
    doctest.DocTestCase.__bases__ = (testlib.TestCase,)
    # XXX check python2.6 compatibility
    #doctest.DocTestCase._cleanups = []
    #doctest.DocTestCase._out = []
else:
    unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)

# install the run/_wrapped_run replacements defined above so suites accept
# the extra runcondition/options arguments
unittest.TestSuite.run = _ts_run
unittest.TestSuite._wrapped_run = _ts_wrapped_run
OLDNEW
« no previous file with comments | « third_party/logilab/logilab/common/pyro_ext.py ('k') | third_party/logilab/logilab/common/registry.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698