Index: third_party/typ/typ/tests/main_test.py
diff --git a/third_party/typ/typ/tests/main_test.py b/third_party/typ/typ/tests/main_test.py
index 02993755ea62c52e45b972eb25586fde69fbe65b..f0d027d3836bd3bcf1397cac4424fa9ecf753c56 100644
--- a/third_party/typ/typ/tests/main_test.py
+++ b/third_party/typ/typ/tests/main_test.py
@@ -141,17 +141,16 @@
 LOAD_TEST_PY = """
 import unittest
-
-class BaseTest(unittest.TestCase):
-    pass
-
-def method_fail(self):
-    self.fail()
-
-def method_pass(self):
-    pass
-
 def load_tests(_, _2, _3):
+    class BaseTest(unittest.TestCase):
+        pass
+
+    def method_fail(self):
+        self.fail()
+
+    def method_pass(self):
+        pass
+
     setattr(BaseTest, "test_fail", method_fail)
     setattr(BaseTest, "test_pass", method_pass)
     suite = unittest.TestSuite()
@@ -163,6 +162,82 @@
 LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
+MIXED_TEST_PY = """
+import unittest
+class SampleTest(unittest.TestCase):
+
+    def test_pass_0(self):
+        self.assertEqual(1, 1)
+
+    def test_pass_1(self):
+        self.assertEqual(1, 1)
+
+    def test_fail_0(self):
+        self.assertEqual(1, 2)
+
+    def test_fail_1(self):
+        raise Exception()
+
+    @unittest.skip('Skip for no reason')
+    def test_skip_0(self):
+        pass
+"""
+
+
+LOAD_MANY_TEST_PY = """
+import unittest
+
+def generate_test_case(test_method_name, test_type):
+    class GeneratedTest(unittest.TestCase):
+        pass
+
+    if test_type == 'pass':
+        def test_method(self):
+            self.assertEqual(1, 1)
+    elif test_type == 'fail':
+        def test_method(self):
+            self.assertEqual(1, 2)
+    elif test_type == 'skip':
+        def test_method(self):
+            self.skipTest('Skipped')
+    else:
+        raise Exception
+
+    setattr(GeneratedTest, test_method_name, test_method)
+    return GeneratedTest(test_method_name)
+
+
+def load_tests(loader, standard_tests, pattern):
+    del loader, standard_tests, pattern  # unused
+
+    suite = unittest.TestSuite()
+
+    passed_test_names = [
+        str('test_pass_%s' % i) for i in range(2, 15)]
+
+    failed_test_names = [
+        str('test_fail_%s' % i) for i in range(2, 10)]
+
+    skipped_test_names = [
+        str('test_skip_%s' % i) for i in range(1, 10)]
+
+    for test_method_name in passed_test_names:
+        suite.addTest(generate_test_case(test_method_name, 'pass'))
+
+    for test_method_name in failed_test_names:
+        suite.addTest(generate_test_case(test_method_name, 'fail'))
+
+    for test_method_name in skipped_test_names:
+        suite.addTest(generate_test_case(test_method_name, 'skip'))
+
+    return suite
+"""
+
+
+MANY_TEST_FILES = {
+    'mixed_test.py': MIXED_TEST_PY,  # 2 passes, 2 fails, 1 skip
+    'load_many_test.py': LOAD_MANY_TEST_PY}  # 13 passes, 8 fails, 9 skips
+
 path_to_main = os.path.join(
     os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
@@ -172,6 +247,34 @@
 class TestCli(test_case.MainTestCase):
     prog = [sys.executable, path_to_main]
     files_to_ignore = ['*.pyc']
+
+    def get_test_results_stat(self, test_output):
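+        # Count the per-test result lines in typ's console output and
+        # return them as (passes, fails, skips).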
+        num_passes = test_output.count(' passed\n')
+        num_fails = test_output.count(' failed unexpectedly:\n')
+        num_skips = test_output.count(' was skipped\n')
+        return num_passes, num_fails, num_skips
+
+    def run_and_check_test_results(self, num_shards):
+        total_passes, total_fails, total_skips = 0, 0, 0
+        min_num_tests_run = float('inf')
+        max_num_tests_run = 0
+        for shard_index in range(num_shards):
+            _, out, _, _ = self.check(
+                ['--total-shards', str(num_shards), '--shard-index',
+                 str(shard_index)], files=MANY_TEST_FILES)
+            passes, fails, skips = self.get_test_results_stat(out)
+            total_passes += passes
+            total_fails += fails
+            total_skips += skips
+            num_tests_run = passes + fails
+            min_num_tests_run = min(min_num_tests_run, num_tests_run)
+            max_num_tests_run = max(max_num_tests_run, num_tests_run)
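+        # MANY_TEST_FILES defines 15 passing (2 + 13), 10 failing (2 + 8)
+        # and 10 skipped (1 + 9) tests in total across all shards.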
+        self.assertEqual(total_passes, 15)
+        self.assertEqual(total_fails, 10)
+        self.assertEqual(total_skips, 10)
+
+        # Make sure that we don't distribute the tests too unevenly.
+        self.assertLessEqual(max_num_tests_run - min_num_tests_run, 2)

     def test_bad_arg(self):
         self.check(['--bad-arg'], ret=2, out='',
@@ -202,12 +305,12 @@
                 [1/1] pass_test.PassingTest.test_pass passed
                 1 test run, 0 failures.

-                Name           Stmts   Miss  Cover
-                ----------------------------------
-                fail_test.py       4      4     0%
-                pass_test.py       4      0   100%
-                ----------------------------------
-                TOTAL              8      4    50%
+                Name        Stmts   Miss  Cover
+                -------------------------------
+                fail_test       4      4     0%
+                pass_test       4      0   100%
+                -------------------------------
+                TOTAL           8      4    50%
                 """))
         except ImportError:  # pragma: no cover
             # We can never cover this line, since running coverage means
@@ -271,32 +374,6 @@
                 'fail_then_pass_test']['FPTest']['test_count']['actual'],
             'FAIL PASS')

-    def test_fail_then_skip(self):
-        files = {'fail_then_skip_test.py': d("""\
-            import unittest
-            count = 0
-            class FPTest(unittest.TestCase):
-                def test_count(self):
-                    global count
-                    count += 1
-                    if count == 1:
-                        self.fail()
-                    elif count == 2:
-                        self.skipTest('')
-            """)}
-        _, out, _, files = self.check(['--retry-limit', '3',
-                                       '--write-full-results-to',
-                                       'full_results.json'],
-                                      files=files, ret=0, err='')
-        self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
-        self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
-        self.assertIn('1 test run, 0 failures.\n', out)
-        results = json.loads(files['full_results.json'])
-        self.assertEqual(
-            results['tests'][
-                'fail_then_skip_test']['FPTest']['test_count']['actual'],
-            'FAIL SKIP')
-
     def test_failures_are_not_elided(self):
         _, out, _, _ = self.check(['--terminal-width=20'],
                                   files=FAIL_TEST_FILES, ret=1, err='')
@@ -368,7 +445,8 @@
                    pass
            """)}
         self.check(['-l', 'foo.py'], files=files, ret=1, err='',
-                   rout=('Failed to load "foo.py":'))
+                   rout=('Failed to load "foo.py": No module named '
+                         '\'?package_that_does_not_exist\'?\n'))

     def test_import_failure_no_tests(self):
         files = {'foo.py': 'import unittest'}
@@ -385,7 +463,9 @@
            """)}
         _, out, _, _ = self.check([], files=files, ret=1, err='')
         self.assertIn('Failed to import test module: syn_test', out)
-        self.assertIn('SyntaxError: invalid syntax', out)
+        self.assertIn((' syntax error\n'
+                       ' ^\n'
+                       'SyntaxError: invalid syntax\n'), out)

     def test_interrupt(self):
         files = {'interrupt_test.py': d("""\
@@ -409,8 +489,8 @@
            def load_tests(_, _2, _3):
                raise ValueError('this should fail')
            """)}
-        _, out, _, _ = self.check([], files=files, ret=1, err='')
-        self.assertIn('this should fail', out)
+        self.check([], files=files, ret=1, err='',
+                   out=('foo_test.load_tests() failed: this should fail\n'))

     def test_load_tests_single_worker(self):
         files = LOAD_TEST_FILES
@@ -552,46 +632,17 @@
         self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)

     def test_sharding(self):
-
-        def run(shard_index, total_shards, tests):
-            files = {'shard_test.py': textwrap.dedent(
-                """\
-                import unittest
-                class ShardTest(unittest.TestCase):
-                    def test_01(self):
-                        pass
-
-                    def test_02(self):
-                        pass
-
-                    def test_03(self):
-                        pass
-
-                    def test_04(self):
-                        pass
-
-                    def test_05(self):
-                        pass
-                """)}
-            _, out, _, _ = self.check(
-                ['--shard-index', str(shard_index),
-                 '--total-shards', str(total_shards),
-                 '--jobs', '1'],
-                files=files)
-
-            exp_out = ''
-            total_tests = len(tests)
-            for i, test in enumerate(tests):
-                exp_out += ('[%d/%d] shard_test.ShardTest.test_%s passed\n' %
-                            (i + 1, total_tests, test))
-            exp_out += '%d test%s run, 0 failures.\n' % (
-                total_tests, "" if total_tests == 1 else "s")
-            self.assertEqual(out, exp_out)
-
-        run(0, 1, ['01', '02', '03', '04', '05'])
-        run(0, 2, ['01', '03', '05'])
-        run(1, 2, ['02', '04'])
-        run(0, 6, ['01'])
+        # Test no sharding.
+        self.run_and_check_test_results(1)
+
+        # A typical case with 4 shards.
+        self.run_and_check_test_results(4)
+
+        # A case where the number of shards is a prime number.
+        self.run_and_check_test_results(7)
+
+        # A case where there are more shards than tests.
+        self.run_and_check_test_results(50)

     def test_subdir(self):
         files = {
@@ -631,7 +682,7 @@
         self.assertEqual(len(posts), 1)
         payload = posts[0][2].decode('utf8')
-        self.assertIn('"test_pass": {"actual": "PASS", "expected": "PASS"}',
+        self.assertIn('"test_pass": {"expected": "PASS", "actual": "PASS"}',
                       payload)
         self.assertTrue(payload.endswith('--\r\n'))
         self.assertNotEqual(server.log.getvalue(), '')