OLD | NEW |
| (Empty) |
1 #!/usr/bin/env python | |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 import json | |
7 import logging | |
8 import os | |
9 import re | |
10 import subprocess | |
11 import sys | |
12 import unittest | |
13 | |
14 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | |
15 sys.path.insert(0, ROOT_DIR) | |
16 sys.path.append(os.path.join(ROOT_DIR, 'tests', 'gtest_fake')) | |
17 | |
18 import gtest_fake_base | |
19 | |
20 | |
def RunTest(test_file, extra_flags):
  """Runs run_test_cases.py on a fake gtest executable.

  Returns a (stdout, stderr, returncode) tuple from the child process.
  """
  runner = os.path.join(ROOT_DIR, 'run_test_cases.py')
  target = os.path.join(ROOT_DIR, 'tests', 'gtest_fake', test_file)

  cmd = [sys.executable, runner] + extra_flags + [target]
  logging.debug(' '.join(cmd))
  proc = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # communicate() never actually returns None; the fallback only quiets pylint.
  out, err = proc.communicate() or ('', '')
  return out, err, proc.returncode
36 | |
37 | |
38 class TraceTestCases(unittest.TestCase): | |
39 def setUp(self): | |
40 # Make sure there's no environment variable that could do side effects. | |
41 os.environ.pop('GTEST_SHARD_INDEX', '') | |
42 os.environ.pop('GTEST_TOTAL_SHARDS', '') | |
43 | |
44 self.filename = 'test.results' | |
45 | |
46 def tearDown(self): | |
47 if os.path.exists(self.filename): | |
48 os.remove(self.filename) | |
49 | |
50 def _check_results(self, expected_out_re, out, err): | |
51 if sys.platform == 'win32': | |
52 out = out.replace('\r\n', '\n') | |
53 lines = out.splitlines() | |
54 | |
55 for index in range(len(expected_out_re)): | |
56 line = lines.pop(0) | |
57 self.assertTrue( | |
58 re.match('^%s$' % expected_out_re[index], line), | |
59 (index, expected_out_re[index], repr(line))) | |
60 self.assertEqual([], lines) | |
61 self.assertEqual('', err) | |
62 | |
63 def _check_results_file(self, expected_file_contents_entries): | |
64 self.assertTrue(os.path.exists(self.filename)) | |
65 | |
66 with open(self.filename) as f: | |
67 file_contents = json.load(f) | |
68 | |
69 self.assertEqual(len(expected_file_contents_entries), len(file_contents)) | |
70 for (entry_name, entry_count) in expected_file_contents_entries: | |
71 self.assertTrue(entry_name in file_contents) | |
72 self.assertEqual(entry_count, len(file_contents[entry_name])) | |
73 | |
74 def test_simple_pass(self): | |
75 out, err, return_code = RunTest( | |
76 'gtest_fake_pass.py', ['--result', self.filename]) | |
77 | |
78 self.assertEqual(0, return_code) | |
79 | |
80 expected_out_re = [ | |
81 r'\[\d/\d\] \d\.\d\ds .+', | |
82 r'\[\d/\d\] \d\.\d\ds .+', | |
83 r'\[\d/\d\] \d\.\d\ds .+', | |
84 re.escape('Summary:'), | |
85 re.escape('Success: 3 100.00%'), | |
86 re.escape('Flaky: 0 0.00%'), | |
87 re.escape('Fail: 0 0.00%'), | |
88 r'\d+\.\ds Done running 3 tests with 3 executions. \d+\.\d test/s', | |
89 ] | |
90 self._check_results(expected_out_re, out, err) | |
91 | |
92 expected_result_file_entries = [ | |
93 ('Foo.Bar1', 1), | |
94 ('Foo.Bar2', 1), | |
95 ('Foo.Bar3', 1) | |
96 ] | |
97 self._check_results_file(expected_result_file_entries) | |
98 | |
99 def test_simple_fail(self): | |
100 out, err, return_code = RunTest( | |
101 'gtest_fake_fail.py', ['--result', self.filename]) | |
102 | |
103 self.assertEqual(1, return_code) | |
104 | |
105 expected_out_re = [ | |
106 r'\[\d/\d\] \d\.\d\ds .+', | |
107 r'\[\d/\d\] \d\.\d\ds .+', | |
108 r'\[\d/\d\] \d\.\d\ds .+', | |
109 r'\[\d/\d\] \d\.\d\ds .+', | |
110 r'\[\d/\d\] \d\.\d\ds .+', | |
111 r'\[\d/\d\] \d\.\d\ds .+', | |
112 re.escape('Note: Google Test filter = Baz.Fail'), | |
113 r'', | |
114 ] + [ | |
115 re.escape(l) for l in | |
116 gtest_fake_base.get_test_output('Baz.Fail').splitlines() | |
117 ] + [ | |
118 '', | |
119 ] + [ | |
120 re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines() | |
121 ] + [ | |
122 '', | |
123 re.escape('Summary:'), | |
124 re.escape('Baz.Fail failed'), | |
125 re.escape('Success: 3 75.00%'), | |
126 re.escape('Flaky: 0 0.00%'), | |
127 re.escape('Fail: 1 25.00%'), | |
128 r'\d+\.\ds Done running 4 tests with 6 executions. \d+\.\d test/s', | |
129 ] | |
130 self._check_results(expected_out_re, out, err) | |
131 | |
132 expected_result_file_entries = [ | |
133 ('Foo.Bar1', 1), | |
134 ('Foo.Bar2', 1), | |
135 ('Foo.Bar3', 1), | |
136 ('Baz.Fail', 3) | |
137 ] | |
138 self._check_results_file(expected_result_file_entries) | |
139 | |
140 def test_simple_gtest_list_error(self): | |
141 out, err, return_code = RunTest( | |
142 'gtest_fake_error.py', ['--no-dump']) | |
143 | |
144 expected_out_re = [ | |
145 'Failed to run %s %s --gtest_list_tests' % ( | |
146 sys.executable, | |
147 os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_error.py')), | |
148 'stdout:', | |
149 '', | |
150 'stderr:', | |
151 'Unable to list tests' | |
152 ] | |
153 | |
154 self.assertEqual(1, return_code) | |
155 self._check_results(expected_out_re, out, err) | |
156 | |
157 def test_gtest_list_tests(self): | |
158 out, err, return_code = RunTest( | |
159 'gtest_fake_fail.py', ['--gtest_list_tests']) | |
160 | |
161 expected_out = ( | |
162 'Foo.\n Bar1\n Bar2\n Bar3\nBaz.\n Fail\n' | |
163 ' YOU HAVE 2 tests with ignored failures (FAILS prefix)\n\n') | |
164 self.assertEqual(0, return_code) | |
165 self.assertEqual(expected_out, out) | |
166 self.assertEqual('', err) | |
167 | |
168 | |
if __name__ == '__main__':
  # -v enables debug logging, which echoes the child process command lines.
  VERBOSE = '-v' in sys.argv
  if VERBOSE:
    level = logging.DEBUG
  else:
    level = logging.ERROR
  logging.basicConfig(level=level)
  unittest.main()
OLD | NEW |