OLD | NEW |
---|---|
(Empty) | |
1 #!/usr/bin/python | |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 """Implements a simple "negative compile" test for C++ on linux. | |
7 | |
8 Sometimes a C++ API needs to ensure that various usages cannot compile. To | |
9 enable unittesting of these assertions, we use this python script to | |
10 invoke gcc on a source file and assert that compilation fails. | |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
Will there be a sites/wiki page about this feature
awong
2011/09/08 02:46:16
Created a skeleton.
| |
11 """ | |
12 | |
import ast
import locale
import os
import re
import shlex
import subprocess
import sys
import time
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
alphabetize this list
awong
2011/09/08 02:46:16
Done.
| |
21 | |
22 | |
class ArgumentValidationException(Exception):
  """Raised when script arguments, or the test directives embedded in the
  source file being scanned, fail validation."""
  pass
25 | |
26 | |
# Matches lines beginning with #if or #elif whose conditional contains the
# substring TEST, and captures the trailing // comment (if any). This lets
# us find lines such as:
#
#   #ifdef TEST_NAME_OF_TEST  // [r'expected output']
#   #if defined(TEST_NAME_OF_TEST)  // [r'expected output']
#   #if TEST_NAME_OF_TEST  // [r'expected output']
#   #elif TEST_NAME_OF_TEST  // [r'expected output']
#
# inside the unittest file.
TEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*TEST\S*)\s*(//.*)?')
38 | |
39 | |
# Strips the defined() preprocessor predicate from a name found by
# TEST_CONFIG_RE when the test uses the form:
#
#   #if defined(TEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by TEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
47 | |
48 | |
# Grabs the expectation from the comment at the end of an #ifdef. See
# TEST_CONFIG_RE's comment for examples of the expected format.
#
# The extracted substring should be a python list literal of
# regular-expression strings.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
54 | |
55 | |
# Boilerplate written at the top of the generated result file so that it
# compiles cleanly; %s is filled in with the original source path.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s

#include "testing/gtest/include/gtest/gtest.h"

"""

# Timeout constants, in seconds. A hung compile is first asked to terminate,
# then killed one second later; the poll loop gives up entirely after twice
# the kill window.
TEST_TERMINATE_TIMEOUT_SEC = 5
TEST_KILL_TIMEOUT_SEC = TEST_TERMINATE_TIMEOUT_SEC + 1
BUSY_LOOP_MAX_TIME_SEC = TEST_KILL_TIMEOUT_SEC * 2
69 | |
70 | |
def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
  """Validates the arguments given to this script.

  Args:
    parallelism: Integer maximum number of concurrent compiles; must be >= 1.
    sourcefile_path: A string containing the path to the source file.
    cflags: A string of CFLAGS to pass to gcc.
    resultfile_path: A string containing the path of the result file.

  Raises:
    ArgumentValidationException: If any argument is invalid.
  """
  if parallelism < 1:
    # Note: the check accepts any value >= 1; the original message wrongly
    # said "> 1".
    raise ArgumentValidationException('parallelism must be >= 1. Got %d' %
                                      parallelism)

  if not isinstance(sourcefile_path, str):
    raise ArgumentValidationException("sourcefile must be a string. Got %s" %
                                      repr(sourcefile_path))

  if not isinstance(cflags, str):
    raise ArgumentValidationException("cflags must be a string. Got %s" %
                                      repr(cflags))

  if not isinstance(resultfile_path, str):
    raise ArgumentValidationException("resultfile must be a string. Got %s" %
                                      repr(resultfile_path))
87 | |
88 | |
def ParseExpecation(expectation_string):
  """Extracts expectation definition from the trailing comment on the ifdef.

  See the comment on TEST_CONFIG_RE for examples of the format we are parsing.

  TODO: rename to ParseExpectation (typo) together with its caller.

  Args:
    expectation_string: A string like "// [r'some_regex']".

  Returns:
    A list of compiled regular expressions indicating all possible valid
    compiler outputs. An empty list means any output is accepted.

  Raises:
    ArgumentValidationException: If the expectation cannot be parsed.
  """
  if expectation_string is None:
    raise ArgumentValidationException('Test must specify expected output.')

  match = EXTRACT_EXPECTATION_RE.match(expectation_string)
  if not match:
    raise ArgumentValidationException(
        'Cannot parse expectations in %s' % expectation_string)

  # literal_eval safely evaluates the list-of-strings literal without
  # executing arbitrary code.
  raw_expectation = ast.literal_eval(match.group(1))
  if not isinstance(raw_expectation, list):
    raise ArgumentValidationException(
        'Expectations must be a list of regexps. Instead, got %s' %
        repr(raw_expectation))

  expectation = []
  for regex_str in raw_expectation:
    if not isinstance(regex_str, str):
      raise ArgumentValidationException(
          '"%s" is not a regexp in %s' % (regex_str, expectation_string))
    # The original compiled each regex twice, discarding the first result.
    expectation.append(re.compile(regex_str))
  return expectation
123 | |
124 | |
def ExtractTestConfigs(sourcefile_path):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with TEST_CONFIG_RE to find all ifdefs that look like they
  demark one no-compile test and extract the test configuration from each.

  Args:
    sourcefile_path: The path to the source file, as a string.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'TEST_NAME'
        suite_name: 'SOURCE_PATH_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The compiled regexps in |expectations| define the valid outputs of the
    compiler. If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded. If the list is empty, we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is None, this marks the compiler sanity-check test, which requires a
    SUCCESSFUL compilation.
  """
  # Convert the source file's base name from underscores to CamelCase to
  # build the test suite name, e.g. bind_unittest.nc -> NoCompileBindUnittest.
  # (The original hard-coded 'bind_unittest.nc' here.)
  basename = os.path.basename(sourcefile_path)
  words = os.path.splitext(basename)[0].split('_')
  suite_name = 'NoCompile' + ''.join([w.capitalize() for w in words])

  # Always start with the compiler sanity test so that failures of the real
  # tests cannot be confused with a broken compiler or bad cflags. The None
  # expectations value marks it as the sanity test.
  test_configs = [{'name': 'TEST_SANITY',
                   'suite_name': suite_name,
                   'expectations': None}]

  with open(sourcefile_path, 'r') as sourcefile:
    for line in sourcefile:
      match_result = TEST_CONFIG_RE.match(line)
      if not match_result:
        continue
      groups = match_result.groups()

      # Grab the name and remove the defined() predicate if there is one.
      name = groups[0]
      strip_result = STRIP_DEFINED_RE.match(name)
      if strip_result:
        name = strip_result.group(1)

      test_configs.append({'name': name,
                           'suite_name': suite_name,
                           'expectations': ParseExpecation(groups[1])})
  return test_configs
182 | |
183 | |
def StartTest(sourcefile_path, cflags, config):
  """Starts one negative compile test asynchronously.

  Args:
    sourcefile_path: A string containing the path to the source file.
    cflags: A string of CFLAGS for gcc; it is split with shlex before use.
    config: A dictionary describing the test. See ExtractTestConfigs for a
        description of the config format.

  Returns:
    A dictionary representing the in-flight test: the Popen object ('proc'),
    the joined command line, the test and suite names, the terminate/kill
    deadlines, an 'aborted' flag, and the expectations list.
  """
  name = config['name']
  expectations = config['expectations']

  # TODO(ajwong): Get the compiler from gyp.
  cmdline = ['g++'] + shlex.split(cflags)
  # The sanity test (expectations is None) compiles with no test macro
  # defined; every real test enables its #ifdef section via -D<name>.
  if expectations is not None:
    cmdline.append('-D%s' % name)
  cmdline += ['-o', '/dev/null', '-c', '-x', 'c++', sourcefile_path]

  process = subprocess.Popen(cmdline,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  return {'proc': process,
          'cmdline': ' '.join(cmdline),
          'name': name,
          'suite_name': config['suite_name'],
          'terminate_timeout': time.time() + TEST_TERMINATE_TIMEOUT_SEC,
          'kill_timeout': time.time() + TEST_KILL_TIMEOUT_SEC,
          'aborted': False,
          'expectations': expectations}
212 | |
213 | |
def PassTest(resultfile, test):
  """Logs a passing test as an empty (trivially green) gtest case.

  Args:
    resultfile: File object for the .cc file that results are written to.
    test: The test dictionary; only 'suite_name' and 'name' are read.
  """
  resultfile.write('TEST(%s, %s) { }\n\n'
                   % (test['suite_name'], test['name']))
222 | |
223 | |
def FailTest(resultfile, test, error, stdout=None, stderr=None):
  """Logs a failing test as #error lines so the result file fails to compile.

  Args:
    resultfile: File object for the .cc file that results are written to.
    test: The test dictionary; 'name' and 'cmdline' are read.
    error: A string describing why the test failed.
    stdout: Optional string with the compiler's stdout.
    stderr: Optional string with the compiler's stderr.
  """
  resultfile.write('#error %s Failed: %s\n' % (test['name'], error))
  resultfile.write('#error compile line: %s\n' % test['cmdline'])

  # Truthiness covers both None and '' — the original's extra len() checks
  # were redundant.
  if stdout:
    resultfile.write('#error %s stdout:\n' % test['name'])
    for line in stdout.split('\n'):
      resultfile.write('#error %s\n' % line)

  if stderr:
    resultfile.write('#error %s stderr:\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error %s\n' % line)
  resultfile.write('\n')
242 | |
243 | |
def ProcessTestResult(resultfile, test):
  """Interprets and logs the result of a test started by StartTest().

  Args:
    resultfile: File object for the .cc file that results are written to.
    test: The dictionary from StartTest() to process.
  """
  # Snap a copy of stdout and stderr into the test dictionary immediately
  # because communicate() may only be called once per Popen object, and
  # several branches below need access to the output.
  proc = test['proc']
  (stdout, stderr) = proc.communicate()

  if test['aborted']:
    FailTest(resultfile, test, "Compile timed out.")
    return

  if test['expectations'] is None:
    # None (as opposed to an empty list) marks the compiler sanity-check
    # test, which must compile SUCCESSFULLY.
    if proc.poll() != 0:
      FailTest(resultfile, test, 'Sanity compile failed. Is compiler borked?',
               stdout, stderr)
    else:
      PassTest(resultfile, test)
    return

  if proc.poll() == 0:
    # A no-compile test that compiles successfully is a failure.
    FailTest(resultfile, test,
             'Unexpected successful compilation.',
             stdout, stderr)
    return

  # Compilation failed as expected; check the output against the
  # expectations. Any single matching pattern passes the test.
  for regexp in test['expectations']:
    if (regexp.search(stdout) is not None or
        regexp.search(stderr) is not None):
      PassTest(resultfile, test)
      return

  # An empty expectations list means any output is accepted; the failed
  # compilation alone is enough to pass.
  if not test['expectations']:
    PassTest(resultfile, test)
    return

  expectation_str = ', '.join(
      ["r'%s'" % regexp.pattern for regexp in test['expectations']])
  # Fixed typo in the original message: 'Expecatations'.
  FailTest(resultfile, test,
           'Expectations [%s] did not match output.' % expectation_str,
           stdout, stderr)
292 | |
293 | |
def DrainExecutingTasks(resultfile, executing_tests):
  """Blocks until at least one task is removed from executing_tests.

  This function removes completed tests from executing_tests, logging
  failures and output via ProcessTestResult(). If no tests can be removed,
  it polls until one test finishes or times out. On a timeout, this function
  is responsible for terminating (and eventually killing) the process.

  Args:
    resultfile: File object for the .cc file that results are written to.
    executing_tests: List of currently executing test dicts from StartTest().

  Returns:
    A new list containing only the still-executing tests.
  """
  finished_tests = []
  busy_loop_deadline = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while not finished_tests:
    if time.time() > busy_loop_deadline:
      # We've been looping too long. Assume the whole thing is hung.
      # NOTE(review): the original raised WatchdogException, which is not
      # defined anywhere in this file and would have surfaced as a NameError.
      raise RuntimeError('Busy looping for too long. Aborting negative '
                         'compile test.')

    now = time.time()
    for test in executing_tests:
      proc = test['proc']
      if proc.poll() is not None:
        finished_tests.append(test)
      elif test['kill_timeout'] < now:
        # Check the later (kill) deadline first: in the original elif order
        # the terminate branch always matched, making kill unreachable.
        proc.kill()
        test['aborted'] = True
      elif test['terminate_timeout'] < now:
        proc.terminate()
        test['aborted'] = True

    if not finished_tests:
      # Brief sleep so the poll loop doesn't peg a CPU.
      time.sleep(0.05)

  for test in finished_tests:
    ProcessTestResult(resultfile, test)

  return [test for test in executing_tests if test not in finished_tests]
329 | |
330 | |
def main():
  """Parses argv, runs all no-compile tests, and writes the result file."""
  # We require exactly four arguments (plus the program name); the original
  # check of "< 3" let argv[4] below raise an IndexError.
  if len(sys.argv) != 5:
    print ('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
           sys.argv[0])
    sys.exit(1)

  # Force us to the "C" locale so the compiler doesn't localize its output.
  os.environ['LC_ALL'] = 'C'

  parallelism = int(sys.argv[1])
  sourcefile_path = sys.argv[2]
  cflags = sys.argv[3]
  resultfile_path = sys.argv[4]

  ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)

  test_configs = ExtractTestConfigs(sourcefile_path)

  resultfile = open(resultfile_path, 'w')
  resultfile.write(RESULT_FILE_HEADER % sourcefile_path)

  # Run the no-compile tests, but ensure we do not run more than
  # |parallelism| tests at once.
  executing_tests = []
  for config in test_configs:
    # DrainExecutingTasks blocks until at least one test finishes. Thus, this
    # acts as a semaphore. We cannot use threads + a real semaphore because
    # subprocess forks, which can cause all sorts of hilarity with threads.
    # Draining at >= (not >) keeps the concurrency at exactly |parallelism|;
    # the original's > allowed parallelism + 1 simultaneous compiles.
    if len(executing_tests) >= parallelism:
      executing_tests = DrainExecutingTasks(resultfile, executing_tests)

    if config['name'].startswith('DISABLED_'):
      # Disabled tests are logged as trivially passing without being run.
      PassTest(resultfile, config)
    else:
      executing_tests.append(StartTest(sourcefile_path, cflags, config))

  # If there are no more tests to start, we still need to drain the running
  # ones.
  while len(executing_tests) > 0:
    executing_tests = DrainExecutingTasks(resultfile, executing_tests)

  resultfile.close()


if __name__ == '__main__':
  main()
OLD | NEW |