Chromium Code Reviews

Side by Side Diff: tools/nocompile_driver.py

Issue 7458012: Create a "no compile" driver script in Python to unittest compile-time asserts. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address Ami's comments and don't busyloop. Created 9 years, 3 months ago
1 #!/usr/bin/python
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
5
6 """Implements a simple "negative compile" test for C++ on linux.
7
8 Sometimes a C++ API needs to ensure that various usages cannot compile. To
9 enable unittesting of these assertions, we use this python script to
10 invoke gcc on a source file and assert that compilation fails.
11
12 For more info, see:
13 https://sites.google.com/a/chromium.org/dev/developers/testing/no-compile-tests
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 Not found for me; also would be nice to use the de
awong 2011/09/09 01:01:43 Strange...I'm able to see it. I've updated with t
Ami GONE FROM CHROMIUM 2011/09/09 17:17:12 WFM now.
14 """
15
16 import ast
17 import locale
18 import os
19 import re
20 import select
21 import shlex
22 import subprocess
23 import sys
24 import time
25
26
27 class ConfigurationSyntaxError(Exception):
28 """Raised if the test configuration or specification cannot be parsed."""
29 pass
30
31
32 # Matches lines that start with #if and have a token containing NCTEST in
33 # the conditional. Also extracts the trailing comment. This allows us to
34 # search for lines like the following:
35 #
36 # #ifdef NCTEST_NAME_OF_TEST // [r'expected output']
37 # #if defined(NCTEST_NAME_OF_TEST) // [r'expected output']
38 # #if NCTEST_NAME_OF_TEST // [r'expected output']
39 # #elif NCTEST_NAME_OF_TEST // [r'expected output']
40 # #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output']
41 #
42 # inside the unittest file.
43 NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
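[Editor's note: as a quick illustration, not part of the reviewed file, here is what this regexp captures on two of the documented forms; the test name and expectation text are made up:

  m = NCTEST_CONFIG_RE.match("#ifdef NCTEST_NAME_OF_TEST  // [r'expected output']")
  # m.group(1) == 'NCTEST_NAME_OF_TEST'
  # m.group(2) == "// [r'expected output']"

  m = NCTEST_CONFIG_RE.match("#if defined(NCTEST_NAME_OF_TEST)  // [r'expected output']")
  # m.group(1) == 'defined(NCTEST_NAME_OF_TEST)', which STRIP_DEFINED_RE below
  # reduces to 'NCTEST_NAME_OF_TEST'.]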
44
45
46 # Matches and removes the defined() preprocessor predicate. This is useful
47 # for test cases that use the preprocessor if-statement form:
48 #
49 # #if defined(NCTEST_NAME_OF_TEST)
50 #
51 # Should be used to post-process the results found by NCTEST_CONFIG_RE.
52 STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
53
54
55 # Used to grab the expectation from the comment at the end of an #ifdef. See
56 # NCTEST_CONFIG_RE's comment for examples of what the format should look like.
57 #
58 # The extracted substring should be a Python list of regular expressions.
59 EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
60
61
62 # The header for the result file so that it can be compiled.
63 RESULT_FILE_HEADER = """
64 // This file is generated by the no-compile test from:
65 // %s
66
67 #include "base/logging.h"
68 #include "testing/gtest/include/gtest/gtest.h"
69
70 """
71
72
73 # The GUnit test function to output on a successful test completion.
74 SUCCESS_GUNIT_TEMPLATE = """
75 TEST(%s, %s) {
76   LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
77 }
78 """
79
80 # The GUnit test function to output for a disabled test.
81 DISABLED_GUNIT_TEMPLATE = """
82 TEST(%s, %s) { }
83 """
84
85
86 # Timeout constants.
87 NCTEST_TERMINATE_TIMEOUT_SEC = 60
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 Channeling phajdan.jr, is there a standard timeout
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 IWBN for that 60 to be significantly lower. I see
awong 2011/09/09 01:01:43 I have no clue what timeout would make sense.
awong 2011/09/09 01:01:43 Yes...on my z600, it's pretty reliably < 2 seconds
88 NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
89 BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
90
91
92 def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
93 """Make sure the arguments being passed in are sane."""
94 assert parallelism >= 1
95 assert type(sourcefile_path) is str
96 assert type(cflags) is str
97 assert type(resultfile_path) is str
98
99
100 def ParseExpectation(expectation_string):
101 """Extracts expectation definition from the trailing comment on the ifdef.
102
103 See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing.
104
105 Args:
106 expectation_string: A string like '// [r'some_regex']
107
108 Returns:
109 A list of compiled regular expressions indicating all possible valid
110 compiler outputs. If the list is empty, all outputs are considered valid.
111 """
112 if expectation_string is None:
113 raise ConfigurationSyntaxError('Test must specify expected output.')
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 I'm a bit surprised you still have ConfigurationSy
awong 2011/09/09 01:01:43 All gone.
114
115   match = EXTRACT_EXPECTATION_RE.match(expectation_string)
116   assert match
117
118   raw_expectation = ast.literal_eval(match.group(1))
119   if type(raw_expectation) is not list:
120     raise ConfigurationSyntaxError(
121         'Expectations must be a list of regexps. Instead, got %s' %
122         repr(raw_expectation))
123
124   expectation = []
125   for regex_str in raw_expectation:
126     if type(regex_str) is not str:
127       raise ConfigurationSyntaxError(
128           '"%s" is not a regexp in %s' % (regex_str, expectation_string))
129     expectation.append(re.compile(regex_str))
130   return expectation
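[Editor's note: a minimal sketch of the parsing round trip; the comment string below is made up:

  patterns = ParseExpectation("// [r'invalid conversion', r'no matching function']")
  # [p.pattern for p in patterns] == ['invalid conversion', 'no matching function']

A test annotated with '// []' yields an empty list, meaning any compiler output is accepted as long as compilation fails.]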
131
132
133 def ExtractTestConfigs(sourcefile_path):
134 """Parses the soruce file for test configurations.
135
136 Each no-compile test in the file is separated by an ifdef macro. We scan
137 the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
138 they demark one no-compile test and try to extract the test configuration
139 from that.
140
141 Args:
142 sourcefile_path: A string containing the path to the source file.
143
144 Returns:
145 A list of test configurations. Each test configuration is a dictionary of
146 the form:
147
148 { name: 'NCTEST_NAME'
149 suite_name: 'SOURCE_FILE_NAME'
150 expectations: [re.Pattern, re.Pattern] }
151
152 The |suite_name| is used to generate a pretty gtest output on successful
153 completion of the no compile test.
154
155 The compiled regexps in |expectations| define the valid outputs of the
156 compiler. If any one of the listed patterns matches either the stderr or
157 stdout from the compilation, and the compilation failed, then the test is
158 considered to have succeeded. If the list is empty, than we ignore the
159 compiler output and just check for failed compilation. If |expectations|
160 is actually None, then this specifies a compiler sanity check test, which
161 should expect a SUCCESSFUL compilation.
162 """
163 sourcefile = open(sourcefile_path, 'r')
164
165 # Convert filename from underscores to CamelCase.
166 words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
167 words = [w.capitalize() for w in words]
168 suite_name = 'NoCompile' + ''.join(words)
169
170 # Start with at least the compiler sanity test. You need to always have one
171 # sanity test to show that compiler flags and configuration are not just
172 # wrong. Otherwise, having a misconfigured compiler, or an error in the
173 # shared portions of the .nc file would cause all tests to erroneously pass.
174 test_configs = [{'name': 'NCTEST_SANITY',
175 'suite_name': suite_name,
176 'expectations': None}]
177
178 for line in sourcefile:
179 match_result = NCTEST_CONFIG_RE.match(line)
180 if not match_result:
181 continue
182
183 groups = match_result.groups()
184
185 # Grab the name and remove the defined() predicate if there is one.
186 name = groups[0]
187 strip_result = STRIP_DEFINED_RE.match(name)
188 if strip_result:
189 name = strip_result.group(1)
190
191 # Read expectations if there are any.
192 test_configs.append({'name': name,
193 'suite_name': suite_name,
194 'expectations': ParseExpectation(groups[1])})
195 sourcefile.close()
196 return test_configs
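[Editor's note: to make the returned structure concrete, for a hypothetical file foo_unittest.nc whose only test line is `#ifdef NCTEST_BAD_CAST  // [r'invalid conversion']`, the function would return something like:

  [{'name': 'NCTEST_SANITY', 'suite_name': 'NoCompileFooUnittest',
    'expectations': None},
   {'name': 'NCTEST_BAD_CAST', 'suite_name': 'NoCompileFooUnittest',
    'expectations': [re.compile(r'invalid conversion')]}]]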
197
198
199 def StartTest(sourcefile_path, cflags, config):
200 """Start one negative compile test.
201
202 Args:
203 sourcefile_path: A string with path to the source file.
204 cflags: A string with all the CFLAGS to give to gcc. This string will be
205 split by shelex so becareful with escaping.
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 s/becareful/be careful/
awong 2011/09/09 01:01:43 Done.
206     config: A dictionary describing the test. See ExtractTestConfigs
207       for a description of the config format.
208
209   Returns:
210     A dictionary containing all the information about the started test. The
211     fields in the dictionary are as follows:
212     { 'proc': A subprocess object representing the compiler run.
213       'cmdline': A string containing the executed command line.
214       'name': A string containing the name of the test.
215       'suite_name': A string containing the suite name to use when generating
216         the gunit test result.
217       'terminate_timeout': The timestamp in seconds since the epoch after
218         which the test should be terminated.
219       'kill_timeout': The timestamp in seconds since the epoch after which
220         the test should be given a hard kill signal.
221       'started_at': A timestamp in seconds since the epoch for when this test
222         was started.
223       'aborted_at': A timestamp in seconds since the epoch for when this test
224         was aborted. If the test completed successfully,
225         this value is 0.
226       'finished_at': A timestamp in seconds since the epoch for when this
227         test successfully completed. If the test was aborted
228         or is still running, this value is 0.
229       'expectations': A list of compiled regexps with the test expectations.
230         See ParseExpectation() for the structure.
231     }
232   """
233   # TODO(ajwong): Get the compiler from gyp.
234   cmdline = ['g++']
235   cmdline.extend(shlex.split(cflags))
236   name = config['name']
237   expectations = config['expectations']
238   if expectations is not None:
239     cmdline.append('-D%s' % name)
240   cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++', sourcefile_path])
241
242   process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
243                              stderr=subprocess.PIPE)
244   now = time.time()
245   return {'proc': process,
246           'cmdline': ' '.join(cmdline),
247           'name': name,
248           'suite_name': config['suite_name'],
249           'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
250           'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
251           'started_at': now,
252           'aborted_at': 0,
253           'finished_at': 0,
254           'expectations': expectations}
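[Editor's note: to make the constructed command line concrete, for a test named NCTEST_BAD_CAST in foo_unittest.nc with cflags of '-Ibase -Werror' (all illustrative values), the 'cmdline' field would read:

  g++ -Ibase -Werror -DNCTEST_BAD_CAST -o /dev/null -c -x c++ foo_unittest.nc

For the sanity test, expectations is None, so the -D flag is omitted and every NCTEST ifdef block stays excluded; that is why the sanity compile is expected to succeed.]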
255
256
257 def PassTest(resultfile, test):
258 """Logs the result of a test started by StartTest(), or a disabled test
259 configuration.
260
261 Args:
262 resultfile: File object for .cc file that results are written to.
263 test: An instance of the dictionary returned by StartTest(), a
264 configuration from ExtractTestConfigs().
265 """
266 # The 'started_at' key is only added if a test has been started.
267 if 'started_at' in test:
268 resultfile.write(SUCCESS_GUNIT_TEMPLATE % (
269 test['suite_name'], test['name'],
270 test['finished_at'] - test['started_at'],
271 test['started_at'], test['finished_at']))
272 else:
273 resultfile.write(DISABLED_GUNIT_TEMPLATE % (
274 test['suite_name'], test['name']))
275
276
277 def FailTest(resultfile, test, error, stdout=None, stderr=None):
278 """Logs the result of a test started by StartTest()
279
280 Args:
281 resultfile: File object for .cc file that results are written to.
282 test: An instance of the dictionary returned by StartTest()
283 error: A string containing the reason for the failure.
284 stdout: A string containing the test's output to stdout.
285 stderr: A string containing the test's output to stderr.
286 """
287 resultfile.write('#error %s Failed: %s\n' % (test['name'], error))
288 resultfile.write('#error compile line: %s\n' % test['cmdline'])
289 if stdout and len(stdout) != 0:
290 resultfile.write('#error %s stdout:\n' % test['name'])
291 for line in stdout.split('\n'):
292 resultfile.write('#error %s\n' % line)
293
294 if stderr and len(stderr) != 0:
295 resultfile.write('#error %s stderr:\n' % test['name'])
296 for line in stderr.split('\n'):
297 resultfile.write('#error %s\n' % line)
298 resultfile.write('\n')
299
300
301 def ProcessTestResult(resultfile, test):
302 """Interprets and logs the result of a test started by StartTest()
303
304 Args:
305 resultfile: File object for .cc file that results are written to.
306 test: The dictionary from StartTest() to process.
307 """
308 # Snap a copy of stdout and stderr into the test dictionary immediately
309 # cause we can only call this once on the Popen object, and lots of stuff
310 # below will want access to it.
311 proc = test['proc']
312 (stdout, stderr) = proc.communicate()
313
314 if test['aborted_at'] != 0:
315 FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
316 (test['started_at'], test['aborted_at']))
317 return
318
319 if test['expectations'] is None:
320 # This signals a compiler sanity check test. Fail iff compilation failed.
321 if proc.poll() == 0:
322 PassTest(resultfile, test)
323 return
324 else:
325 FailTest(resultfile, test, 'Sanity compile failed. Is compiler borked?',
326 stdout, stderr)
327 return
328 elif proc.poll() == 0:
329 # Handle failure due to successful compile.
330 FailTest(resultfile, test,
331 'Unexpected successful compilation.',
332 stdout, stderr)
333 return
334 else:
335 # Check the output has the right expectations. If there are no
336 # expectations, then we just consider the output "matched" by default.
337 if len(test['expectations']) == 0:
338 PassTest(resultfile, test)
339 return
340
341 # Otherwise test against all expectations.
342 for regexp in test['expectations']:
343 if (regexp.search(stdout) is not None or
344 regexp.search(stderr) is not None):
345 PassTest(resultfile, test)
346 return
347 expectation_str = ', '.join(
348 ["r'%s'" % regexp.pattern for regexp in test['expectations']])
349 FailTest(resultfile, test,
350 'Expectations [%s] did not match output.' % expectation_str,
351 stdout, stderr)
352 return
353
354
355 def CompleteAtLeastOneTest(resultfile, executing_tests):
356 """Blocks until at least one task is removed from executing_tests.
357
358 This function removes completed tests from executing_tests, logging failures
359 and output. If no tests can be removed, it will enter a poll-loop until one
360 test finishes or times out. On a timeout, this function is responsible for
361 terminating the process in the appropriate fashion.
362
363 Args:
364 executing_tests: A dict mapping a string containing the test name to the
365 test dict return from StartTest().
366
367 Returns:
368 A tuple with a set of tests that have finished.
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 "tuple with a set" sounds like a set is involved,
awong 2011/09/09 01:01:43 Yeah yeah...dynamic languages make me uncomfortabl
369 """
370 finished_tests = []
371 while len(finished_tests) == 0:
372 # Select on the output pipes.
373 read_set = []
374 for test in executing_tests.values():
375 read_set.extend([test['proc'].stderr, test['proc'].stdout])
376 result = select.select(read_set, [], read_set, BUSY_LOOP_MAX_TIME_SEC)
377
378 # We timed out on all running tests. Assume this whole thing is hung.
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 indent
awong 2011/09/09 01:01:43 deleted.
379     if result == ([], [], []):
380       raise Exception('Busy looping for too long. Aborting no-compile '
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 s/no compile/no-compile/ (b/c otherwise the "no" b
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 Where does WatchdogException come from?
awong 2011/09/09 01:01:43 deleted.
awong 2011/09/09 01:01:43 deleted
381                       'test.')
382
383     # Now attempt to process results. Check the kill deadline before the
384     # terminate deadline; since it is the later of the two, testing it second
385     # would make the kill branch unreachable.
386     now = time.time()
387     for test in executing_tests.values():
388       proc = test['proc']
389       if proc.poll() is not None:
390         test['finished_at'] = now
391         finished_tests.append(test)
392       elif test['kill_timeout'] < now:
393         proc.kill()
394         test['aborted_at'] = now
395       elif test['terminate_timeout'] < now:
396         proc.terminate()
397         test['aborted_at'] = now
398
399   return finished_tests
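[Editor's note: a standalone sketch of the select()-with-timeout pattern used above; the sleep child and the 1-second timeout are illustrative, not values from the driver:

  import select
  import subprocess

  proc = subprocess.Popen(['sleep', '5'],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  read_set = [proc.stdout, proc.stderr]
  # Blocks until a pipe has data (or hits EOF), or the timeout elapses.
  # On timeout, select() returns three empty lists -- the hung-test signal
  # checked at the top of the loop.
  result = select.select(read_set, [], read_set, 1)
  assert result == ([], [], [])  # sleep produces no output within 1 second.]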
398
399
400 def main():
401   if len(sys.argv) != 5:
402     print ('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
403            sys.argv[0])
404     sys.exit(1)
405
406   # Force us into the "C" locale so the compiler doesn't localize its output.
407   # In particular, this stops gcc from using smart quotes when in English UTF-8
408   # locales. This makes writing the expectations much easier.
409   os.environ['LC_ALL'] = 'C'
410
411   parallelism = int(sys.argv[1])
412   sourcefile_path = sys.argv[2]
413   cflags = sys.argv[3]
414   resultfile_path = sys.argv[4]
415
416   ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)
417
418   test_configs = ExtractTestConfigs(sourcefile_path)
419
420   resultfile = open(resultfile_path, 'w')
421   resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
422
423   # Run the no-compile tests, but ensure we do not run more than |parallelism|
424   # tests at once.
425   executing_tests = {}
426   finished_tests = []
427   for config in test_configs:
428     # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
429     # acts as a semaphore. We cannot use threads + a real semaphore because
430     # subprocess forks, which can cause all sorts of hilarity with threads.
431     if len(executing_tests) >= parallelism:
432       just_finished = CompleteAtLeastOneTest(resultfile, executing_tests)
433       finished_tests.extend(just_finished)
434       for test in just_finished:
435         del executing_tests[test['name']]
436
437     if config['name'].startswith('DISABLED_'):
438       PassTest(resultfile, config)
439     else:
440       test = StartTest(sourcefile_path, cflags, config)
441       executing_tests[test['name']] = test
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 Any point in making assertions about uniqueness of
awong 2011/09/09 01:01:43 Done.
442
443   # If there are no more tests to start, we still need to drain the running
444   # ones.
445   while len(executing_tests) > 0:
446     just_finished = CompleteAtLeastOneTest(resultfile, executing_tests)
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 Possible to avoid the duplication between l.446-44
awong 2011/09/09 01:01:43 Not that I can think of. But I took your other su
447     finished_tests.extend(just_finished)
448     for test in just_finished:
449       del executing_tests[test['name']]
Ami GONE FROM CHROMIUM 2011/09/08 18:30:20 If this was in CompleteAtLeastOneTest it'd be only
awong 2011/09/09 01:01:43 Done.
450
451   for test in finished_tests:
452     ProcessTestResult(resultfile, test)
453
454   resultfile.close()
455
456
457 if __name__ == '__main__':
458   main()
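[Editor's note: for reference, the driver takes exactly four arguments; an illustrative invocation follows. The paths and flags are hypothetical, and in the real build the gyp rule is expected to supply them:

  python tools/nocompile_driver.py 4 foo_unittest.nc \
      '-Ibase -I.' out/foo_unittest_nc.cc]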