Index: tools/nocompile_driver.py |
diff --git a/tools/nocompile_driver.py b/tools/nocompile_driver.py |
new file mode 100755 |
index 0000000000000000000000000000000000000000..5ded29d417cca14c8b52cbfcba9b3efdc2677ab6 |
--- /dev/null |
+++ b/tools/nocompile_driver.py |
@@ -0,0 +1,382 @@ |
+#!/usr/bin/python |
+# Copyright (c) 2011 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+ |
+"""Implements a simple "negative compile" test for C++ on linux. |
+ |
+Sometimes a C++ API needs to ensure that various usages cannot compile. To |
+enable unittesting of these assertions, we use this python script to |
+invoke gcc on a source file and assert that compilation fails. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
Will there be a sites/wiki page about this feature
awong
2011/09/08 02:46:16
Created a skeleton.
|
+""" |
+ |
+import ast |
+import locale |
+import os |
+import re |
+import shlex |
+import subprocess |
+import sys |
+import time |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
alphabetize this list
awong
2011/09/08 02:46:16
Done.
|
+ |
+ |
+class ArgumentValidationException(Exception): |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
Is this a NoCompileSyntaxError?
Maybe a docstring
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
Don't all global names that aren't meant to be pee
awong
2011/09/08 02:46:16
It's a standalone script so making everything _ pr
awong
2011/09/08 02:46:16
Done.
Ami GONE FROM CHROMIUM
2011/09/08 18:30:20
I'm not sure what you're pointing out there; the o
awong
2011/09/09 01:01:43
The naming box at the bottom shows that exceptions
|
+ pass |
+ |
+ |
+class WatchdogException(Exception): |
+ """Raised when a compile task appears hung and the driver gives up.""" |
+ |
+ |
+# Matches lines that start with #if and have the substring TEST in the |
+# conditional. Also extracts the comment. This allows us to search for |
+# lines like the following: |
+# |
+# #ifdef TEST_NAME_OF_TEST // [r'expected output'] |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
I'm bothered by TEST appearing twice in each of th
awong
2011/09/08 02:46:16
I like making it more specific with NCTEST. Howev
|
+# #if defined(TEST_NAME_OF_TEST) // [r'expected output'] |
+# #if TEST_NAME_OF_TEST // [r'expected output'] |
+# #elif TEST_NAME_OF_TEST // [r'expected output'] |
+# |
+# inside the unittest file. |
+TEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*TEST\S*)\s*(//.*)?') |
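+# Illustrative match (the test name and expected-output text are invented), |
+# showing the two groups this regex produces: |
+# |
+#   m = TEST_CONFIG_RE.match('#ifdef TEST_FOO  // [r"no matching function"]') |
+#   m.group(1)  # -> 'TEST_FOO' |
+#   m.group(2)  # -> '// [r"no matching function"]' |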
+ |
+ |
+# Used to remove the defined() preprocessor predicate if the test uses |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
english the comment up.
awong
2011/09/08 02:46:16
Tried to rewrite.
|
+# the form: |
+# |
+# #if defined(TEST_NAME_OF_TEST) |
+# |
+# Should be used to post-process the results found by TEST_CONFIG_RE. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
s/above/found by TEST_CONFIG_RE/
awong
2011/09/08 02:46:16
Done.
|
+STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)') |
+ |
+ |
+# Used to grab the expectation from the comment at the end of an #ifdef. See |
+# TEST_CONFIG_RE's comment for examples of what the format should look like. |
+# |
+# The extracted substring should be a python array of regular expressions. |
+EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])') |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
Any reason not to make this part of TEST_CONFIG_RE
awong
2011/09/08 02:46:16
Seemed like too much logic in one regex.
Ami GONE FROM CHROMIUM
2011/09/08 18:30:20
Fine to leave as-is; I'm just silently judging you
|
+ |
+ |
+# The header for the result file so that it can be compiled. |
+RESULT_FILE_HEADER = """ |
+// This file is generated by the no compile test from: |
+// %s |
+ |
+#include "testing/gtest/include/gtest/gtest.h" |
+ |
+""" |
+ |
+# Timeout constants. |
+TEST_TERMINATE_TIMEOUT_SEC = 5 |
+TEST_KILL_TIMEOUT_SEC = TEST_TERMINATE_TIMEOUT_SEC + 1 |
+BUSY_LOOP_MAX_TIME_SEC = TEST_KILL_TIMEOUT_SEC * 2 |
+ |
+ |
+def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path): |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
docstring here and below please.
awong
2011/09/08 02:46:16
Done.
|
+ if parallelism < 1: |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
If it was me I'd replace lines 72-86 with:
assert
awong
2011/09/08 02:46:16
Good point. Done.
|
+ raise ArgumentValidationException("parallelism must be >= 1. Got %d" % |
+ parallelism) |
+ |
+ if type(sourcefile_path) is not str: |
+ raise ArgumentValidationException("sourcefile must be a string. Got %s" % |
+ repr(sourcefile_path)) |
+ |
+ if type(cflags) is not str: |
+ raise ArgumentValidationException("cflags must be a string. Got %s" % |
+ repr(cflags)) |
+ |
+ if type(resultfile_path) is not str: |
+ raise ArgumentValidationException("resultfile must be a string. Got %s" % |
+ repr(resultfile_path)) |
+ |
+ |
+def ParseExpectation(expectation_string): |
+ """Extracts expectation definition from the trailing comment on the ifdef. |
+ |
+ See the comment on TEST_CONFIG_RE for examples of the format we are parsing. |
+ |
+ Args: |
+ expectation_string: A string like "// [r'some_regex']". |
+ |
+ Returns: |
+ A list of compiled regular expressions indicating all possible valid |
+ compiler outputs. An empty list means the compiler output is ignored and |
+ only a failed compilation is required for the test to pass. |
+ """ |
+ if expectation_string is None: |
+ raise ArgumentValidationException('Test must specify expected output.') |
+ |
+ match = EXTRACT_EXPECTATION_RE.match(expectation_string) |
+ if match is None: |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
do you use "is None" over "not" intentionally?
(no
awong
2011/09/08 02:46:16
Done.
|
+ raise ArgumentValidationException( |
+ 'Cannot parse expectations in %s' % expectation_string) |
+ |
+ raw_expectation = ast.literal_eval(match.group(1)) |
+ if type(raw_expectation) is not list: |
+ raise ArgumentValidationException( |
+ 'Expectations must be a list of regexps. Instead, got %s' % |
+ repr(raw_expectation)) |
+ |
+ expectation = [] |
+ for regex_str in raw_expectation: |
+ if type(regex_str) is not str: |
+ raise ArgumentValidationException( |
+ '"%s" is not a regexp in %s' % (regex_str, expectation_string)) |
+ re.compile(regex_str) |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
unreachable
awong
2011/09/08 02:46:16
Done.
|
+ expectation.append(re.compile(regex_str)) |
+ return expectation |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
doco semantics of empty list.
awong
2011/09/08 02:46:16
Done.
|
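+# A sketch of the round trip (the expectation strings are invented): |
+# |
+#   ParseExpectation("// [r'invalid conversion', r'no matching function']") |
+#   # -> a list of two compiled regex objects; an empty list ([]) means any |
+#   #    output is acceptable as long as the compilation fails. |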
+ |
+ |
+def ExtractTestConfigs(sourcefile_path): |
+ """Parses the soruce file for test configurations. |
+ |
+ Each no-compile test in the file is separated by an ifdef macro. We scan |
+ the source file with the TEST_CONFIG_RE to find all ifdefs that look like |
+ they demark one no-compile test and try to extract the test configuration |
+ from that. |
+ |
+ Args: |
+ sourcefile_path: The path to the source file. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
"with"?
awong
2011/09/08 02:46:16
"containing"?
Ami GONE FROM CHROMIUM
2011/09/08 18:30:20
"The path to the source file"?
(append "in string
awong
2011/09/09 01:01:43
Done.
|
+ |
+ Returns: |
+ A list of test configurations. Each test configuration is a dictionary of |
+ the form: |
+ { name: 'TEST_NAME' |
+ suite_name: 'SOURCE_PATH_NAME' |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
SourcePathName?
(what is this for? gtest mockery
awong
2011/09/08 02:46:16
Done.
|
+ expectations: [re.Pattern, re.Pattern] } |
+ |
+ The compiled regexps in expectations define the valid outputs of the |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
s/expectations/|expectations|/
awong
2011/09/08 02:46:16
Done.
|
+ compiler. If any one of the listed patterns matches either the stderr or |
+ stdout from the compilation, and the compilation failed, then the test is |
+ considered to have succeeded. If the list is empty, then we ignore the |
+ compiler output and just check for failed compilation. If the expectations |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
ditto (and drop "the")
awong
2011/09/08 02:46:16
Done.
|
+ is actually None, then this specifies a compiler sanity check test, which |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
this is the first time in the file you mention "sa
Ami GONE FROM CHROMIUM
2011/09/08 18:30:20
Missed this or didn't like it?
awong
2011/09/09 01:01:43
Missed, but I'm not sure I like. :)
I'd rather on
|
+ should expect a SUCCESSFUL compilation. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
english
awong
2011/09/08 02:46:16
Done.
|
+ """ |
+ sourcefile = open(sourcefile_path, 'r') |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
or die?
awong
2011/09/08 02:46:16
I thought python just raises on errors...
|
+ |
+ # Convert filename from underscores to CamelCase. |
+ words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_') |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
'bind_unittest.nc' whaaa?
awong
2011/09/08 02:46:16
oops.
|
+ words = [w.capitalize() for w in words] |
+ suite_name = 'NoCompile' + ''.join(words) |
+ |
+ # Start with the compiler sanity test. If the file does not compile cleanly |
+ # with no test sections enabled, the compiler or flags are broken, and every |
+ # "expected failure" below would be meaningless. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
IMO this needs a justifying comment (IIUC you do t
awong
2011/09/08 02:46:16
Justification added.
|
+ test_configs = [{'name': 'TEST_SANITY', |
+ 'suite_name': suite_name, |
+ 'expectations': None}] |
+ |
+ for line in sourcefile: |
+ match_result = TEST_CONFIG_RE.match(line) |
+ if match_result: |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
if you reverse the test you get to early-continue
awong
2011/09/08 02:46:16
Good call.
|
+ groups = match_result.groups() |
+ |
+ # Grab the name and remove the defined() predicate if there is one. |
+ name = groups[0] |
+ strip_result = STRIP_DEFINED_RE.match(name) |
+ if strip_result: |
+ name = strip_result.group(1) |
+ |
+ # Read expectations if there are any. |
+ expectations = ParseExpectation(groups[1]) |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
temporary isn't used anywhere but the dict below s
awong
2011/09/08 02:46:16
inlined :)
|
+ |
+ test_configs.append({'name': name, |
+ 'suite_name': suite_name, |
+ 'expectations': expectations}) |
+ sourcefile.close() |
+ return test_configs |
+ |
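+# For a hypothetical foo_unittest.nc containing a single TEST_BAD_CALL ifdef |
+# (both names invented), the returned list would look roughly like: |
+# |
+#   [{'name': 'TEST_SANITY', 'suite_name': 'NoCompileFooUnittest', |
+#     'expectations': None}, |
+#    {'name': 'TEST_BAD_CALL', 'suite_name': 'NoCompileFooUnittest', |
+#     'expectations': [<compiled regex>, ...]}] |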
+ |
+def StartTest(sourcefile_path, cflags, config): |
+ """Perform one negative compile test. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
s/Perform/Start/
awong
2011/09/08 02:46:16
Done.
|
+ |
+ Args: |
+ sourcefile_path: The path to the source file. |
+ cflags: A string with all the CFLAGS to give to gcc. It is tokenized |
+ with shlex.split() before being added to the command line. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
doco passing through shlex
awong
2011/09/08 02:46:16
Done.
|
+ config: A dictionary describing the test. See ExtractTestConfigs |
+ for a description of the config format. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:18
doco return value
awong
2011/09/08 02:46:16
Done.
|
+ """ |
+ # TODO(ajwong): Get the compiler from gyp. |
+ cmdline = ['g++'] |
+ cmdline.extend(shlex.split(cflags)) |
+ name = config['name'] |
+ expectations = config['expectations'] |
+ if expectations is not None: |
+ cmdline.append('-D%s' % name) |
+ cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++', sourcefile_path]) |
+ |
+ process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, |
+ stderr=subprocess.PIPE) |
+ return {'proc': process, |
+ 'cmdline': ' '.join(cmdline), |
+ 'name': name, |
+ 'suite_name': config['suite_name'], |
+ 'terminate_timeout': time.time() + TEST_TERMINATE_TIMEOUT_SEC, |
+ 'kill_timeout': time.time() + TEST_KILL_TIMEOUT_SEC, |
+ 'aborted': False, |
+ 'expectations': expectations} |
+ |
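+# For a config named TEST_BAD_CALL with non-None expectations and cflags of |
+# '-I.' (both invented for illustration), the constructed command line |
+# resembles: |
+# |
+#   g++ -I. -DTEST_BAD_CALL -o /dev/null -c -x c++ foo_unittest.nc |
+# |
+# so only the #ifdef block selected by the -D define is compiled. |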
+ |
+def PassTest(resultfile, test): |
+ """Interprets and logs the result of a test started by StartTest() |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
There isn't much interpreting going on (here and i
awong
2011/09/08 02:46:16
Comment fixed.
|
+ |
+ Args: |
+ resultfile: File object for .cc file that results are written to. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
doco |test|
awong
2011/09/08 02:46:16
Done.
|
+ """ |
+ resultfile.write('TEST(%s, %s) { }\n\n' % (test['suite_name'], test['name'])) |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
throw into previous write()?
awong
2011/09/08 02:46:16
Done.
|
+ |
+ |
+def FailTest(resultfile, test, error, stdout=None, stderr=None): |
+ """Interprets and logs the result of a test started by StartTest() |
+ |
+ Args: |
+ resultfile: File object for .cc file that results are written to. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
doco other args
awong
2011/09/08 02:46:16
Done.
|
+ """ |
+ resultfile.write('#error %s Failed: %s\n' % (test['name'], error)) |
+ resultfile.write('#error compile line: %s\n' % test['cmdline']) |
+ if stdout and len(stdout) != 0: |
+ resultfile.write('#error %s stdout:\n' % test['name']) |
+ for line in stdout.split('\n'): |
+ resultfile.write('#error %s\n' % line) |
+ |
+ if stderr and len(stderr) != 0: |
+ resultfile.write('#error %s stderr:\n' % test['name']) |
+ for line in stderr.split('\n'): |
+ resultfile.write('#error %s\n' % line) |
+ resultfile.write('\n') |
+ |
+ |
+def ProcessTestResult(resultfile, test): |
+ """Interprets and logs the result of a test started by StartTest() |
+ |
+ Args: |
+ resultfile: File object for .cc file that results are written to. |
+ test: The dictionary from StartTest() to process. |
+ """ |
+ # Snap a copy of stdout and stderr right away because communicate() can |
+ # only be called once on the Popen object, and several of the checks |
+ # below need access to the output. |
+ proc = test['proc'] |
+ (stdout, stderr) = proc.communicate() |
+ |
+ if test['aborted']: |
+ FailTest(resultfile, test, "Compile timed out.") |
+ return |
+ |
+ if test['expectations'] is None: |
+ # This signals a compiler sanity check test. Fail iff compilation failed. |
+ if proc.poll() != 0: |
+ FailTest(resultfile, test, 'Sanity compile failed. Is compiler borked?', |
+ stdout, stderr) |
+ return |
+ elif proc.poll() == 0: |
+ # Handle failure due to successful compile. |
+ FailTest(resultfile, test, |
+ 'Unexpected successful compilation.', |
+ stdout, stderr) |
+ return |
+ else: |
+ # Check the output has the right expectations. If there are no |
+ # expectations, then we just consider the output "matched" by default. |
+ matched = len(test['expectations']) == 0 |
+ for regexp in test['expectations']: |
+ if (regexp.search(stdout) is not None or |
+ regexp.search(stderr) is not None): |
+ matched = True |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
PassTest & return?
awong
2011/09/08 02:46:16
Done.
|
+ break |
+ if matched is False: |
+ expectation_str = ', '.join( |
+ ["r'%s'" % regexp.pattern for regexp in test['expectations']]) |
+ FailTest(resultfile, test, |
+ 'Expectations [%s] did not match output.' % expectation_str, |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
typo
awong
2011/09/08 02:46:16
Done.
|
+ stdout, stderr) |
+ return |
+ |
+ # No failures. Success! |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
If you take my suggestion at l.280 above, this can
awong
2011/09/08 02:46:16
Done.
|
+ PassTest(resultfile, test) |
+ |
+ |
+def DrainExecutingTasks(resultfile, executing_tests): |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
DrainAtLeastOneExecutingTask?
I don't like either
awong
2011/09/08 02:46:16
I don't have any good names either. This function
|
+ """Blocks until one task is removed from executing_tests. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
s/one/at least one/
awong
2011/09/08 02:46:16
Done.
|
+ |
+ This function removes completed tests from executing_tests, logging failures |
+ and output. If no tests can be removed, it will enter a poll-loop until one |
+ test finishes or times out. On a timeout, this function is responsible for |
+ terminating the process in the appropriate fashion. |
+ |
+ Args: |
+ resultfile: File object for .cc file that results are written to. |
+ executing_tests: Array of currently executing tests from StartTest(). |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
doco return value
awong
2011/09/08 02:46:16
Done.
|
+ """ |
+ finished_tests = [] |
+ busy_loop_watchdog = time.time() + BUSY_LOOP_MAX_TIME_SEC |
+ while len(finished_tests) == 0: |
+ if time.time() > busy_loop_watchdog: |
+ # We've been looping for too long. Assume this whole thing is hung. |
+ raise WatchdogException('Busy looping for too long. Aborting negative ' |
+ 'compile test.') |
+ |
+ for test in executing_tests: |
+ proc = test['proc'] |
+ if proc.poll() is not None: |
+ finished_tests.append(test) |
+ # Check the kill deadline first; since it is later than the terminate |
+ # deadline, checking terminate first would keep the kill from ever running. |
+ elif test['kill_timeout'] < time.time(): |
+ proc.kill() |
+ test['aborted'] = True |
+ elif test['terminate_timeout'] < time.time(): |
+ proc.terminate() |
+ test['aborted'] = True |
+ |
+ for test in finished_tests: |
+ ProcessTestResult(resultfile, test) |
+ |
+ return filter(lambda x : x not in finished_tests, executing_tests) |
+ |
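+# Typical use, mirroring the loops in main() below: |
+# |
+#   executing_tests = DrainExecutingTasks(resultfile, executing_tests) |
+# |
+# Every test dropped from the returned list has already been logged to |
+# resultfile by ProcessTestResult(). |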
+ |
+def main(): |
+ if len(sys.argv) != 5: |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
s/</!=/
awong
2011/09/08 02:46:16
Done.
|
+ print ('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' % |
+ sys.argv[0]) |
+ sys.exit(1) |
+ pass |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
why the pass?
awong
2011/09/08 02:46:16
removed.
|
+ |
+ # Force us to the "C" locale so the compiler doesn't localize its output. |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
s/that/the/
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
This is strange. Is this something that gyp does
awong
2011/09/08 02:46:16
Done.
awong
2011/09/08 02:46:16
No...it's just gcc localizing its output. If you'r
|
+ os.environ['LC_ALL'] = 'C' |
+ |
+ parallelism = int(sys.argv[1]) |
+ sourcefile_path = sys.argv[2] |
+ cflags = sys.argv[3] |
+ resultfile_path = sys.argv[4] |
+ |
+ ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path) |
+ |
+ print 'parallelism: %s' % parallelism |
+ print 'sourcefile: %s' % sourcefile_path |
+ print 'cflags: %s' % cflags |
+ print 'resultfile: %s' % resultfile_path |
Ami GONE FROM CHROMIUM
2011/08/31 21:50:19
Drop these?
awong
2011/09/08 02:46:16
dropped.
|
+ |
+ test_configs = ExtractTestConfigs(sourcefile_path) |
+ |
+ resultfile = open(resultfile_path, 'w') |
+ resultfile.write(RESULT_FILE_HEADER % sourcefile_path) |
+ |
+ # Run the no-compile tests, but ensure we do not run more than |parallelism| |
+ # tests at once. |
+ executing_tests = [] |
+ for config in test_configs: |
+ # DrainExecutingTasks blocks until at least one test finishes. Thus, this |
+ # acts as a semaphore. We cannot use threads + a real semaphore because |
+ # subprocess forks, which can cause all sorts of hilarity with threads. |
+ if len(executing_tests) >= parallelism: |
+ executing_tests = DrainExecutingTasks(resultfile, executing_tests) |
+ |
+ if config['name'].startswith('DISABLED_'): |
+ PassTest(resultfile, config) |
+ else: |
+ executing_tests.append(StartTest(sourcefile_path, cflags, config)) |
+ |
+ # If there are no more tests to start, we still need to drain the running |
+ # ones. |
+ while len(executing_tests) > 0: |
+ executing_tests = DrainExecutingTasks(resultfile, executing_tests) |
+ |
+ resultfile.close() |
+ |
+ |
+if __name__ == '__main__': |
+ main() |