OLD | NEW |
(Empty) | |
| 1 #!/usr/bin/python |
| 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. |
| 5 |
| 6 """Implements a simple "negative compile" test for C++ on Linux. |
| 7 |
| 8 Sometimes a C++ API needs to ensure that various usages cannot compile. To |
| 9 enable unit testing of these assertions, we use this Python script to |
| 10 invoke gcc on a source file and assert that compilation fails. |
| 11 |
| 12 For more info, see: |
| 13 http://dev.chromium.org/developers/testing/no-compile-tests |
| 14 """ |
| 15 |
| 16 import ast |
| 17 import locale |
| 18 import os |
| 19 import re |
| 20 import select |
| 21 import shlex |
| 22 import subprocess |
| 23 import sys |
| 24 import time |
| 25 |
| 26 |
| 27 # Matches lines that start with #if and have the substring NCTEST in the |
| 28 # conditional. Also extracts the comment. This allows us to search for |
| 29 # lines like the following: |
| 30 # |
| 31 # #ifdef NCTEST_NAME_OF_TEST // [r'expected output'] |
| 32 # #if defined(NCTEST_NAME_OF_TEST) // [r'expected output'] |
| 33 # #if NCTEST_NAME_OF_TEST // [r'expected output'] |
| 34 # #elif NCTEST_NAME_OF_TEST // [r'expected output'] |
| 35 # #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output'] |
| 36 # |
| 37 # inside the unittest file. |
| 38 NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?') |
| 39 |
| 40 |
| 41 # Matches and removes the defined() preprocessor predicate. This is useful |
| 42 # for test cases that use the preprocessor if-statement form: |
| 43 # |
| 44 # #if defined(NCTEST_NAME_OF_TEST) |
| 45 # |
| 46 # Should be used to post-process the results found by NCTEST_CONFIG_RE. |
| 47 STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)') |
| 48 |
| 49 |
| 50 # Used to grab the expectation from the comment at the end of an #ifdef. See |
| 51 # NCTEST_CONFIG_RE's comment for examples of what the format should look like. |
| 52 # |
| 53 # The extracted substring should be a Python list of regular expressions. |
| 54 EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])') |
| 55 |
| 56 |
| 57 # The header for the result file so that it can be compiled. |
| 58 RESULT_FILE_HEADER = """ |
| 59 // This file is generated by the no compile test from: |
| 60 // %s |
| 61 |
| 62 #include "base/logging.h" |
| 63 #include "testing/gtest/include/gtest/gtest.h" |
| 64 |
| 65 """ |
| 66 |
| 67 |
| 68 # The GUnit test function to output on a successful test completion. |
| 69 SUCCESS_GUNIT_TEMPLATE = """ |
| 70 TEST(%s, %s) { |
| 71 LOG(INFO) << "Took %f secs. Started at %f, ended at %f"; |
| 72 } |
| 73 """ |
| 74 |
| 75 # The GUnit test function to output for a disabled test. |
| 76 DISABLED_GUNIT_TEMPLATE = """ |
| 77 TEST(%s, %s) { } |
| 78 """ |
| 79 |
| 80 |
| 81 # Timeout constants. |
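| # A hung compile is first sent a terminate request, then a hard kill shortly |
| # afterwards; CompleteAtLeastOneTest() asserts (aborting the run) if no test |
| # makes progress within twice the kill window. |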
| 82 NCTEST_TERMINATE_TIMEOUT_SEC = 60 |
| 83 NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2 |
| 84 BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2 |
| 85 |
| 86 |
| 87 def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path): |
| 88 """Make sure the arguments being passed in are sane.""" |
| 89 assert parallelism >= 1 |
| 90 assert type(sourcefile_path) is str |
| 91 assert type(cflags) is str |
| 92 assert type(resultfile_path) is str |
| 93 |
| 94 |
| 95 def ParseExpectation(expectation_string): |
| 96 """Extracts expectation definition from the trailing comment on the ifdef. |
| 97 |
| 98 See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing. |
| 99 |
| 100 Args: |
| 101 expectation_string: A string like "// [r'some_regex']" |
| 102 |
| 103 Returns: |
| 104 A list of compiled regular expressions indicating all possible valid |
| 105 compiler outputs. If the list is empty, all outputs are considered valid. |
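| |
| Example (illustrative): "// [r'invalid conversion']" yields a one-element |
| list containing re.compile(r'invalid conversion'). |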
| 106 """ |
| 107 assert expectation_string is not None |
| 108 |
| 109 match = EXTRACT_EXPECTATION_RE.match(expectation_string) |
| 110 assert match |
| 111 |
| 112 raw_expectation = ast.literal_eval(match.group(1)) |
| 113 assert type(raw_expectation) is list |
| 114 |
| 115 expectation = [] |
| 116 for regex_str in raw_expectation: |
| 117 assert type(regex_str) is str |
| 118 expectation.append(re.compile(regex_str)) |
| 119 return expectation |
| 120 |
| 121 |
| 122 def ExtractTestConfigs(sourcefile_path): |
| 123 """Parses the source file for test configurations. |
| 124 |
| 125 Each no-compile test in the file is separated by an ifdef macro. We scan |
| 126 the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like |
| 127 they demarcate one no-compile test, and try to extract the test |
| 128 configuration from each. |
| 129 |
| 130 Args: |
| 131 sourcefile_path: The path to the source file. |
| 132 |
| 133 Returns: |
| 134 A list of test configurations. Each test configuration is a dictionary of |
| 135 the form: |
| 136 |
| 137 { 'name': 'NCTEST_NAME', |
| 138 'suite_name': 'SOURCE_FILE_NAME', |
| 139 'expectations': [re.Pattern, re.Pattern] } |
| 140 |
| 141 The |suite_name| is used to generate a pretty gtest output on successful |
| 142 completion of the no compile test. |
| 143 |
| 144 The compiled regexps in |expectations| define the valid outputs of the |
| 145 compiler. If any one of the listed patterns matches either the stderr or |
| 146 stdout from the compilation, and the compilation failed, then the test is |
| 147 considered to have succeeded. If the list is empty, then we ignore the |
| 148 compiler output and just check for failed compilation. If |expectations| |
| 149 is actually None, then this specifies a compiler sanity check test, which |
| 150 should expect a SUCCESSFUL compilation. |
| 151 """ |
| 152 sourcefile = open(sourcefile_path, 'r') |
| 153 |
| 154 # Convert filename from underscores to CamelCase. |
| 155 words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_') |
| 156 words = [w.capitalize() for w in words] |
| 157 suite_name = 'NoCompile' + ''.join(words) |
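| # For example (hypothetical path), "bind_unittest.nc" becomes the suite name |
| # "NoCompileBindUnittest". |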
| 158 |
| 159 # Start with at least the compiler sanity test. There always needs to be one |
| 160 # sanity test to show that the compiler flags and configuration are not |
| 161 # simply wrong. Otherwise, a misconfigured compiler or an error in the |
| 162 # shared portions of the .nc file would cause all tests to erroneously pass. |
| 163 test_configs = [{'name': 'NCTEST_SANITY', |
| 164 'suite_name': suite_name, |
| 165 'expectations': None}] |
| 166 |
| 167 for line in sourcefile: |
| 168 match_result = NCTEST_CONFIG_RE.match(line) |
| 169 if not match_result: |
| 170 continue |
| 171 |
| 172 groups = match_result.groups() |
| 173 |
| 174 # Grab the name and remove the defined() predicate if there is one. |
| 175 name = groups[0] |
| 176 strip_result = STRIP_DEFINED_RE.match(name) |
| 177 if strip_result: |
| 178 name = strip_result.group(1) |
| 179 |
| 180 # Read expectations if there are any. |
| 181 test_configs.append({'name': name, |
| 182 'suite_name': suite_name, |
| 183 'expectations': ParseExpectation(groups[1])}) |
| 184 sourcefile.close() |
| 185 return test_configs |
| 186 |
| 187 |
| 188 def StartTest(sourcefile_path, cflags, config): |
| 189 """Start one negative compile test. |
| 190 |
| 191 Args: |
| 192 sourcefile_path: The path to the source file. |
| 193 cflags: A string with all the CFLAGS to give to gcc. This string will be |
| 194 split by shlex, so be careful with escaping. |
| 195 config: A dictionary describing the test. See ExtractTestConfigs |
| 196 for a description of the config format. |
| 197 |
| 198 Returns: |
| 199 A dictionary containing all the information about the started test. The |
| 200 fields in the dictionary are as follows: |
| 201 { 'proc': A subprocess object representing the compiler run. |
| 202 'cmdline': The executed command line. |
| 203 'name': The name of the test. |
| 204 'suite_name': The suite name to use when generating the gunit test |
| 205 result. |
| 206 'terminate_timeout': The timestamp in seconds since the epoch after |
| 207 which the test should be terminated. |
| 208 'kill_timeout': The timestamp in seconds since the epoch after which |
| 209 the test should be given a hard kill signal. |
| 210 'started_at': A timestamp in seconds since the epoch for when this test |
| 211 was started. |
| 212 'aborted_at': A timestamp in seconds since the epoch for when this test |
| 213 was aborted. If the test completed successfully, |
| 214 this value is 0. |
| 215 'finished_at': A timestamp in seconds since the epoch for when this |
| 216 test successfully completed. If the test was aborted |
| 217 or is still running, this value is 0. |
| 218 'expectations': A dictionary with the test expectations. See |
| 219 ParseExpectation() for the structure. |
| 220 } |
| 221 """ |
| 222 # TODO(ajwong): Get the compiler from gyp. |
| 223 cmdline = ['g++'] |
| 224 cmdline.extend(shlex.split(cflags)) |
| 225 name = config['name'] |
| 226 expectations = config['expectations'] |
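| # Only a real test defines its NCTEST_* macro below; the sanity test (which |
| # has expectations of None) compiles the file with no test block enabled. |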
| 227 if expectations is not None: |
| 228 cmdline.append('-D%s' % name) |
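| # Compile only (-c), force the source to be treated as C++ (-x c++), and |
| # discard the object file; only the exit status and diagnostics matter. |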
| 229 cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++', sourcefile_path]) |
| 230 |
| 231 process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, |
| 232 stderr=subprocess.PIPE) |
| 233 now = time.time() |
| 234 return {'proc': process, |
| 235 'cmdline': ' '.join(cmdline), |
| 236 'name': name, |
| 237 'suite_name': config['suite_name'], |
| 238 'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC, |
| 239 'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC, |
| 240 'started_at': now, |
| 241 'aborted_at': 0, |
| 242 'finished_at': 0, |
| 243 'expectations': expectations} |
| 244 |
| 245 |
| 246 def PassTest(resultfile, test): |
| 247 """Logs a successful result for a test started by StartTest(), or for a |
| 248 disabled test configuration. |
| 249 |
| 250 Args: |
| 251 resultfile: File object for .cc file that results are written to. |
| 252 test: An instance of the dictionary returned by StartTest(), or a |
| 253 configuration from ExtractTestConfigs(). |
| 254 """ |
| 255 # The 'started_at' key is only added if a test has been started. |
| 256 if 'started_at' in test: |
| 257 resultfile.write(SUCCESS_GUNIT_TEMPLATE % ( |
| 258 test['suite_name'], test['name'], |
| 259 test['finished_at'] - test['started_at'], |
| 260 test['started_at'], test['finished_at'])) |
| 261 else: |
| 262 resultfile.write(DISABLED_GUNIT_TEMPLATE % ( |
| 263 test['suite_name'], test['name'])) |
| 264 |
| 265 |
| 266 def FailTest(resultfile, test, error, stdout=None, stderr=None): |
| 267 """Logs a failure for a test started by StartTest(). |
| 268 |
| 269 Args: |
| 270 resultfile: File object for .cc file that results are written to. |
| 271 test: An instance of the dictionary returned by StartTest() |
| 272 error: The printable reason for the failure. |
| 273 stdout: The test's output to stdout. |
| 274 stderr: The test's output to stderr. |
| 275 """ |
| 276 resultfile.write('#error %s Failed: %s\n' % (test['name'], error)) |
| 277 resultfile.write('#error compile line: %s\n' % test['cmdline']) |
| 278 if stdout and len(stdout) != 0: |
| 279 resultfile.write('#error %s stdout:\n' % test['name']) |
| 280 for line in stdout.split('\n'): |
| 281 resultfile.write('#error %s\n' % line) |
| 282 |
| 283 if stderr and len(stderr) != 0: |
| 284 resultfile.write('#error %s stderr:\n' % test['name']) |
| 285 for line in stderr.split('\n'): |
| 286 resultfile.write('#error %s\n' % line) |
| 287 resultfile.write('\n') |
| 288 |
| 289 |
| 290 def WriteStats(resultfile, suite_name, timings): |
| 291 """Logs the performance timings for each stage of the script into a fake test. |
| 292 |
| 293 Args: |
| 294 resultfile: File object for .cc file that results are written to. |
| 295 suite_name: The name of the GUnit suite this test belongs to. |
| 296 timings: Dictionary with timestamps for each stage of the script run. |
| 297 """ |
| 298 stats_template = ("Started %f, Ended %f, Total %fs, Extract %fs, " |
| 299 "Compile %fs, Process %fs") |
| 300 total_secs = timings['results_processed'] - timings['started'] |
| 301 extract_secs = timings['extract_done'] - timings['started'] |
| 302 compile_secs = timings['compile_done'] - timings['extract_done'] |
| 303 process_secs = timings['results_processed'] - timings['compile_done'] |
| 304 resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % ( |
| 305 suite_name, stats_template % ( |
| 306 timings['started'], timings['results_processed'], total_secs, |
| 307 extract_secs, compile_secs, process_secs))) |
| 308 |
| 309 |
| 310 def ProcessTestResult(resultfile, test): |
| 311 """Interprets and logs the result of a test started by StartTest() |
| 312 |
| 313 Args: |
| 314 resultfile: File object for .cc file that results are written to. |
| 315 test: The dictionary from StartTest() to process. |
| 316 """ |
| 317 # Snap a copy of stdout and stderr into the test dictionary immediately |
| 318 # because we can only call communicate() once on the Popen object, and lots |
| 319 # of the code below needs access to the output. |
| 320 proc = test['proc'] |
| 321 (stdout, stderr) = proc.communicate() |
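| # communicate() waits for the compiler to exit, so the poll() calls below |
| # simply report its exit status. |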
| 322 |
| 323 if test['aborted_at'] != 0: |
| 324 FailTest(resultfile, test, "Compile timed out. Started %f ended %f." % |
| 325 (test['started_at'], test['aborted_at'])) |
| 326 return |
| 327 |
| 328 if test['expectations'] is None: |
| 329 # This signals a compiler sanity check test. Fail iff compilation failed. |
| 330 if proc.poll() == 0: |
| 331 PassTest(resultfile, test) |
| 332 return |
| 333 else: |
| 334 FailTest(resultfile, test, 'Sanity compile failed. Is compiler borked?', |
| 335 stdout, stderr) |
| 336 return |
| 337 elif proc.poll() == 0: |
| 338 # Handle failure due to successful compile. |
| 339 FailTest(resultfile, test, |
| 340 'Unexpected successful compilation.', |
| 341 stdout, stderr) |
| 342 return |
| 343 else: |
| 344 # Check that the output matches one of the expectations. If there are no |
| 345 # expectations, then we just consider the output "matched" by default. |
| 346 if len(test['expectations']) == 0: |
| 347 PassTest(resultfile, test) |
| 348 return |
| 349 |
| 350 # Otherwise test against all expectations. |
| 351 for regexp in test['expectations']: |
| 352 if (regexp.search(stdout) is not None or |
| 353 regexp.search(stderr) is not None): |
| 354 PassTest(resultfile, test) |
| 355 return |
| 356 expectation_str = ', '.join( |
| 357 ["r'%s'" % regexp.pattern for regexp in test['expectations']]) |
| 358 FailTest(resultfile, test, |
| 359 'Expectations [%s] did not match output.' % expectation_str, |
| 360 stdout, stderr) |
| 361 return |
| 362 |
| 363 |
| 364 def CompleteAtLeastOneTest(resultfile, executing_tests): |
| 365 """Blocks until at least one task is removed from executing_tests. |
| 366 |
| 367 This function removes completed tests from executing_tests. If no tests can |
| 368 be removed, it will enter a poll-loop until one test finishes or times out. |
| 369 On a timeout, this function is responsible for terminating the offending |
| 370 compiler process in the appropriate fashion. |
| 371 |
| 372 Args: |
| 373 executing_tests: A dict mapping a string containing the test name to the |
| 374 test dict returned by StartTest(). |
| 375 |
| 376 Returns: |
| 377 A list of tests that have finished. |
| 378 """ |
| 379 finished_tests = [] |
| 380 busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC |
| 381 while len(finished_tests) == 0: |
| 382 # If we don't make progress for too long, assume the code is just dead. |
| 383 assert busy_loop_timeout > time.time() |
| 384 |
| 385 # Select on the output pipes. |
| 386 read_set = [] |
| 387 for test in executing_tests.values(): |
| 388 read_set.extend([test['proc'].stderr, test['proc'].stdout]) |
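| # select() is used only to block until some compiler produces output or the |
| # timeout expires; the returned ready-lists are not themselves used. |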
| 389 result = select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC) |
| 390 |
| 391 # Now attempt to process results. |
| 392 now = time.time() |
| 393 for test in executing_tests.values(): |
| 394 proc = test['proc'] |
| 395 if proc.poll() is not None: |
| 396 test['finished_at'] = now |
| 397 finished_tests.append(test) |
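| # Check the hard-kill deadline before the terminate deadline; otherwise the |
| # terminate branch would match on every pass once its earlier deadline had |
| # passed and the kill could never be reached. |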
| 398 elif test['kill_timeout'] < now: |
| 399 proc.kill() |
| 400 test['aborted_at'] = now |
| 401 elif test['terminate_timeout'] < now: |
| 402 proc.terminate() |
| 403 test['aborted_at'] = now |
| 404 |
| 405 for test in finished_tests: |
| 406 del executing_tests[test['name']] |
| 407 return finished_tests |
| 408 |
| 409 |
| 410 def main(): |
| 411 if len(sys.argv) != 5: |
| 412 print ('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' % |
| 413 sys.argv[0]) |
| 414 sys.exit(1) |
| 415 |
| 416 # Force us into the "C" locale so the compiler doesn't localize its output. |
| 417 # In particular, this stops gcc from using smart quotes when in English UTF-8 |
| 418 # locales. This makes the expectation writing much easier. |
| 419 os.environ['LC_ALL'] = 'C' |
| 420 |
| 421 parallelism = int(sys.argv[1]) |
| 422 sourcefile_path = sys.argv[2] |
| 423 cflags = sys.argv[3] |
| 424 resultfile_path = sys.argv[4] |
| 425 |
| 426 timings = {'started': time.time()} |
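| # Timestamps collected in |timings| are summarized by WriteStats() at the |
| # end of the run. |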
| 427 |
| 428 ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path) |
| 429 |
| 430 test_configs = ExtractTestConfigs(sourcefile_path) |
| 431 timings['extract_done'] = time.time() |
| 432 |
| 433 resultfile = open(resultfile_path, 'w') |
| 434 resultfile.write(RESULT_FILE_HEADER % sourcefile_path) |
| 435 |
| 436 # Run the no-compile tests, but ensure we do not run more than |parallelism| |
| 437 # tests at once. |
| 438 timings['header_written'] = time.time() |
| 439 executing_tests = {} |
| 440 finished_tests = [] |
| 441 for config in test_configs: |
| 442 # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this |
| 443 # acts as a semaphore. We cannot use threads + a real semaphore because |
| 444 # subprocess forks, which can cause all sorts of hilarity with threads. |
| 445 if len(executing_tests) >= parallelism: |
| 446 finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests)) |
| 447 |
| 448 if config['name'].startswith('DISABLED_'): |
| 449 PassTest(resultfile, config) |
| 450 else: |
| 451 test = StartTest(sourcefile_path, cflags, config) |
| 452 assert test['name'] not in executing_tests |
| 453 executing_tests[test['name']] = test |
| 454 |
| 455 # If there are no more tests to start, we still need to drain the running |
| 456 # ones. |
| 457 while len(executing_tests) > 0: |
| 458 finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests)) |
| 459 timings['compile_done'] = time.time() |
| 460 |
| 461 for test in finished_tests: |
| 462 ProcessTestResult(resultfile, test) |
| 463 timings['results_processed'] = time.time() |
| 464 |
| 465 # We always know at least a sanity test was run. |
| 466 WriteStats(resultfile, finished_tests[0]['suite_name'], timings) |
| 467 |
| 468 resultfile.close() |
| 469 |
| 470 |
| 471 if __name__ == '__main__': |
| 472 main() |