Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(893)

Side by Side Diff: tools/nocompile_driver.py

Issue 2644003002: Make no-compile test deterministic (Closed)
Patch Set: fix comments Created 3 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Implements a simple "negative compile" test for C++ on linux. 6 """Implements a simple "negative compile" test for C++ on linux.
7 7
8 Sometimes a C++ API needs to ensure that various usages cannot compile. To 8 Sometimes a C++ API needs to ensure that various usages cannot compile. To
9 enable unittesting of these assertions, we use this python script to 9 enable unittesting of these assertions, we use this python script to
10 invoke gcc on a source file and assert that compilation fails. 10 invoke gcc on a source file and assert that compilation fails.
11 11
12 For more info, see: 12 For more info, see:
13 http://dev.chromium.org/developers/testing/no-compile-tests 13 http://dev.chromium.org/developers/testing/no-compile-tests
14 """ 14 """
15 15
16 import StringIO 16 import StringIO
17 import ast 17 import ast
18 import locale
19 import os 18 import os
20 import re 19 import re
21 import select 20 import select
22 import shlex 21 import shlex
23 import subprocess 22 import subprocess
24 import sys 23 import sys
25 import time 24 import time
26 25
27 26
28 # Matches lines that start with #if and have the substring TEST in the 27 # Matches lines that start with #if and have the substring TEST in the
(...skipping 30 matching lines...) Expand all
59 RESULT_FILE_HEADER = """ 58 RESULT_FILE_HEADER = """
60 // This file is generated by the no compile test from: 59 // This file is generated by the no compile test from:
61 // %s 60 // %s
62 61
63 #include "base/logging.h" 62 #include "base/logging.h"
64 #include "testing/gtest/include/gtest/gtest.h" 63 #include "testing/gtest/include/gtest/gtest.h"
65 64
66 """ 65 """
67 66
68 67
69 # The GUnit test function to output on a successful test completion. 68 # The log message on a test completion.
70 SUCCESS_GUNIT_TEMPLATE = """ 69 LOG_TEMPLATE = """
71 TEST(%s, %s) { 70 TEST(%s, %s) took %f secs. Started at %f, ended at %f.
72 LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
73 }
74 """ 71 """
75 72
76 # The GUnit test function to output for a disabled test. 73 # The GUnit test function to output for a successful or disabled test.
dcheng 2017/01/19 05:36:22 Nit: probably OK to just remove "successful or dis
wychen 2017/01/19 15:43:57 But failed tests wouldn't use this template.
77 DISABLED_GUNIT_TEMPLATE = """ 74 GUNIT_TEMPLATE = """
78 TEST(%s, %s) { } 75 TEST(%s, %s) { }
79 """ 76 """
80 77
81 78
82 # Timeout constants. 79 # Timeout constants.
83 NCTEST_TERMINATE_TIMEOUT_SEC = 60 80 NCTEST_TERMINATE_TIMEOUT_SEC = 60
84 NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2 81 NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
85 BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2 82 BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
86 83
87 84
(...skipping 146 matching lines...) Expand 10 before | Expand all | Expand 10 after
234 'name': name, 231 'name': name,
235 'suite_name': config['suite_name'], 232 'suite_name': config['suite_name'],
236 'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC, 233 'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
237 'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC, 234 'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
238 'started_at': now, 235 'started_at': now,
239 'aborted_at': 0, 236 'aborted_at': 0,
240 'finished_at': 0, 237 'finished_at': 0,
241 'expectations': expectations} 238 'expectations': expectations}
242 239
243 240
244 def PassTest(resultfile, test): 241 def PassTest(resultfile, resultlog, test):
245 """Logs the result of a test started by StartTest(), or a disabled test 242 """Logs the result of a test started by StartTest(), or a disabled test
246 configuration. 243 configuration.
247 244
248 Args: 245 Args:
249 resultfile: File object for .cc file that results are written to. 246 resultfile: File object for .cc file that results are written to.
247 resultlog: File object for the log file.
250 test: An instance of the dictionary returned by StartTest(), a 248 test: An instance of the dictionary returned by StartTest(), a
251 configuration from ExtractTestConfigs(). 249 configuration from ExtractTestConfigs().
252 """ 250 """
251 resultfile.write(GUNIT_TEMPLATE % (
252 test['suite_name'], test['name']))
253
253 # The 'started_at' key is only added if a test has been started. 254 # The 'started_at' key is only added if a test has been started.
254 if 'started_at' in test: 255 if 'started_at' in test:
255 resultfile.write(SUCCESS_GUNIT_TEMPLATE % ( 256 resultlog.write(LOG_TEMPLATE % (
256 test['suite_name'], test['name'], 257 test['suite_name'], test['name'],
257 test['finished_at'] - test['started_at'], 258 test['finished_at'] - test['started_at'],
258 test['started_at'], test['finished_at'])) 259 test['started_at'], test['finished_at']))
259 else:
260 resultfile.write(DISABLED_GUNIT_TEMPLATE % (
261 test['suite_name'], test['name']))
262 260
263 261
264 def FailTest(resultfile, test, error, stdout=None, stderr=None): 262 def FailTest(resultfile, test, error, stdout=None, stderr=None):
265 """Logs the result of a test started by StartTest() 263 """Logs the result of a test started by StartTest()
266 264
267 Args: 265 Args:
268 resultfile: File object for .cc file that results are written to. 266 resultfile: File object for .cc file that results are written to.
269 test: An instance of the dictionary returned by StartTest() 267 test: An instance of the dictionary returned by StartTest()
270 error: The printable reason for the failure. 268 error: The printable reason for the failure.
271 stdout: The test's output to stdout. 269 stdout: The test's output to stdout.
272 stderr: The test's output to stderr. 270 stderr: The test's output to stderr.
273 """ 271 """
274 resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error)) 272 resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
275 resultfile.write('#error "compile line: %s"\n' % test['cmdline']) 273 resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
276 if stdout and len(stdout) != 0: 274 if stdout and len(stdout) != 0:
277 resultfile.write('#error "%s stdout:"\n' % test['name']) 275 resultfile.write('#error "%s stdout:"\n' % test['name'])
278 for line in stdout.split('\n'): 276 for line in stdout.split('\n'):
279 resultfile.write('#error " %s:"\n' % line) 277 resultfile.write('#error " %s:"\n' % line)
280 278
281 if stderr and len(stderr) != 0: 279 if stderr and len(stderr) != 0:
282 resultfile.write('#error "%s stderr:"\n' % test['name']) 280 resultfile.write('#error "%s stderr:"\n' % test['name'])
283 for line in stderr.split('\n'): 281 for line in stderr.split('\n'):
284 resultfile.write('#error " %s"\n' % line) 282 resultfile.write('#error " %s"\n' % line)
285 resultfile.write('\n') 283 resultfile.write('\n')
286 284
287 285
288 def WriteStats(resultfile, suite_name, timings): 286 def WriteStats(resultlog, suite_name, timings):
 289 """Logs the performance timings for each stage of the script into a fake test. 287 """Logs the performance timings for each stage of the script.
290 288
291 Args: 289 Args:
292 resultfile: File object for .cc file that results are written to. 290 resultlog: File object for the log file.
293 suite_name: The name of the GUnit suite this test belongs to. 291 suite_name: The name of the GUnit suite this test belongs to.
294 timings: Dictionary with timestamps for each stage of the script run. 292 timings: Dictionary with timestamps for each stage of the script run.
295 """ 293 """
296 stats_template = ("Started %f, Ended %f, Total %fs, Extract %fs, " 294 stats_template = """
297 "Compile %fs, Process %fs") 295 TEST(%s): Started %f, Ended %f, Total %fs, Extract %fs, Compile %fs, Process %fs
296 """
298 total_secs = timings['results_processed'] - timings['started'] 297 total_secs = timings['results_processed'] - timings['started']
299 extract_secs = timings['extract_done'] - timings['started'] 298 extract_secs = timings['extract_done'] - timings['started']
300 compile_secs = timings['compile_done'] - timings['extract_done'] 299 compile_secs = timings['compile_done'] - timings['extract_done']
301 process_secs = timings['results_processed'] - timings['compile_done'] 300 process_secs = timings['results_processed'] - timings['compile_done']
302 resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % ( 301 resultlog.write(stats_template % (
303 suite_name, stats_template % ( 302 suite_name, timings['started'], timings['results_processed'], total_secs,
304 timings['started'], timings['results_processed'], total_secs, 303 extract_secs, compile_secs, process_secs))
305 extract_secs, compile_secs, process_secs)))
306 304
307 305
308 def ProcessTestResult(resultfile, test): 306 def ProcessTestResult(resultfile, resultlog, test):
309 """Interprets and logs the result of a test started by StartTest() 307 """Interprets and logs the result of a test started by StartTest()
310 308
311 Args: 309 Args:
312 resultfile: File object for .cc file that results are written to. 310 resultfile: File object for .cc file that results are written to.
311 resultlog: File object for the log file.
313 test: The dictionary from StartTest() to process. 312 test: The dictionary from StartTest() to process.
314 """ 313 """
315 # Snap a copy of stdout and stderr into the test dictionary immediately 314 # Snap a copy of stdout and stderr into the test dictionary immediately
 316 # because we can only call this once on the Popen object, and lots of stuff 315 # because we can only call this once on the Popen object, and lots of stuff
317 # below will want access to it. 316 # below will want access to it.
318 proc = test['proc'] 317 proc = test['proc']
319 (stdout, stderr) = proc.communicate() 318 (stdout, stderr) = proc.communicate()
320 319
321 if test['aborted_at'] != 0: 320 if test['aborted_at'] != 0:
322 FailTest(resultfile, test, "Compile timed out. Started %f ended %f." % 321 FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
323 (test['started_at'], test['aborted_at'])) 322 (test['started_at'], test['aborted_at']))
324 return 323 return
325 324
326 if proc.poll() == 0: 325 if proc.poll() == 0:
327 # Handle failure due to successful compile. 326 # Handle failure due to successful compile.
328 FailTest(resultfile, test, 327 FailTest(resultfile, test,
329 'Unexpected successful compilation.', 328 'Unexpected successful compilation.',
330 stdout, stderr) 329 stdout, stderr)
331 return 330 return
332 else: 331 else:
333 # Check the output has the right expectations. If there are no 332 # Check the output has the right expectations. If there are no
334 # expectations, then we just consider the output "matched" by default. 333 # expectations, then we just consider the output "matched" by default.
335 if len(test['expectations']) == 0: 334 if len(test['expectations']) == 0:
336 PassTest(resultfile, test) 335 PassTest(resultfile, resultlog, test)
337 return 336 return
338 337
339 # Otherwise test against all expectations. 338 # Otherwise test against all expectations.
340 for regexp in test['expectations']: 339 for regexp in test['expectations']:
341 if (regexp.search(stdout) is not None or 340 if (regexp.search(stdout) is not None or
342 regexp.search(stderr) is not None): 341 regexp.search(stderr) is not None):
343 PassTest(resultfile, test) 342 PassTest(resultfile, resultlog, test)
344 return 343 return
345 expectation_str = ', '.join( 344 expectation_str = ', '.join(
346 ["r'%s'" % regexp.pattern for regexp in test['expectations']]) 345 ["r'%s'" % regexp.pattern for regexp in test['expectations']])
347 FailTest(resultfile, test, 346 FailTest(resultfile, test,
348 'Expectations [%s] did not match output.' % expectation_str, 347 'Expectations [%s] did not match output.' % expectation_str,
349 stdout, stderr) 348 stdout, stderr)
350 return 349 return
351 350
352 351
353 def CompleteAtLeastOneTest(resultfile, executing_tests): 352 def CompleteAtLeastOneTest(executing_tests):
354 """Blocks until at least one task is removed from executing_tests. 353 """Blocks until at least one task is removed from executing_tests.
355 354
356 This function removes completed tests from executing_tests, logging failures 355 This function removes completed tests from executing_tests, logging failures
357 and output. If no tests can be removed, it will enter a poll-loop until one 356 and output. If no tests can be removed, it will enter a poll-loop until one
358 test finishes or times out. On a timeout, this function is responsible for 357 test finishes or times out. On a timeout, this function is responsible for
359 terminating the process in the appropriate fashion. 358 terminating the process in the appropriate fashion.
360 359
361 Args: 360 Args:
362 executing_tests: A dict mapping a string containing the test name to the 361 executing_tests: A dict mapping a string containing the test name to the
 363 test dict returned from StartTest(). 362 test dict returned from StartTest().
364 363
365 Returns: 364 Returns:
366 A list of tests that have finished. 365 A list of tests that have finished.
367 """ 366 """
368 finished_tests = [] 367 finished_tests = []
369 busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC 368 busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
370 while len(finished_tests) == 0: 369 while len(finished_tests) == 0:
371 # If we don't make progress for too long, assume the code is just dead. 370 # If we don't make progress for too long, assume the code is just dead.
372 assert busy_loop_timeout > time.time() 371 assert busy_loop_timeout > time.time()
373 372
374 # Select on the output pipes. 373 # Select on the output pipes.
375 read_set = [] 374 read_set = []
376 for test in executing_tests.values(): 375 for test in executing_tests.values():
377 read_set.extend([test['proc'].stderr, test['proc'].stdout]) 376 read_set.extend([test['proc'].stderr, test['proc'].stdout])
378 result = select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC) 377 select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)
379 378
380 # Now attempt to process results. 379 # Now attempt to process results.
381 now = time.time() 380 now = time.time()
382 for test in executing_tests.values(): 381 for test in executing_tests.values():
383 proc = test['proc'] 382 proc = test['proc']
384 if proc.poll() is not None: 383 if proc.poll() is not None:
385 test['finished_at'] = now 384 test['finished_at'] = now
386 finished_tests.append(test) 385 finished_tests.append(test)
387 elif test['terminate_timeout'] < now: 386 elif test['terminate_timeout'] < now:
388 proc.terminate() 387 proc.terminate()
(...skipping 29 matching lines...) Expand all
418 417
419 # Convert filename from underscores to CamelCase. 418 # Convert filename from underscores to CamelCase.
420 words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_') 419 words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
421 words = [w.capitalize() for w in words] 420 words = [w.capitalize() for w in words]
422 suite_name = 'NoCompile' + ''.join(words) 421 suite_name = 'NoCompile' + ''.join(words)
423 422
424 test_configs = ExtractTestConfigs(sourcefile_path, suite_name) 423 test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
425 timings['extract_done'] = time.time() 424 timings['extract_done'] = time.time()
426 425
427 resultfile = StringIO.StringIO() 426 resultfile = StringIO.StringIO()
427 resultlog = StringIO.StringIO()
428 resultfile.write(RESULT_FILE_HEADER % sourcefile_path) 428 resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
429 429
430 # Run the no-compile tests, but ensure we do not run more than |parallelism| 430 # Run the no-compile tests, but ensure we do not run more than |parallelism|
431 # tests at once. 431 # tests at once.
432 timings['header_written'] = time.time() 432 timings['header_written'] = time.time()
433 executing_tests = {} 433 executing_tests = {}
434 finished_tests = [] 434 finished_tests = []
435 435
436 test = StartTest( 436 test = StartTest(
437 sourcefile_path, 437 sourcefile_path,
438 cflags + ' -MMD -MF %s.d -MT %s' % (resultfile_path, resultfile_path), 438 cflags + ' -MMD -MF %s.d -MT %s' % (resultfile_path, resultfile_path),
439 { 'name': 'NCTEST_SANITY', 439 { 'name': 'NCTEST_SANITY',
440 'suite_name': suite_name, 440 'suite_name': suite_name,
441 'expectations': None, 441 'expectations': None,
442 }) 442 })
443 executing_tests[test['name']] = test 443 executing_tests[test['name']] = test
444 444
445 for config in test_configs: 445 for config in test_configs:
446 # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this 446 # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
447 # acts as a semaphore. We cannot use threads + a real semaphore because 447 # acts as a semaphore. We cannot use threads + a real semaphore because
448 # subprocess forks, which can cause all sorts of hilarity with threads. 448 # subprocess forks, which can cause all sorts of hilarity with threads.
449 if len(executing_tests) >= parallelism: 449 if len(executing_tests) >= parallelism:
450 finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests)) 450 finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
451 451
452 if config['name'].startswith('DISABLED_'): 452 if config['name'].startswith('DISABLED_'):
453 PassTest(resultfile, config) 453 PassTest(resultfile, resultlog, config)
454 else: 454 else:
455 test = StartTest(sourcefile_path, cflags, config) 455 test = StartTest(sourcefile_path, cflags, config)
456 assert test['name'] not in executing_tests 456 assert test['name'] not in executing_tests
457 executing_tests[test['name']] = test 457 executing_tests[test['name']] = test
458 458
 459 # If there are no more tests to start, we still need to drain the running 459 # If there are no more tests to start, we still need to drain the running
460 # ones. 460 # ones.
461 while len(executing_tests) > 0: 461 while len(executing_tests) > 0:
462 finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests)) 462 finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
463 timings['compile_done'] = time.time() 463 timings['compile_done'] = time.time()
464 464
465 for test in finished_tests: 465 for test in finished_tests:
466 if test['name'] == 'NCTEST_SANITY': 466 if test['name'] == 'NCTEST_SANITY':
467 _, stderr = test['proc'].communicate() 467 _, stderr = test['proc'].communicate()
468 return_code = test['proc'].poll() 468 return_code = test['proc'].poll()
469 if return_code != 0: 469 if return_code != 0:
470 sys.stderr.write(stderr) 470 sys.stderr.write(stderr)
471 continue 471 continue
472 ProcessTestResult(resultfile, test) 472 ProcessTestResult(resultfile, resultlog, test)
473 timings['results_processed'] = time.time() 473 timings['results_processed'] = time.time()
474 474
475 WriteStats(resultfile, suite_name, timings) 475 WriteStats(resultlog, suite_name, timings)
476 476
477 with open(resultfile_path + '.log', 'w') as fd:
478 fd.write(resultlog.getvalue())
477 if return_code == 0: 479 if return_code == 0:
478 with open(resultfile_path, 'w') as fd: 480 with open(resultfile_path, 'w') as fd:
479 fd.write(resultfile.getvalue()) 481 fd.write(resultfile.getvalue())
480 482
481 resultfile.close() 483 resultfile.close()
482 sys.exit(return_code) 484 sys.exit(return_code)
483 485
484 486
485 if __name__ == '__main__': 487 if __name__ == '__main__':
486 main() 488 main()
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698