OLD | NEW |
---|---|
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 # Copyright (c) 2011 The Native Client Authors. All rights reserved. | 2 # Copyright (c) 2011 The Native Client Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 | 6 |
7 """Simple testing harness for running commands and checking expected output. | 7 """Simple testing harness for running commands and checking expected output. |
8 | 8 |
9 This harness is used instead of shell scripts to ensure windows compatibility | 9 This harness is used instead of shell scripts to ensure windows compatibility |
10 | 10 |
(...skipping 87 matching lines...) | |
98 | 98 |
99 # This option must be '1' for the output to be captured, for checking | 99 # This option must be '1' for the output to be captured, for checking |
100 # against golden files, special exit_status signals, etc. | 100 # against golden files, special exit_status signals, etc. |
101 # When this option is '0', stdout and stderr will be streamed out. | 101 # When this option is '0', stdout and stderr will be streamed out. |
102 'capture_output': '1', | 102 'capture_output': '1', |
103 | 103 |
104 'filter_regex': None, | 104 'filter_regex': None, |
105 'filter_inverse': False, | 105 'filter_inverse': False, |
106 'filter_group_only': False, | 106 'filter_group_only': False, |
107 | 107 |
108 # Script for processing output along with its arguments. | 108 # Number of times a test is run. |
109 'process_output': '', | 109 # This is useful for getting multiple samples for time perf tests. |
110 'num_runs': 1, | |
111 | |
112 # Scripts for processing output along with its arguments. | |
113 # This script is given the output of a single run. | |
114 'process_output_single': None, | |
115 # This script is given the concatenated output of all |num_runs|, after | |
116 # having been filtered by |process_output_single| for individual runs. | |
117 'process_output_combined': None, | |
110 | 118 |
111 'time_warning': 0, | 119 'time_warning': 0, |
112 'time_error': 0, | 120 'time_error': 0, |
113 | 121 |
114 'run_under': None, | 122 'run_under': None, |
115 } | 123 } |
116 | 124 |
117 def StringifyList(lst): | 125 def StringifyList(lst): |
118 return ','.join(lst) | 126 return ','.join(lst) |
119 | 127 |
120 def DestringifyList(lst): | 128 def DestringifyList(lst): |
121 # BUG(robertm): , is a legitimate character for an environment variable | 129 # BUG(robertm): , is a legitimate character for an environment variable |
122 # value. | 130 # value. |
123 return lst.split(',') | 131 return lst.split(',') |
124 | 132 |
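A quick sketch of the round trip these two helpers perform (the variable values are hypothetical), including the comma caveat flagged in the BUG comment:

    env_vars = ['NACLVERBOSITY=1', 'NACL_SRPC_DEBUG=1']   # hypothetical values
    s = StringifyList(env_vars)
    # s == 'NACLVERBOSITY=1,NACL_SRPC_DEBUG=1'
    DestringifyList(s)
    # ['NACLVERBOSITY=1', 'NACL_SRPC_DEBUG=1']
    # A value that itself contains a comma is split incorrectly (the BUG noted above):
    DestringifyList('CFLAGS=-O2,-Wall')
    # ['CFLAGS=-O2', '-Wall'] rather than ['CFLAGS=-O2,-Wall']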
125 # The following messages match gtest's formatting. This improves log | 133 # The following messages match gtest's formatting. This improves log |
126 # greppability for people who primarily work on Chrome. It also allows | 134 # greppability for people who primarily work on Chrome. It also allows |
127 # gtest-specific hooks on the buildbots to fire. | 135 # gtest-specific hooks on the buildbots to fire. |
128 # The buildbots expect test names in the format "suite_name.test_name", so we | 136 # The buildbots expect test names in the format "suite_name.test_name", so we |
129 # prefix the test name with a bogus suite name (nacl). | 137 # prefix the test name with a bogus suite name (nacl). |
130 def RunMessage(): | 138 def RunMessage(): |
131 return '[ RUN ] nacl.%s' % GlobalSettings['name'] | 139 msg = '[ RUN ] nacl.%s' % GlobalSettings['name'] |
Nick Bray
2011/11/11 23:46:33
nit: revert it to the original version? No point
jvoung - send to chromium...
2011/11/12 19:46:51
Done.
| |
140 return msg | |
132 | 141 |
133 def FailureMessage(total_time): | 142 def FailureMessage(total_time): |
134 return '[ FAILED ] nacl.%s (%d ms)' % (GlobalSettings['name'], | 143 return '[ FAILED ] nacl.%s (%d ms)' % (GlobalSettings['name'], |
135 total_time * 1000.0) | 144 total_time * 1000.0) |
136 | 145 |
137 def SuccessMessage(total_time): | 146 def SuccessMessage(total_time): |
138 return '[ OK ] nacl.%s (%d ms)' % (GlobalSettings['name'], | 147 return '[ OK ] nacl.%s (%d ms)' % (GlobalSettings['name'], |
139 total_time * 1000.0) | 148 total_time * 1000.0) |
140 | 149 |
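For illustration, with a hypothetical test name of 'hello_world' these helpers yield the gtest-style lines below (bracket spacing as the strings appear in this diff):

    Print(RunMessage())           # [ RUN ] nacl.hello_world
    Print(SuccessMessage(0.042))  # [ OK ] nacl.hello_world (42 ms)
    Print(FailureMessage(0.042))  # [ FAILED ] nacl.hello_world (42 ms)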
141 def LogPerfResult(graph_name, trace_name, value, units): | 150 def LogPerfResult(graph_name, trace_name, value, units): |
(...skipping 161 matching lines...) | |
303 words[0] == 'qemu:' and words[1] == 'uncaught' and | 312 words[0] == 'qemu:' and words[1] == 'uncaught' and |
304 words[2] == 'target' and words[3] == 'signal'): | 313 words[2] == 'target' and words[3] == 'signal'): |
305 return -int(words[4]) | 314 return -int(words[4]) |
306 return default | 315 return default |
307 | 316 |
308 def FormatExitStatus(number): | 317 def FormatExitStatus(number): |
309 # Include the hex version because it makes the Windows error exit | 318 # Include the hex version because it makes the Windows error exit |
310 # statuses (STATUS_*) more recognisable. | 319 # statuses (STATUS_*) more recognisable. |
311 return '%i (0x%x)' % (number, number & 0xffffffff) | 320 return '%i (0x%x)' % (number, number & 0xffffffff) |
312 | 321 |
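As a worked example (not part of the change), the hex form makes a Windows access violation easy to spot:

    FormatExitStatus(-1073741819)
    # '-1073741819 (0xc0000005)', where 0xC0000005 is STATUS_ACCESS_VIOLATION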
322 def PrintStdStreams(stdout, stderr): | |
323 if stderr is not None: | |
324 Banner('Stdout for %s:' % os.path.basename(GlobalSettings['name'])) | |
325 Print(stdout) | |
326 Banner('Stderr for %s:' % os.path.basename(GlobalSettings['name'])) | |
327 Print(stderr) | |
328 | |
313 def CheckExitStatus(failed, req_status, using_nacl_signal_handler, | 329 def CheckExitStatus(failed, req_status, using_nacl_signal_handler, |
314 exit_status, stdout, stderr): | 330 exit_status, stdout, stderr): |
315 expected_sigtype = 'normal' | 331 expected_sigtype = 'normal' |
316 if req_status in status_map: | 332 if req_status in status_map: |
317 expected_statuses = status_map[req_status][GlobalPlatform] | 333 expected_statuses = status_map[req_status][GlobalPlatform] |
318 if using_nacl_signal_handler: | 334 if using_nacl_signal_handler: |
319 if req_status.startswith('trusted_'): | 335 if req_status.startswith('trusted_'): |
320 expected_sigtype = 'trusted' | 336 expected_sigtype = 'trusted' |
321 elif req_status.startswith('untrusted_'): | 337 elif req_status.startswith('untrusted_'): |
322 expected_sigtype = 'untrusted' | 338 expected_sigtype = 'untrusted' |
(...skipping 38 matching lines...) | |
361 expected_printed = (expected_printed_signum, expected_sigtype) | 377 expected_printed = (expected_printed_signum, expected_sigtype) |
362 actual_printed = GetNaClSignalInfoFromStderr(stderr) | 378 actual_printed = GetNaClSignalInfoFromStderr(stderr) |
363 msg = ('\nERROR: Command printed the signal info %s to stderr ' | 379 msg = ('\nERROR: Command printed the signal info %s to stderr ' |
364 'but we expected %s' % | 380 'but we expected %s' % |
365 (actual_printed, expected_printed)) | 381 (actual_printed, expected_printed)) |
366 if actual_printed != expected_printed: | 382 if actual_printed != expected_printed: |
367 Print(msg) | 383 Print(msg) |
368 failed = True | 384 failed = True |
369 | 385 |
370 if failed: | 386 if failed: |
371 if stderr is not None: | 387 PrintStdStreams(stdout, stderr) |
372 Banner('Stdout') | |
373 Print(stdout) | |
374 Banner('Stderr') | |
375 Print(stderr) | |
376 return not failed | 388 return not failed |
377 | 389 |
378 def CheckTimeBounds(total_time): | 390 def CheckTimeBounds(total_time): |
379 if GlobalSettings['time_error']: | 391 if GlobalSettings['time_error']: |
380 if total_time > GlobalSettings['time_error']: | 392 if total_time > GlobalSettings['time_error']: |
381 Print('ERROR: should have taken less than %f secs' % | 393 Print('ERROR: should have taken less than %f secs' % |
382 (GlobalSettings['time_error'])) | 394 (GlobalSettings['time_error'])) |
383 return False | 395 return False |
384 | 396 |
385 if GlobalSettings['time_warning']: | 397 if GlobalSettings['time_warning']: |
(...skipping 14 matching lines...) | |
400 actual = getter() | 412 actual = getter() |
401 if GlobalSettings['filter_regex']: | 413 if GlobalSettings['filter_regex']: |
402 actual = test_lib.RegexpFilterLines(GlobalSettings['filter_regex'], | 414 actual = test_lib.RegexpFilterLines(GlobalSettings['filter_regex'], |
403 GlobalSettings['filter_inverse'], | 415 GlobalSettings['filter_inverse'], |
404 GlobalSettings['filter_group_only'], | 416 GlobalSettings['filter_group_only'], |
405 actual) | 417 actual) |
406 if DifferentFromGolden(actual, golden_data, stream): | 418 if DifferentFromGolden(actual, golden_data, stream): |
407 return False | 419 return False |
408 return True | 420 return True |
409 | 421 |
410 def ProcessLogOutput(stdout, stderr): | 422 def ProcessLogOutputSingle(stdout, stderr): |
411 output_processor = GlobalSettings['process_output'] | 423 output_processor = GlobalSettings['process_output_single'] |
412 if output_processor: | 424 if output_processor is None: |
425 return (True, stdout, stderr) | |
426 else: | |
413 output_processor_cmd = DestringifyList(output_processor) | 427 output_processor_cmd = DestringifyList(output_processor) |
414 # Also, get the output from logout (to get NaClLog output in Windows). | 428 # Also, get the output from log_file to get NaClLog output in Windows. |
415 log_output = open(GlobalSettings['log_file']).read() | 429 log_output = open(GlobalSettings['log_file']).read() |
416 # Assume the log processor does not care about the order of the lines. | 430 # Assume the log processor does not care about the order of the lines. |
417 all_output = log_output + stdout + stderr | 431 all_output = log_output + stdout + stderr |
418 if not test_lib.RunCmdWithInput(output_processor_cmd, all_output): | 432 _, retcode, failed, new_stdout, new_stderr = \ |
433 test_lib.RunTestWithInputOutput(output_processor_cmd, all_output) | |
434 # Print the result, since we have done some processing and we need | |
435 # to have the processed data. However, if we intend to process it some | |
436 # more later via process_output_combined, do not duplicate the data here. | |
437 # Only print out the final result! | |
438 if not GlobalSettings['process_output_combined']: | |
439 PrintStdStreams(new_stdout, new_stderr) | |
440 if retcode != 0 or failed: | |
441 return (False, new_stdout, new_stderr) | |
442 else: | |
443 return (True, new_stdout, new_stderr) | |
444 | |
445 def ProcessLogOutputCombined(stdout, stderr): | |
446 output_processor = GlobalSettings['process_output_combined'] | |
447 if output_processor is None: | |
448 return True | |
449 else: | |
450 output_processor_cmd = DestringifyList(output_processor) | |
451 all_output = stdout + stderr | |
452 _, retcode, failed, new_stdout, new_stderr = \ | |
453 test_lib.RunTestWithInputOutput(output_processor_cmd, all_output) | |
454 # Print the result, since we have done some processing. | |
455 PrintStdStreams(new_stdout, new_stderr) | |
456 if retcode != 0 or failed: | |
419 return False | 457 return False |
420 return True | 458 else: |
459 return True | |
421 | 460 |
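A hypothetical illustration of how a processor is wired up (the script name here is made up): both new settings are comma-stringified commands, just like the old 'process_output' option.

    processor = 'python,parse_run_log.py,--units=ms'   # hypothetical value
    cmd = DestringifyList(processor)
    # cmd == ['python', 'parse_run_log.py', '--units=ms']
    # ProcessLogOutputSingle pipes the run's log + stdout + stderr into this command via
    # test_lib.RunTestWithInputOutput and adopts the command's stdout/stderr as the new output.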
422 def main(argv): | 461 def DoRun(command, stdin_data): |
423 global GlobalPlatform | 462 """ |
424 global GlobalReportStream | 463 Run the command, given stdin_data. Returns a return code (0 is good) |
425 command = ProcessOptions(argv) | 464 and optionally a captured version of stdout, stderr from the run |
426 | 465 (if the global setting capture_output is true). |
427 if GlobalSettings['report']: | 466 """ |
428 GlobalReportStream.append(open(GlobalSettings['report'], 'w')) | 467 # Initialize stdout, stderr to indicate we have not captured |
429 | 468 # any of stdout or stderr. |
430 if not GlobalSettings['name']: | 469 stdout = '' |
431 GlobalSettings['name'] = command[0] | 470 stderr = '' |
432 GlobalSettings['name'] = os.path.basename(GlobalSettings['name']) | |
433 | |
434 Print(RunMessage()) | |
435 | |
436 if GlobalSettings['osenv']: | |
437 Banner('setting environment') | |
438 env_vars = DestringifyList(GlobalSettings['osenv']) | |
439 else: | |
440 env_vars = [] | |
441 for env_var in env_vars: | |
442 key, val = env_var.split('=', 1) | |
443 Print('[%s] = [%s]' % (key, val)) | |
444 os.putenv(key, val) | |
445 | |
446 stdin_data = '' | |
447 if GlobalSettings['stdin']: | |
448 stdin_data = open(GlobalSettings['stdin']) | |
449 | |
450 if GlobalSettings['log_file']: | |
451 try: | |
452 os.unlink(GlobalSettings['log_file']) # might not pre-exist | |
453 except OSError: | |
454 pass | |
455 | |
456 run_under = GlobalSettings['run_under'] | |
457 if run_under: | |
458 command = run_under.split(',') + command | |
459 | |
460 Banner('running %s' % str(command)) | |
461 # print the command in copy-and-pastable fashion | |
462 print " ".join(env_vars + command) | |
463 | |
464 if not int(GlobalSettings['capture_output']): | 471 if not int(GlobalSettings['capture_output']): |
465 # We are only blurting out the stdout and stderr, not capturing it | 472 # We are only blurting out the stdout and stderr, not capturing it |
466 # for comparison, etc. | 473 # for comparison, etc. |
467 assert (not GlobalSettings['stdout_golden'] | 474 assert (not GlobalSettings['stdout_golden'] |
468 and not GlobalSettings['stderr_golden'] | 475 and not GlobalSettings['stderr_golden'] |
469 and not GlobalSettings['log_golden'] | 476 and not GlobalSettings['log_golden'] |
470 and not GlobalSettings['filter_regex'] | 477 and not GlobalSettings['filter_regex'] |
471 and not GlobalSettings['filter_inverse'] | 478 and not GlobalSettings['filter_inverse'] |
472 and not GlobalSettings['filter_group_only'] | 479 and not GlobalSettings['filter_group_only'] |
473 and not GlobalSettings['process_output'] | 480 and not GlobalSettings['process_output_single'] |
481 and not GlobalSettings['process_output_combined'] | |
474 ) | 482 ) |
475 # If python ever changes popen.stdout.read() to not risk deadlock, | 483 # If python ever changes popen.stdout.read() to not risk deadlock, |
476 # we could stream and capture, and use RunTestWithInputOutput instead. | 484 # we could stream and capture, and use RunTestWithInputOutput instead. |
477 (total_time, exit_status, failed) = test_lib.RunTestWithInput(command, | 485 (total_time, exit_status, failed) = test_lib.RunTestWithInput(command, |
478 stdin_data) | 486 stdin_data) |
479 PrintTotalTime(total_time) | 487 PrintTotalTime(total_time) |
480 if not CheckExitStatus(failed, | 488 if not CheckExitStatus(failed, |
481 GlobalSettings['exit_status'], | 489 GlobalSettings['exit_status'], |
482 GlobalSettings['using_nacl_signal_handler'], | 490 GlobalSettings['using_nacl_signal_handler'], |
483 exit_status, None, None): | 491 exit_status, None, None): |
484 Print(FailureMessage(total_time)) | 492 Print(FailureMessage(total_time)) |
485 return -1 | 493 return (-1, stdout, stderr) |
486 else: | 494 else: |
487 (total_time, exit_status, | 495 (total_time, exit_status, |
488 failed, stdout, stderr) = test_lib.RunTestWithInputOutput( | 496 failed, stdout, stderr) = test_lib.RunTestWithInputOutput( |
489 command, stdin_data) | 497 command, stdin_data) |
490 PrintTotalTime(total_time) | 498 PrintTotalTime(total_time) |
499 # CheckExitStatus may spew stdout/stderr when there is an error. | |
500 # Otherwise, we do not spew stdout/stderr in this case (capture_output). | |
491 if not CheckExitStatus(failed, | 501 if not CheckExitStatus(failed, |
492 GlobalSettings['exit_status'], | 502 GlobalSettings['exit_status'], |
493 GlobalSettings['using_nacl_signal_handler'], | 503 GlobalSettings['using_nacl_signal_handler'], |
494 exit_status, stdout, stderr): | 504 exit_status, stdout, stderr): |
495 Print(FailureMessage(total_time)) | 505 Print(FailureMessage(total_time)) |
496 return -1 | 506 return (-1, stdout, stderr) |
497 if not CheckGoldenOutput(stdout, stderr): | 507 if not CheckGoldenOutput(stdout, stderr): |
498 Print(FailureMessage(total_time)) | 508 Print(FailureMessage(total_time)) |
499 return -1 | 509 return (-1, stdout, stderr) |
500 if not ProcessLogOutput(stdout, stderr): | 510 success, stdout, stderr = ProcessLogOutputSingle(stdout, stderr) |
501 Print(FailureMessage(total_time)) | 511 if not success: |
502 return -1 | 512 Print(FailureMessage(total_time) + ' ProcessLogOutputSingle failed!') |
513 return (-1, stdout, stderr) | |
503 | 514 |
504 if not CheckTimeBounds(total_time): | 515 if not CheckTimeBounds(total_time): |
505 Print(FailureMessage(total_time)) | 516 Print(FailureMessage(total_time)) |
506 return -1 | 517 return (-1, stdout, stderr) |
507 | 518 |
508 Print(SuccessMessage(total_time)) | 519 Print(SuccessMessage(total_time)) |
520 return (0, stdout, stderr) | |
521 | |
522 | |
523 def Main(argv): | |
524 command = ProcessOptions(argv) | |
525 | |
526 if GlobalSettings['report']: | |
527 GlobalReportStream.append(open(GlobalSettings['report'], 'w')) | |
528 | |
529 if not GlobalSettings['name']: | |
530 GlobalSettings['name'] = command[0] | |
531 GlobalSettings['name'] = os.path.basename(GlobalSettings['name']) | |
532 | |
533 Print(RunMessage()) | |
534 num_runs = GlobalSettings['num_runs'] | |
535 if num_runs > 1: | |
536 Print(' (running %d times)' % num_runs) | |
537 | |
538 if GlobalSettings['osenv']: | |
539 Banner('setting environment') | |
540 env_vars = DestringifyList(GlobalSettings['osenv']) | |
541 else: | |
542 env_vars = [] | |
543 for env_var in env_vars: | |
544 key, val = env_var.split('=', 1) | |
545 Print('[%s] = [%s]' % (key, val)) | |
546 os.environ[key] = val | |
547 | |
548 stdin_data = '' | |
549 if GlobalSettings['stdin']: | |
550 stdin_data = open(GlobalSettings['stdin']) | |
551 | |
552 run_under = GlobalSettings['run_under'] | |
553 if run_under: | |
554 command = run_under.split(',') + command | |
555 | |
556 Banner('running %s' % str(command)) | |
557 # print the command in copy-and-pastable fashion | |
558 print " ".join(env_vars + command) | |
Nick Bray
2011/11/11 23:46:33
' '.join(...)
jvoung - send to chromium...
2011/11/12 19:46:51
Done.
| |
559 | |
560 # Concatenate output when running multiple times (e.g., for timing). | |
561 combined_stdout = '' | |
562 combined_stderr = '' | |
563 cur_runs = 0 | |
564 num_runs = GlobalSettings['num_runs'] | |
565 while cur_runs < num_runs: | |
566 cur_runs += 1 | |
567 # Clear out previous log_file. | |
568 if GlobalSettings['log_file']: | |
569 try: | |
570 os.unlink(GlobalSettings['log_file']) # might not pre-exist | |
571 except OSError: | |
572 pass | |
573 ret_code, stdout, stderr = DoRun(command, stdin_data) | |
574 if ret_code != 0: | |
575 return ret_code | |
576 combined_stdout += stdout | |
577 combined_stderr += stderr | |
578 # Process the log output after all the runs. | |
579 success = ProcessLogOutputCombined(combined_stdout, combined_stderr) | |
580 if not success: | |
581 # Bogus time, since only ProcessLogOutputCombined failed. | |
582 Print(FailureMessage(0.0) + ' ProcessLogOutputCombined failed!') | |
583 return -1 | |
509 return 0 | 584 return 0 |
510 | 585 |
511 | |
512 if __name__ == '__main__': | 586 if __name__ == '__main__': |
513 retval = main(sys.argv[1:]) | 587 retval = Main(sys.argv[1:]) |
514 # Add some whitespace to make the logs easier to read. | 588 # Add some whitespace to make the logs easier to read. |
515 sys.stdout.write('\n\n') | 589 sys.stdout.write('\n\n') |
516 sys.exit(retval) | 590 sys.exit(retval) |
517 | |