Chromium Code Reviews

Index: scripts/slave/runtest.py
diff --git a/scripts/slave/runtest.py b/scripts/slave/runtest.py
index f8f4e42c2d489c7e58a202c894326c2af1d0a4c0..bd44a053701d03b22780d89cd75ec721187d1950 100755
--- a/scripts/slave/runtest.py
+++ b/scripts/slave/runtest.py
@@ -173,10 +173,12 @@ def _ShutdownDBus():
     print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable'


-def _RunGTestCommand(command, extra_env, log_processor=None, pipes=None):
+def _RunGTestCommand(
+    options, command, extra_env, log_processor=None, pipes=None):
   """Runs a test, printing and possibly processing the output.

   Args:
+    options: Options passed for this invocation of runtest.py.
     command: A list of strings in a command (the command and its arguments).
     extra_env: A dictionary of extra environment variables to set.
     log_processor: A log processor instance which has the ProcessLine method.
@@ -199,11 +201,25 @@ def _RunGTestCommand(command, extra_env, log_processor=None, pipes=None):
   # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
   env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})

+  log_processors = {}
   if log_processor:
-    return chromium_utils.RunCommand(
-        command, pipes=pipes, parser_func=log_processor.ProcessLine, env=env)
-  else:
-    return chromium_utils.RunCommand(command, pipes=pipes, env=env)
+    log_processors[log_processor.__class__.__name__] = log_processor
+
+  if not 'GTestLogParser' in log_processors and options.gtest_output_file:
+    log_processors['GTestLogParser'] = gtest_utils.GTestLogParser()
+
+  def _ProcessLine(line):
+    for current_log_processor in log_processors.values():
+      current_log_processor.ProcessLine(line)
+
+  result = chromium_utils.RunCommand(
+      command, pipes=pipes, parser_func=_ProcessLine, env=env)
+
+  if options.gtest_output_file:
[Review comment on the line above]

Paweł Hajdan Jr. (2015/01/30 21:20:30):
  Let's call the option log_processor_output_file.

shatch (2015/02/09 21:23:59):
  Done.
+    _WriteLogProcessorResultsToOutput(
+        log_processors['GTestLogParser'], options.gtest_output_file)
+
+  return result


 def _GetMaster():
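For context, the hunk above replaces the single parser_func callback with a fan-out: every registered log processor sees every line of test output, and a default GTestLogParser is only created when --gtest-output-file is set and no gtest parser is already registered. Below is a minimal standalone sketch of that fan-out pattern; FakeGTestLogParser and the canned output lines are illustrative stand-ins for gtest_utils.GTestLogParser and a real test run, not part of runtest.py.

class FakeGTestLogParser(object):
  """Illustrative stand-in for gtest_utils.GTestLogParser."""

  def __init__(self):
    self.passed = []
    self.failed = []

  def ProcessLine(self, line):
    # The real parser tracks much more state; this only spots OK/FAILED lines.
    test_name = line.split(']')[-1].strip()
    if '[       OK ]' in line:
      self.passed.append(test_name)
    elif '[  FAILED  ]' in line:
      self.failed.append(test_name)


# Processors are keyed by class name, as in the hunk above, so a default
# parser is only added when no instance of that class is registered yet.
log_processors = {}
log_processors.setdefault('FakeGTestLogParser', FakeGTestLogParser())


def _ProcessLine(line):
  # Fan each line of output out to every registered processor.
  for processor in log_processors.values():
    processor.ProcessLine(line)


for output_line in ['[ RUN      ] FooTest.Bar',
                    '[       OK ] FooTest.Bar (3 ms)',
                    '[  FAILED  ] FooTest.Baz (5 ms)']:
  _ProcessLine(output_line)

print log_processors['FakeGTestLogParser'].passed  # ['FooTest.Bar (3 ms)']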
@@ -723,6 +739,23 @@ def _GenerateDashboardJson(log_processor, args):
   return None
+def _WriteLogProcessorResultsToOutput(log_processor, log_output_file):
+  """Writes the log processor's results to a file.
+
+  Args:
+    log_processor: An instance of a log processor class, which has been used to
+        process the test output, so it contains the test results.
+    log_output_file: Path to the file to write the results to.
+  """
+  with open(log_output_file, 'w') as f:
+    results = {
+        'passed': log_processor.PassedTests(),
+        'failed': log_processor.FailedTests(),
+        'flakes': log_processor.FlakyTests(),
+    }
+    json.dump(results, f)
+
+
 def _WriteChartJsonToOutput(chartjson_file, log_processor, args):
   """Writes the dashboard chartjson to a file for display in the waterfall.
@@ -1167,7 +1200,7 @@ def _MainMac(options, args, extra_env):
       command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                             command)

-    result = _RunGTestCommand(command, extra_env, pipes=pipes,
+    result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
                               log_processor=log_processor)
   finally:
     if http_server:
@@ -1274,7 +1307,7 @@ def _MainIOS(options, args, extra_env):
   crash_files_after = set([])
   crash_files_before = set(crash_utils.list_crash_logs())

-  result = _RunGTestCommand(command, extra_env, log_processor)
+  result = _RunGTestCommand(options, command, extra_env, log_processor)

   # Because test apps kill themselves, iossim sometimes returns non-zero
   # status even though all tests have passed.  Check the log_processor to
@@ -1440,7 +1473,7 @@ def _MainLinux(options, args, extra_env):
       command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                             command)

-    result = _RunGTestCommand(command, extra_env, pipes=pipes,
+    result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
                               log_processor=log_processor)
   finally:
     if http_server:
@@ -1567,7 +1600,7 @@ def _MainWin(options, args, extra_env):
       command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                             command)

-    result = _RunGTestCommand(command, extra_env, log_processor)
+    result = _RunGTestCommand(options, command, extra_env, log_processor)
   finally:
     if http_server:
       http_server.StopServer()
@@ -1646,7 +1679,8 @@ def _MainAndroid(options, args, extra_env):
     command += ['--flakiness-dashboard-server=%s' %
                 options.flakiness_dashboard_server]

-  result = _RunGTestCommand(command, extra_env, log_processor=log_processor)
+  result = _RunGTestCommand(
+      options, command, extra_env, log_processor=log_processor)

   if options.generate_json_file:
     if not _GenerateJSONForTestResults(options, log_processor):
@@ -1791,6 +1825,8 @@ def main():
                            help='output results directory for JSON file.')
   option_parser.add_option('--chartjson-file', default='',
                            help='File to dump chartjson results.')
+  option_parser.add_option('--gtest-output-file', default='',
+                           help='File to dump gtest log processor results.')
   option_parser.add_option('--builder-name', default=None,
                            help='The name of the builder running this script.')
   option_parser.add_option('--slave-name', default=None,
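To close the loop, a minimal optparse sketch of how the new flag surfaces as options.gtest_output_file and gates the write in _RunGTestCommand. Only the option added in this patch is modeled; the /tmp path and base_unittests argument are placeholders, and a real invocation of runtest.py needs its full flag set.

import optparse

option_parser = optparse.OptionParser()
option_parser.add_option('--gtest-output-file', default='',
                         help='File to dump gtest log processor results.')

# optparse maps '--gtest-output-file' to options.gtest_output_file.
options, args = option_parser.parse_args(
    ['--gtest-output-file', '/tmp/gtest_results.json', 'base_unittests'])

# _RunGTestCommand only writes the summary when the flag is non-empty.
if options.gtest_output_file:
  print 'would write log processor results to %s' % options.gtest_output_file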