Chromium Code Reviews

Unified Diff: scripts/slave/runtest.py

Issue 873403002: Add support for cc_perftests and other non-telemetry gtest based tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Added gtest failure output. Created 5 years, 10 months ago
 #!/usr/bin/env python
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """A tool used to run a Chrome test executable and process the output.

 This script is used by the buildbot slaves. It must be run from the outer
 build directory, e.g. chrome-release/build/.

(...skipping 155 matching lines...)
     except OSError as e:
       print ' error killing dbus-daemon with PID %s: %s' % (dbus_pid, e)
   # Try to clean up any stray DBUS_SESSION_BUS_ADDRESS environment
   # variable too. Some of the bots seem to re-invoke runtest.py in a
   # way that this variable sticks around from run to run.
   if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
     del os.environ['DBUS_SESSION_BUS_ADDRESS']
     print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable'


-def _RunGTestCommand(command, extra_env, log_processor=None, pipes=None):
+def _RunGTestCommand(
+    options, command, extra_env, log_processor=None, pipes=None):
   """Runs a test, printing and possibly processing the output.

   Args:
+    options: Options passed for this invocation of runtest.py.
     command: A list of strings in a command (the command and its arguments).
     extra_env: A dictionary of extra environment variables to set.
     log_processor: A log processor instance which has the ProcessLine method.
     pipes: A list of command string lists which the output will be piped to.

   Returns:
     The process return code.
   """
   env = os.environ.copy()
   if extra_env:
     print 'Additional test environment:'
     for k, v in sorted(extra_env.items()):
       print ' %s=%s' % (k, v)
   env.update(extra_env or {})

   # Trigger bot mode (test retries, redirection of stdio, possibly faster,
   # etc.) - using an environment variable instead of command-line flags because
   # some internal waterfalls run this (_RunGTestCommand) for totally non-gtest
   # code.
   # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
   env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})

+  log_processors = {}
   if log_processor:
-    return chromium_utils.RunCommand(
-        command, pipes=pipes, parser_func=log_processor.ProcessLine, env=env)
-  else:
-    return chromium_utils.RunCommand(command, pipes=pipes, env=env)
+    log_processors[log_processor.__class__.__name__] = log_processor
+
+  if not 'GTestLogParser' in log_processors and options.gtest_output_file:
+    log_processors['GTestLogParser'] = gtest_utils.GTestLogParser()
+
+  def _ProcessLine(line):
+    for current_log_processor in log_processors.values():
+      current_log_processor.ProcessLine(line)
+
+  result = chromium_utils.RunCommand(
+      command, pipes=pipes, parser_func=_ProcessLine, env=env)
+
+  if options.gtest_output_file:
Paweł Hajdan Jr. 2015/01/30 21:20:30 Let's call the option log_processor_output_file.
shatch 2015/02/09 21:23:59 Done.
+    _WriteLogProcessorResultsToOutput(
+        log_processors['GTestLogParser'], options.gtest_output_file)
+
+  return result
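
The change fans a single stream of test output out to every registered log processor, instead of wiring one processor's ProcessLine directly into RunCommand. A minimal, self-contained sketch of that fan-out pattern (FakeLogParser and the sample gtest lines are illustrative, not part of this CL):

class FakeLogParser(object):
  """Stand-in for a gtest log processor; records every line it sees."""
  def __init__(self):
    self.lines = []

  def ProcessLine(self, line):
    self.lines.append(line)

# Keyed by class name, mirroring the log_processors dict in the new code.
log_processors = {'FakeLogParser': FakeLogParser()}

def process_line(line):
  # Same shape as the _ProcessLine closure above: one line, all processors.
  for processor in log_processors.values():
    processor.ProcessLine(line)

for output_line in ['[ RUN      ] Suite.Test', '[       OK ] Suite.Test']:
  process_line(output_line)

assert log_processors['FakeLogParser'].lines == [
    '[ RUN      ] Suite.Test', '[       OK ] Suite.Test']
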


 def _GetMaster():
   """Return the master name for the current host."""
   return chromium_utils.GetActiveMaster()


 def _GetMasterString(master):
   """Returns a message describing what the master is."""
   return '[Running for master: "%s"]' % master
(...skipping 499 matching lines...)
   chart_json = log_processor.ChartJson()
   if chart_json:
     return results_dashboard.MakeDashboardJsonV1(
         chart_json,
         args['revisions'], args['system'], args['mastername'],
         args['buildername'], args['buildnumber'],
         args['supplemental_columns'], log_processor.IsReferenceBuild())
   return None


+def _WriteLogProcessorResultsToOutput(log_processor, log_output_file):
+  """Writes the log processor's results to a file.
+
+  Args:
+    log_processor: An instance of a log processor class, which has been used
+        to process the test output, so it contains the test results.
+    log_output_file: Path to the file to write the results to.
+  """
+  with open(log_output_file, 'w') as f:
+    results = {
+        'passed': log_processor.PassedTests(),
+        'failed': log_processor.FailedTests(),
+        'flakes': log_processor.FlakyTests(),
+    }
+    json.dump(results, f)
+
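
The file written here is plain JSON keyed by outcome. A quick, self-contained way to exercise the writer (StubProcessor and the gtest_results.json path are illustrative; real runs use a gtest_utils.GTestLogParser populated from test output):

class StubProcessor(object):
  """Pretends to be a log processor that already parsed test output."""
  def PassedTests(self):
    return ['Suite.Passes']
  def FailedTests(self):
    return ['Suite.Fails']
  def FlakyTests(self):
    return []

# Assumes _WriteLogProcessorResultsToOutput (above) is in scope.
_WriteLogProcessorResultsToOutput(StubProcessor(), 'gtest_results.json')
# gtest_results.json now contains, e.g.:
#   {"passed": ["Suite.Passes"], "failed": ["Suite.Fails"], "flakes": []}
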
+

 def _WriteChartJsonToOutput(chartjson_file, log_processor, args):
   """Writes the dashboard chartjson to a file for display in the waterfall.

   Args:
     chartjson_file: Path to the file to write the chartjson.
     log_processor: An instance of a log processor class, which has been used to
         process the test output, so it contains the test results.
     args: Dict of additional args to send to results_dashboard.
   """
   assert log_processor.IsChartJson()
(...skipping 424 matching lines...)
           options.test_launcher_summary_output)
       command.append('--test-launcher-summary-output=%s' % json_file_name)

     pipes = []
     if options.enable_asan:
       symbolize_command = _GetSanitizerSymbolizeCommand()
       pipes = [symbolize_command, ['c++filt']]

     command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                           command)
-    result = _RunGTestCommand(command, extra_env, pipes=pipes,
-                              log_processor=log_processor)
+    result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+                              log_processor=log_processor)
   finally:
     if http_server:
       http_server.StopServer()
     if _UsingGtestJson(options):
       _UploadGtestJsonSummary(json_file_name,
                               options.build_properties,
                               test_exe)
       log_processor.ProcessJSONFile(options.build_dir)

(...skipping 86 matching lines...)

   # Nuke anything that appears to be stale chrome items in the temporary
   # directory from previous test runs (i.e.- from crashes or unittest leaks).
   slave_utils.RemoveChromeTemporaryFiles()

   dirs_to_cleanup = [tmpdir]
   crash_files_before = set([])
   crash_files_after = set([])
   crash_files_before = set(crash_utils.list_crash_logs())

-  result = _RunGTestCommand(command, extra_env, log_processor)
+  result = _RunGTestCommand(options, command, extra_env, log_processor)

   # Because test apps kill themselves, iossim sometimes returns non-zero
   # status even though all tests have passed. Check the log_processor to
   # see if the test run was successful.
   if log_processor.CompletedWithoutFailure():
     result = 0
   else:
     result = 1

   if result != 0:
(...skipping 145 matching lines...)

     pipes = []
     # See the comment in main() regarding offline symbolization.
     if options.use_symbolization_script:
       symbolize_command = _GetSanitizerSymbolizeCommand(
           strip_path_prefix=options.strip_path_prefix)
       pipes = [symbolize_command]

     command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                           command)
-    result = _RunGTestCommand(command, extra_env, pipes=pipes,
-                              log_processor=log_processor)
+    result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+                              log_processor=log_processor)
   finally:
     if http_server:
       http_server.StopServer()
     if start_xvfb:
       xvfb.StopVirtualX(slave_name)
     if _UsingGtestJson(options):
       if options.use_symbolization_script:
         _SymbolizeSnippetsInJSON(options, json_file_name)
       if json_file_name:
(...skipping 106 matching lines...)
           test_exe_path=test_exe_path,
           document_root=options.document_root)

     if _UsingGtestJson(options):
       json_file_name = log_processor.PrepareJSONFile(
           options.test_launcher_summary_output)
       command.append('--test-launcher-summary-output=%s' % json_file_name)

     command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                           command)
-    result = _RunGTestCommand(command, extra_env, log_processor)
+    result = _RunGTestCommand(options, command, extra_env, log_processor)
   finally:
     if http_server:
       http_server.StopServer()
     if _UsingGtestJson(options):
       _UploadGtestJsonSummary(json_file_name,
                               options.build_properties,
                               test_exe)
       log_processor.ProcessJSONFile(options.build_dir)

   if options.enable_pageheap:
(...skipping 58 matching lines...)
   run_test_target_option = '--release'
   if options.target == 'Debug':
     run_test_target_option = '--debug'
   command = ['src/build/android/test_runner.py', 'gtest',
              run_test_target_option, '-s', test_suite]

   if options.flakiness_dashboard_server:
     command += ['--flakiness-dashboard-server=%s' %
                 options.flakiness_dashboard_server]

-  result = _RunGTestCommand(command, extra_env, log_processor=log_processor)
+  result = _RunGTestCommand(
+      options, command, extra_env, log_processor=log_processor)

   if options.generate_json_file:
     if not _GenerateJSONForTestResults(options, log_processor):
       return 1

   if options.annotate:
     annotation_utils.annotate(
         options.test_type, result, log_processor,
         options.factory_properties.get('full_test_name'),
         perf_dashboard_id=options.perf_dashboard_id)
(...skipping 124 matching lines...)
                            help='Start virtual X server on Linux.')
   option_parser.add_option('--no-xvfb', action='store_false', dest='xvfb',
                            help='Do not start virtual X server on Linux.')
   option_parser.add_option('--sharding-args', dest='sharding_args',
                            default='',
                            help='Options to pass to sharding_supervisor.')
   option_parser.add_option('-o', '--results-directory', default='',
                            help='output results directory for JSON file.')
   option_parser.add_option('--chartjson-file', default='',
                            help='File to dump chartjson results.')
+  option_parser.add_option('--gtest-output-file', default='',
+                           help='File to dump gtest log processor results.')
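
Since optparse derives the destination from the flag name, the new flag surfaces as options.gtest_output_file, which is what _RunGTestCommand consults above. A minimal standalone illustration (the gtest_results.json value is just an example):

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--gtest-output-file', default='',
                  help='File to dump gtest log processor results.')
options, _ = parser.parse_args(['--gtest-output-file=gtest_results.json'])
assert options.gtest_output_file == 'gtest_results.json'
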
   option_parser.add_option('--builder-name', default=None,
                            help='The name of the builder running this script.')
   option_parser.add_option('--slave-name', default=None,
                            help='The name of the slave running this script.')
   option_parser.add_option('--master-class-name', default=None,
                            help='The class name of the buildbot master running '
                                 'this script: examples include "Chromium", '
                                 '"ChromiumWebkit", and "ChromiumGPU". The '
                                 'flakiness dashboard uses this value to '
                                 'categorize results. See buildershandler.py '
(...skipping 316 matching lines...)
   finally:
     if did_launch_dbus:
       # It looks like the command line argument --exit-with-session
       # isn't working to clean up the spawned dbus-daemon. Kill it
       # manually.
       _ShutdownDBus()


 if '__main__' == __name__:
   sys.exit(main())