Chromium Code Reviews

Unified Diff: scripts/slave/runtest.py

Issue 873403002: Add support for cc_perftests and other non-telemetry gtest based tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Use template string. Created 5 years, 10 months ago
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool used to run a Chrome test executable and process the output.

This script is used by the buildbot slaves. It must be run from the outer
build directory, e.g. chrome-release/build/.

(...skipping 155 matching lines...)
  except OSError as e:
    print ' error killing dbus-daemon with PID %s: %s' % (dbus_pid, e)
  # Try to clean up any stray DBUS_SESSION_BUS_ADDRESS environment
  # variable too. Some of the bots seem to re-invoke runtest.py in a
  # way that this variable sticks around from run to run.
  if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
    del os.environ['DBUS_SESSION_BUS_ADDRESS']
    print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable'


-def _RunGTestCommand(command, extra_env, log_processor=None, pipes=None):
+def _RunGTestCommand(
+    options, command, extra_env, log_processor=None, pipes=None):
177 """Runs a test, printing and possibly processing the output. 178 """Runs a test, printing and possibly processing the output.
178 179
179 Args: 180 Args:
181 options: Options passed for this invocation of runtest.py.
180 command: A list of strings in a command (the command and its arguments). 182 command: A list of strings in a command (the command and its arguments).
181 extra_env: A dictionary of extra environment variables to set. 183 extra_env: A dictionary of extra environment variables to set.
182 log_processor: A log processor instance which has the ProcessLine method. 184 log_processor: A log processor instance which has the ProcessLine method.
183 pipes: A list of command string lists which the output will be piped to. 185 pipes: A list of command string lists which the output will be piped to.
184 186
185 Returns: 187 Returns:
186 The process return code. 188 The process return code.
187 """ 189 """
  env = os.environ.copy()
  if extra_env:
    print 'Additional test environment:'
    for k, v in sorted(extra_env.items()):
      print ' %s=%s' % (k, v)
  env.update(extra_env or {})

  # Trigger bot mode (test retries, redirection of stdio, possibly faster,
  # etc.) - using an environment variable instead of command-line flags because
  # some internal waterfalls run this (_RunGTestCommand) for totally non-gtest
  # code.
  # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
  env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})

+  log_processors = {}

    Paweł Hajdan Jr. 2015/02/11 08:24:03: Could you extract the runtest.py change to a separate…
    shatch 2015/02/12 20:46:32: Done.
  if log_processor:
-    return chromium_utils.RunCommand(
-        command, pipes=pipes, parser_func=log_processor.ProcessLine, env=env)
-  else:
-    return chromium_utils.RunCommand(command, pipes=pipes, env=env)
+    log_processors[log_processor.__class__.__name__] = log_processor
+
+  if (not 'GTestLogParser' in log_processors and
+      options.log_processor_output_file):
+    log_processors['GTestLogParser'] = gtest_utils.GTestLogParser()
+
+  def _ProcessLine(line):
+    for current_log_processor in log_processors.values():
+      current_log_processor.ProcessLine(line)
+
+  result = chromium_utils.RunCommand(
+      command, pipes=pipes, parser_func=_ProcessLine, env=env)
+
+  if options.log_processor_output_file:
+    _WriteLogProcessorResultsToOutput(
+        log_processors['GTestLogParser'], options.log_processor_output_file)
+
+  return result
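
For context, the change above fans every line of test output out to all registered log processors, keyed by class name, so an explicitly passed processor and the implicitly created GTestLogParser can both observe the same run. A minimal standalone sketch of that fan-out pattern, using hypothetical stand-in processors rather than the real gtest_utils classes:

# Sketch of the fan-out used in _RunGTestCommand. The two processor
# classes and the sample lines are illustrative, not code from this patch.
class LineCounter(object):
  """Counts every line of output it sees."""
  def __init__(self):
    self.count = 0

  def ProcessLine(self, line):
    self.count += 1


class FailureCollector(object):
  """Collects lines that look like gtest failures."""
  def __init__(self):
    self.failures = []

  def ProcessLine(self, line):
    if '[  FAILED  ]' in line:
      self.failures.append(line)


# Keyed by class name, mirroring log_processor.__class__.__name__ above.
log_processors = {
    'LineCounter': LineCounter(),
    'FailureCollector': FailureCollector(),
}


def _ProcessLine(line):
  # Every registered processor sees every line exactly once.
  for processor in log_processors.values():
    processor.ProcessLine(line)


for sample in ['[ RUN      ] Suite.Test', '[  FAILED  ] Suite.Test (3 ms)']:
  _ProcessLine(sample)

assert log_processors['LineCounter'].count == 2
assert log_processors['FailureCollector'].failures == [
    '[  FAILED  ] Suite.Test (3 ms)']

Keying the dict by class name is what lets the later lookup log_processors['GTestLogParser'] retrieve the implicitly added parser without disturbing whichever processor the caller passed in.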


def _GetMaster():
  """Return the master name for the current host."""
  return chromium_utils.GetActiveMaster()


def _GetMasterString(master):
  """Returns a message describing what the master is."""
  return '[Running for master: "%s"]' % master

(...skipping 499 matching lines...)
  chart_json = log_processor.ChartJson()
  if chart_json:
    return results_dashboard.MakeDashboardJsonV1(
        chart_json,
        args['revisions'], args['system'], args['mastername'],
        args['buildername'], args['buildnumber'],
        args['supplemental_columns'], log_processor.IsReferenceBuild())
  return None


+def _WriteLogProcessorResultsToOutput(log_processor, log_output_file):
+  """Writes the log processor's results to a file.
+
+  Args:
+    log_processor: An instance of a log processor class, which has been used to
+        process the test output, so it contains the test results.
+    log_output_file: Path to the file to write the results.
+  """
+  with open(log_output_file, 'w') as f:
+    results = {
+        'passed': log_processor.PassedTests(),
+        'failed': log_processor.FailedTests(),
+        'flakes': log_processor.FlakyTests(),
+    }
+    json.dump(results, f)
+
+
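Given the accessors used above (PassedTests, FailedTests, FlakyTests, each returning a list of test names), the helper writes a flat JSON object with three keys. A small sketch of what ends up in the file, using a hypothetical stand-in processor and made-up test names:

import json

class FakeLogProcessor(object):
  """Hypothetical stand-in for a log processor after a finished run."""

  def PassedTests(self):
    return ['Suite.Passes', 'Suite.AlsoPasses']

  def FailedTests(self):
    return ['Suite.Fails']

  def FlakyTests(self):
    return []

# Mirrors _WriteLogProcessorResultsToOutput above.
with open('gtest_results.json', 'w') as f:
  processor = FakeLogProcessor()
  json.dump({
      'passed': processor.PassedTests(),
      'failed': processor.FailedTests(),
      'flakes': processor.FlakyTests(),
  }, f)

# gtest_results.json now contains:
#   {"passed": ["Suite.Passes", "Suite.AlsoPasses"],
#    "failed": ["Suite.Fails"], "flakes": []}
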
def _WriteChartJsonToOutput(chartjson_file, log_processor, args):
  """Writes the dashboard chartjson to a file for display in the waterfall.

  Args:
    chartjson_file: Path to the file to write the chartjson.
    log_processor: An instance of a log processor class, which has been used to
        process the test output, so it contains the test results.
    args: Dict of additional args to send to results_dashboard.
  """
  assert log_processor.IsChartJson()

(...skipping 424 matching lines...)
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    pipes = []
    if options.enable_asan:
      symbolize_command = _GetSanitizerSymbolizeCommand()
      pipes = [symbolize_command, ['c++filt']]

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
-    result = _RunGTestCommand(command, extra_env, pipes=pipes,
-                              log_processor=log_processor)
+    result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+                              log_processor=log_processor)
  finally:
    if http_server:
      http_server.StopServer()
    if _UsingGtestJson(options):
      _UploadGtestJsonSummary(json_file_name,
                              options.build_properties,
                              test_exe)
      log_processor.ProcessJSONFile(options.build_dir)

(...skipping 86 matching lines...)

  # Nuke anything that appears to be stale chrome items in the temporary
  # directory from previous test runs (i.e.- from crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  dirs_to_cleanup = [tmpdir]
  crash_files_before = set([])
  crash_files_after = set([])
  crash_files_before = set(crash_utils.list_crash_logs())

-  result = _RunGTestCommand(command, extra_env, log_processor)
+  result = _RunGTestCommand(options, command, extra_env, log_processor)

  # Because test apps kill themselves, iossim sometimes returns non-zero
  # status even though all tests have passed. Check the log_processor to
  # see if the test run was successful.
  if log_processor.CompletedWithoutFailure():
    result = 0
  else:
    result = 1

  if result != 0:

(...skipping 145 matching lines...)

    pipes = []
    # See the comment in main() regarding offline symbolization.
    if options.use_symbolization_script:
      symbolize_command = _GetSanitizerSymbolizeCommand(
          strip_path_prefix=options.strip_path_prefix)
      pipes = [symbolize_command]

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
-    result = _RunGTestCommand(command, extra_env, pipes=pipes,
-                              log_processor=log_processor)
+    result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+                              log_processor=log_processor)
  finally:
    if http_server:
      http_server.StopServer()
    if start_xvfb:
      xvfb.StopVirtualX(slave_name)
    if _UsingGtestJson(options):
      if options.use_symbolization_script:
        _SymbolizeSnippetsInJSON(options, json_file_name)
      if json_file_name:

(...skipping 106 matching lines...)
          test_exe_path=test_exe_path,
          document_root=options.document_root)

    if _UsingGtestJson(options):
      json_file_name = log_processor.PrepareJSONFile(
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
-    result = _RunGTestCommand(command, extra_env, log_processor)
+    result = _RunGTestCommand(options, command, extra_env, log_processor)
  finally:
    if http_server:
      http_server.StopServer()
    if _UsingGtestJson(options):
      _UploadGtestJsonSummary(json_file_name,
                              options.build_properties,
                              test_exe)
      log_processor.ProcessJSONFile(options.build_dir)

  if options.enable_pageheap:

(...skipping 58 matching lines...)
  run_test_target_option = '--release'
  if options.target == 'Debug':
    run_test_target_option = '--debug'
  command = ['src/build/android/test_runner.py', 'gtest',
             run_test_target_option, '-s', test_suite]

  if options.flakiness_dashboard_server:
    command += ['--flakiness-dashboard-server=%s' %
                options.flakiness_dashboard_server]

-  result = _RunGTestCommand(command, extra_env, log_processor=log_processor)
+  result = _RunGTestCommand(
+      options, command, extra_env, log_processor=log_processor)

  if options.generate_json_file:
    if not _GenerateJSONForTestResults(options, log_processor):
      return 1

  if options.annotate:
    annotation_utils.annotate(
        options.test_type, result, log_processor,
        options.factory_properties.get('full_test_name'),
        perf_dashboard_id=options.perf_dashboard_id)

(...skipping 124 matching lines...)
                           help='Start virtual X server on Linux.')
  option_parser.add_option('--no-xvfb', action='store_false', dest='xvfb',
                           help='Do not start virtual X server on Linux.')
  option_parser.add_option('--sharding-args', dest='sharding_args',
                           default='',
                           help='Options to pass to sharding_supervisor.')
  option_parser.add_option('-o', '--results-directory', default='',
                           help='output results directory for JSON file.')
  option_parser.add_option('--chartjson-file', default='',
                           help='File to dump chartjson results.')
+  option_parser.add_option('--log-processor-output-file', default='',
+                           help='File to dump gtest log processor results.')
  option_parser.add_option('--builder-name', default=None,
                           help='The name of the builder running this script.')
  option_parser.add_option('--slave-name', default=None,
                           help='The name of the slave running this script.')
  option_parser.add_option('--master-class-name', default=None,
                           help='The class name of the buildbot master running '
                                'this script: examples include "Chromium", '
                                '"ChromiumWebkit", and "ChromiumGPU". The '
                                'flakiness dashboard uses this value to '
                                'categorize results. See buildershandler.py '

(...skipping 316 matching lines...)
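
The new --log-processor-output-file flag added above pairs with the _WriteLogProcessorResultsToOutput helper: runtest.py writes the summary JSON there, and the caller (for example, a recipe driving cc_perftests) can read it back afterwards. A hypothetical consumer; the three keys follow the helper above, while the path and everything else is illustrative:

import json

# Assumes runtest.py was invoked with something like
#   --log-processor-output-file=/tmp/cc_perftests_results.json
# (a hypothetical path, not one used by this patch).
with open('/tmp/cc_perftests_results.json') as f:
  results = json.load(f)

# The helper writes exactly three keys: 'passed', 'failed', 'flakes'.
if results['failed']:
  print 'Failed tests: %s' % ', '.join(results['failed'])
else:
  print '%d tests passed (%d flaky).' % (
      len(results['passed']), len(results['flakes']))
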
  finally:
    if did_launch_dbus:
      # It looks like the command line argument --exit-with-session
      # isn't working to clean up the spawned dbus-daemon. Kill it
      # manually.
      _ShutdownDBus()


if '__main__' == __name__:
  sys.exit(main())
