Index: scripts/slave/runtest.py
diff --git a/scripts/slave/runtest.py b/scripts/slave/runtest.py
index 60b250c5f700c1579fa1f36e803956e5eaf6f41a..678eeb45ae7460021dc42d2b23e7a6387df71369 100755
--- a/scripts/slave/runtest.py
+++ b/scripts/slave/runtest.py
@@ -42,6 +42,7 @@ import config
 from slave import crash_utils
 from slave import gtest_slave_utils
 from slave import process_log_utils
+from slave import results_dashboard
 from slave import slave_utils
 from slave import xvfb
 from slave.gtest.json_results_generator import GetSvnRevision
@@ -347,7 +348,7 @@ def create_results_tracker(tracker_class, options):
 
 
 def annotate(test_name, result, results_tracker, full_name=False,
-             perf_dashboard_id=None):
+             perf_dashboard_id=None, results_url=None, system=None):
   """Given a test result and tracker, update the waterfall with test results."""
   get_text_result = process_log_utils.SUCCESS
 
@@ -398,6 +399,9 @@ def annotate(test_name, result, results_tracker, full_name=False,
   for logname, log in results_tracker.PerformanceLogs().iteritems():
     lines = [str(l).rstrip() for l in log]
     slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id)
+    if results_url:
+      results_dashboard.SendResults(
+          logname, lines, system, test_name, results_url)
 
 
 def get_build_dir_and_exe_path_mac(options, target_dir, exe_name):
@@ -542,7 +546,9 @@ def main_mac(options, args):
     annotate(options.test_type, result, results_tracker,
              options.factory_properties.get('full_test_name'),
              perf_dashboard_id=options.factory_properties.get(
-                 'test_name'))
+                 'test_name'),
+             results_url=options.results_url,
+             system=options.factory_properties.get('perf_id'))
 
   return result
 
@@ -788,7 +794,9 @@ def main_linux(options, args):
     annotate(options.test_type, result, results_tracker,
              options.factory_properties.get('full_test_name'),
              perf_dashboard_id=options.factory_properties.get(
-                 'test_name'))
+                 'test_name'),
+             results_url=options.results_url,
+             system=options.factory_properties.get('perf_id'))
 
   return result
 
@@ -862,7 +870,9 @@ def main_win(options, args):
     annotate(options.test_type, result, results_tracker,
              options.factory_properties.get('full_test_name'),
              perf_dashboard_id=options.factory_properties.get(
-                 'test_name'))
+                 'test_name'),
+             results_url=options.results_url,
+             system=options.factory_properties.get('perf_id'))
 
   return result
 
@@ -965,6 +975,9 @@ def main():
                            help='Annotate output when run as a buildstep. '
                                 'Specify which type of test to parse, available'
                                 ' types listed with --annotate=list.')
+  option_parser.add_option('', '--results-url', default='',
+                           help='The URI of the perf dashboard to upload '
+                                'results to.')
   chromium_utils.AddPropertiesOptions(option_parser)
   options, args = option_parser.parse_args()
 
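Reviewer note: slave.results_dashboard itself is not part of this patch, so the sketch below is only an illustration of the call contract the new branch in annotate() depends on. The stand-in class, the URL, and the sample values are hypothetical; the only details taken from the patch are the SendResults(logname, lines, system, test_name, results_url) signature and the guard on results_url.

# Illustration only: a stand-in matching the SendResults signature used in
# annotate() above; the real slave.results_dashboard is not shown in this diff.
class _FakeResultsDashboard(object):
  @staticmethod
  def SendResults(logname, lines, system, test_name, results_url):
    # A real implementation would POST the parsed perf log lines to the
    # dashboard at results_url; here we just echo what would be sent.
    print('upload %d line(s) of %s for %s/%s to %s' % (
        len(lines), logname, system, test_name, results_url))

results_dashboard = _FakeResultsDashboard()

# Mirrors the new branch inside annotate(): upload only when --results-url
# was supplied.  All concrete values below are made up.
results_url = 'https://chromeperf.example.com'
if results_url:
  results_dashboard.SendResults(
      'graphing.log', ['RESULT warm_times: t= 120.0 ms'],
      'linux-perf', 'page_cycler_sample', results_url)

In production the flow is the same: a perf step that already runs runtest.py with an --annotate parser additionally passes --results-url pointing at the dashboard and sets perf_id in its factory properties, which annotate() forwards as the system argument.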