Chromium Code Reviews

| Index: build/android/test_runner.py |
| diff --git a/build/android/test_runner.py b/build/android/test_runner.py |
| index b22607fa4c1b34fc3e94f4bf6fd5755ea12baac1..e9d7df7d3878ef57b64c9bd6d981e4c7e348d5f2 100755 |
| --- a/build/android/test_runner.py |
| +++ b/build/android/test_runner.py |
| @@ -15,6 +15,7 @@ import os |
| import shutil |
| import signal |
| import sys |
| +import tempfile |
| import threading |
| import traceback |
| import unittest |
| @@ -36,10 +37,13 @@ from devil.utils import run_tests_helper |
| from pylib import constants |
| from pylib.base import base_test_result |
| from pylib.base import environment_factory |
| +from pylib.base import output_manager |
| +from pylib.base import output_manager_factory |
| from pylib.base import test_instance_factory |
| from pylib.base import test_run_factory |
| from pylib.results import json_results |
| from pylib.results import report_results |
| +from pylib.results.presentation import test_results_presentation |
| from pylib.utils import logdog_helper |
| from pylib.utils import logging_utils |
| @@ -146,6 +150,11 @@ def AddCommonOptions(parser): |
| default='local', choices=constants.VALID_ENVIRONMENTS, |
| help='Test environment to run in (default: %(default)s).') |
| + parser.add_argument( |
| + '--local-output', action='store_true', |
|
jbudorick
2017/08/10 16:27:38
nit: action on its own line
mikecase (-- gone --)
2017/08/23 04:28:20
Done
|
| + help='Whether to archive test output locally and generate ' |
| + 'a local results detail page.') |
| + |
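jbudorick's nit, which mikecase marked as done, asks for the argparse style used elsewhere in this file: each keyword argument on its own line. Applied to the flag above, the definition would read roughly as follows (a sketch that reuses the help text from the diff):

  parser.add_argument(
      '--local-output',
      action='store_true',
      help='Whether to archive test output locally and generate '
           'a local results detail page.')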
| class FastLocalDevAction(argparse.Action): |
| def __call__(self, parser, namespace, values, option_string=None): |
| namespace.verbose_count = max(namespace.verbose_count, 1) |
| @@ -817,69 +826,90 @@ def RunTestsInPlatformMode(args): |
| ### Set up test objects. |
| - env = environment_factory.CreateEnvironment(args, infra_error) |
| + out_manager = output_manager_factory.CreateOutputManager(args) |
| + env = environment_factory.CreateEnvironment( |
| + args, out_manager, infra_error) |
| test_instance = test_instance_factory.CreateTestInstance(args, infra_error) |
| test_run = test_run_factory.CreateTestRun( |
| args, env, test_instance, infra_error) |
| ### Run. |
| - with json_writer, logcats_uploader, env, test_instance, test_run: |
| - |
| - repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 |
| - else itertools.count()) |
| - result_counts = collections.defaultdict( |
| - lambda: collections.defaultdict(int)) |
| - iteration_count = 0 |
| - for _ in repetitions: |
| - raw_results = test_run.RunTests() |
| - if not raw_results: |
| - continue |
| - |
| - all_raw_results.append(raw_results) |
| - |
| - iteration_results = base_test_result.TestRunResults() |
| - for r in reversed(raw_results): |
| - iteration_results.AddTestRunResults(r) |
| - all_iteration_results.append(iteration_results) |
| - |
| - iteration_count += 1 |
| - for r in iteration_results.GetAll(): |
| - result_counts[r.GetName()][r.GetType()] += 1 |
| - report_results.LogFull( |
| - results=iteration_results, |
| - test_type=test_instance.TestType(), |
| - test_package=test_run.TestPackage(), |
| - annotation=getattr(args, 'annotations', None), |
| - flakiness_server=getattr(args, 'flakiness_dashboard_server', |
| - None)) |
| - if args.break_on_failure and not iteration_results.DidRunPass(): |
| - break |
| - |
| - if iteration_count > 1: |
| - # display summary results |
| - # only display results for a test if at least one test did not pass |
| - all_pass = 0 |
| - tot_tests = 0 |
| - for test_name in result_counts: |
| - tot_tests += 1 |
| - if any(result_counts[test_name][x] for x in ( |
| - base_test_result.ResultType.FAIL, |
| - base_test_result.ResultType.CRASH, |
| - base_test_result.ResultType.TIMEOUT, |
| - base_test_result.ResultType.UNKNOWN)): |
| - logging.critical( |
| - '%s: %s', |
| - test_name, |
| - ', '.join('%s %s' % (str(result_counts[test_name][i]), i) |
| - for i in base_test_result.ResultType.GetTypes())) |
| - else: |
| - all_pass += 1 |
| - |
| - logging.critical('%s of %s tests passed in all %s runs', |
| - str(all_pass), |
| - str(tot_tests), |
| - str(iteration_count)) |
| + with out_manager: |
|
jbudorick
2017/08/10 16:27:38
Can this be part of the with line below?
|
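The answer is yes: Python allows several context managers in one with statement, and "with a, b:" is equivalent to nesting "with a:" around "with b:", so folding out_manager into the line below would not change the enter/exit order. A sketch of the combined form, wrapped to fit Chromium's 80-column limit (same objects as in the diff):

  with out_manager, json_writer, logcats_uploader, env, test_instance, \
      test_run:
    ...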
| + with json_writer, logcats_uploader, env, test_instance, test_run: |
| + |
| + repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 |
| + else itertools.count()) |
| + result_counts = collections.defaultdict( |
| + lambda: collections.defaultdict(int)) |
| + iteration_count = 0 |
| + for _ in repetitions: |
| + raw_results = test_run.RunTests() |
| + if not raw_results: |
| + continue |
| + |
| + all_raw_results.append(raw_results) |
| + |
| + iteration_results = base_test_result.TestRunResults() |
| + for r in reversed(raw_results): |
| + iteration_results.AddTestRunResults(r) |
| + all_iteration_results.append(iteration_results) |
| + |
| + iteration_count += 1 |
| + for r in iteration_results.GetAll(): |
| + result_counts[r.GetName()][r.GetType()] += 1 |
| + report_results.LogFull( |
| + results=iteration_results, |
| + test_type=test_instance.TestType(), |
| + test_package=test_run.TestPackage(), |
| + annotation=getattr(args, 'annotations', None), |
| + flakiness_server=getattr(args, 'flakiness_dashboard_server', |
| + None)) |
| + if args.break_on_failure and not iteration_results.DidRunPass(): |
| + break |
| + |
| + if iteration_count > 1: |
| + # display summary results |
| + # only display results for a test if at least one test did not pass |
| + all_pass = 0 |
| + tot_tests = 0 |
| + for test_name in result_counts: |
| + tot_tests += 1 |
| + if any(result_counts[test_name][x] for x in ( |
| + base_test_result.ResultType.FAIL, |
| + base_test_result.ResultType.CRASH, |
| + base_test_result.ResultType.TIMEOUT, |
| + base_test_result.ResultType.UNKNOWN)): |
| + logging.critical( |
| + '%s: %s', |
| + test_name, |
| + ', '.join('%s %s' % (str(result_counts[test_name][i]), i) |
| + for i in base_test_result.ResultType.GetTypes())) |
| + else: |
| + all_pass += 1 |
| + |
| + logging.critical('%s of %s tests passed in all %s runs', |
| + str(all_pass), |
| + str(tot_tests), |
| + str(iteration_count)) |
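The summary block above prints a per-type breakdown only for tests that did not pass in every repetition; tests that always passed are just tallied into the all_pass count. A condensed, standalone sketch of the counting structure (the test name and result strings here are illustrative, not the real ResultType values):

  import collections

  result_counts = collections.defaultdict(
      lambda: collections.defaultdict(int))
  # Three repetitions of one test: two passes, one failure.
  for result_type in ('PASS', 'PASS', 'FAIL'):
    result_counts['MyTest'][result_type] += 1
  # result_counts == {'MyTest': {'PASS': 2, 'FAIL': 1}}, so MyTest gets
  # its own logging line because at least one run did not pass.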
| + |
| + if args.local_output and args.json_results_file: |
| + try: |
| + results_detail_file = tempfile.NamedTemporaryFile(delete=False) |
| + result_html_string, _, _ = test_results_presentation.result_details( |
| + json_path=args.json_results_file, |
| + test_name=args.command, |
| + cs_base_url='http://cs.chromium.org', |
| + local_output=True) |
| + results_detail_file.write(result_html_string) |
| + results_detail_file.flush() |
| + finally: |
| + results_detail_link = out_manager.ArchiveAndDeleteFile( |
| + results_detail_file.name, |
| + 'test_results_presentation.html', |
| + 'test_results_presentation', |
| + output_manager.Datatype.HTML) |
| + logging.critical('TEST RESULTS: %s', results_detail_link) |
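The block above depends on NamedTemporaryFile(delete=False): the file must survive past the write so ArchiveAndDeleteFile can reopen it by path and delete it after archiving. Note that the try/finally also archives the file when result_details raises, so a partial or empty page could still be linked. A minimal standalone sketch of the hand-off pattern (write_results_page and archive_fn are hypothetical names, not part of the diff):

  import tempfile

  def write_results_page(html_string, archive_fn):
    # delete=False keeps the file on disk after close(), so the
    # archiver can reopen it by name and remove it once archived.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
      tmp.write(html_string)
      tmp.flush()
    finally:
      tmp.close()
    return archive_fn(tmp.name)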
| if args.command == 'perf' and (args.steps or args.single_step): |
| return 0 |