Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 757683002: [Android] Implement generic JSON results that match base/test/launcher. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix presubmit issue (created 6 years ago)
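The description above says the dumped results should match the JSON summary written by base/test/launcher. As a rough sketch only, the structure built below illustrates what such a file could contain; the key names ('all_tests', 'per_iteration_data', 'status', and so on) are assumptions modelled on that launcher's summary format, not something defined by this patch, and the authoritative layout lives in the new pylib/results/json_results.py module, which is not part of this file's diff.

# Hypothetical illustration only: an assumed base/test/launcher-style summary.
# Field names are modelled on the launcher format, not taken from this patch.
import json

example_summary = {
    'global_tags': [],
    'all_tests': ['ExampleTest.Passes'],  # hypothetical test name
    'disabled_tests': [],
    'per_iteration_data': [
        {
            'ExampleTest.Passes': [
                {
                    'status': 'SUCCESS',
                    'elapsed_time_ms': 12,
                    'output_snippet': '',
                },
            ],
        },
    ],
}

with open('example_results.json', 'w') as f:
  json.dump(example_summary, f, indent=2)
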
Index: build/android/test_runner.py
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index b483d2b29e00444d4742ca5a6184963fc8ff7c55..ab8d6303f1287e9e85e9c9e22e76e428f5563f3a 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -39,11 +39,12 @@ from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
+from pylib.results import json_results
+from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import command_option_parser
-from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper
@@ -94,6 +95,9 @@ def AddCommonOptions(option_parser):
group.add_option('--adb-path',
help=('Specify the absolute path of the adb binary that '
'should be used.'))
+ group.add_option('--json-results-file', dest='json_results_file',
+ help='If set, will dump results in JSON format '
+ 'to specified file.')
option_parser.add_option_group(group)
@@ -161,8 +165,7 @@ def AddGTestOptions(option_parser):
dest='isolate_file_path',
help='.isolate file path to override the default '
'path')
- # TODO(gkanwar): Move these to Common Options once we have the plumbing
- # in our other test types to handle these commands
+
AddCommonOptions(option_parser)
AddDeviceOptions(option_parser)
@@ -638,6 +641,9 @@ def _RunGTests(options, devices):
test_package=suite_name,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
shutil.rmtree(constants.ISOLATE_DEPS_DIR)
@@ -657,6 +663,9 @@ def _RunLinkerTests(options, devices):
test_type='Linker test',
test_package='ChromiumLinkerTest')
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -708,6 +717,9 @@ def _RunInstrumentationTests(options, error_func, devices):
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -728,6 +740,9 @@ def _RunUIAutomatorTests(options, error_func, devices):
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -755,6 +770,9 @@ def _RunMonkeyTests(options, error_func, devices):
test_type='Monkey',
test_package='Monkey')
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
return exit_code
@@ -786,6 +804,9 @@ def _RunPerfTests(options, args, error_func):
test_type='Perf',
test_package='Perf')
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(results, options.json_results_file)
+
if perf_options.single_step:
return perf_test_runner.PrintTestOutput('single_step')
@@ -926,6 +947,10 @@ def RunTestsInPlatformMode(command, options, option_parser):
annotation=options.annotations,
flakiness_server=options.flakiness_dashboard_server)
+ if options.json_results_file:
+ json_results.GenerateJsonResultsFile(
+ results, options.json_results_file)
+
return results
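
Every test path in the diff guards the dump behind the same check on options.json_results_file, so behaviour is unchanged when the new flag is omitted. A minimal sketch of a consumer of the generated file follows; the flag and the GenerateJsonResultsFile call come from the diff, while the 'per_iteration_data' and 'status' keys read here are the same assumed launcher-style names used in the example above.

# Minimal sketch: read a file produced via --json-results-file and count
# runs that did not report SUCCESS. Key names are assumptions, as noted.
import json
import sys

def CountFailedRuns(json_results_path):
  with open(json_results_path) as f:
    summary = json.load(f)
  failed = 0
  for iteration in summary.get('per_iteration_data', []):
    for _test_name, runs in iteration.items():
      failed += sum(1 for r in runs if r.get('status') != 'SUCCESS')
  return failed

if __name__ == '__main__':
  sys.exit(1 if CountFailedRuns(sys.argv[1]) else 0)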