Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(178)

Unified Diff: build/android/test_runner.py

Issue 1971433002: ABANDONED [Android] Expose each try result in test results JSON. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fixes: use the last result in an iteration; use only iteration_results when determining the exit code. Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « build/android/pylib/remote/device/remote_device_test_run.py ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: build/android/test_runner.py
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 23c403995953224f97282d1639d12c9c49c6600c..a17f559a70f427abab824fab7764abab0007b3f9 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -849,28 +849,37 @@ def RunTestsInPlatformMode(args):
with test_instance_factory.CreateTestInstance(args, infra_error) as test:
with test_run_factory.CreateTestRun(
args, env, test, infra_error) as test_run:
- results = []
+ all_raw_results = []
+ all_iteration_results = []
repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
else itertools.count())
result_counts = collections.defaultdict(
lambda: collections.defaultdict(int))
iteration_count = 0
for _ in repetitions:
- iteration_results = test_run.RunTests()
- if iteration_results is not None:
- iteration_count += 1
- results.append(iteration_results)
- for r in iteration_results.GetAll():
- result_counts[r.GetName()][r.GetType()] += 1
- report_results.LogFull(
- results=iteration_results,
- test_type=test.TestType(),
- test_package=test_run.TestPackage(),
- annotation=getattr(args, 'annotations', None),
- flakiness_server=getattr(args, 'flakiness_dashboard_server',
- None))
- if args.break_on_failure and not iteration_results.DidRunPass():
- break
+ raw_results = test_run.RunTests()
+ if not raw_results:
+ continue
+
+ all_raw_results.extend(raw_results)
+
+ iteration_results = base_test_result.TestRunResults()
+ for r in reversed(raw_results):
+ iteration_results.AddTestRunResults(r)
+ all_iteration_results.append(iteration_results)
+
+ iteration_count += 1
+ for r in iteration_results.GetAll():
+ result_counts[r.GetName()][r.GetType()] += 1
+ report_results.LogFull(
+ results=iteration_results,
+ test_type=test.TestType(),
+ test_package=test_run.TestPackage(),
+ annotation=getattr(args, 'annotations', None),
+ flakiness_server=getattr(args, 'flakiness_dashboard_server',
+ None))
+ if args.break_on_failure and not iteration_results.DidRunPass():
+ break
if iteration_count > 1:
# display summary results
@@ -899,9 +908,9 @@ def RunTestsInPlatformMode(args):
if args.json_results_file:
json_results.GenerateJsonResultsFile(
- results, args.json_results_file)
+ all_raw_results, args.json_results_file)
- return (0 if all(r.DidRunPass() for r in results)
+ return (0 if all(r.DidRunPass() for r in all_iteration_results)
else constants.ERROR_EXIT_CODE)
« no previous file with comments | « build/android/pylib/remote/device/remote_device_test_run.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698