Index: build/android/test_runner.py
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 23c403995953224f97282d1639d12c9c49c6600c..a17f559a70f427abab824fab7764abab0007b3f9 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -849,28 +849,37 @@ def RunTestsInPlatformMode(args):
     with test_instance_factory.CreateTestInstance(args, infra_error) as test:
       with test_run_factory.CreateTestRun(
           args, env, test, infra_error) as test_run:
-        results = []
+        all_raw_results = []
+        all_iteration_results = []
         repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                        else itertools.count())
         result_counts = collections.defaultdict(
             lambda: collections.defaultdict(int))
         iteration_count = 0
         for _ in repetitions:
-          iteration_results = test_run.RunTests()
-          if iteration_results is not None:
-            iteration_count += 1
-            results.append(iteration_results)
-            for r in iteration_results.GetAll():
-              result_counts[r.GetName()][r.GetType()] += 1
-            report_results.LogFull(
-                results=iteration_results,
-                test_type=test.TestType(),
-                test_package=test_run.TestPackage(),
-                annotation=getattr(args, 'annotations', None),
-                flakiness_server=getattr(args, 'flakiness_dashboard_server',
-                                         None))
-            if args.break_on_failure and not iteration_results.DidRunPass():
-              break
+          raw_results = test_run.RunTests()
+          if not raw_results:
+            continue
+
+          all_raw_results.extend(raw_results)
+
+          iteration_results = base_test_result.TestRunResults()
+          for r in reversed(raw_results):
+            iteration_results.AddTestRunResults(r)
+          all_iteration_results.append(iteration_results)
+
+          iteration_count += 1
+          for r in iteration_results.GetAll():
+            result_counts[r.GetName()][r.GetType()] += 1
+          report_results.LogFull(
+              results=iteration_results,
+              test_type=test.TestType(),
+              test_package=test_run.TestPackage(),
+              annotation=getattr(args, 'annotations', None),
+              flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                       None))
+          if args.break_on_failure and not iteration_results.DidRunPass():
+            break
 
         if iteration_count > 1:
           # display summary results
@@ -899,9 +908,9 @@ def RunTestsInPlatformMode(args):
 
         if args.json_results_file:
           json_results.GenerateJsonResultsFile(
-              results, args.json_results_file)
+              all_raw_results, args.json_results_file)
 
-  return (0 if all(r.DidRunPass() for r in results)
+  return (0 if all(r.DidRunPass() for r in all_iteration_results)
          else constants.ERROR_EXIT_CODE)
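
Note on the reversed merge in the new loop: test_run.RunTests() now returns a list of result sets rather than a single one (judging by the loop, one set per try, so a flaky test can appear in several of them), and folding the latest try in first means each test's final attempt is the outcome that sticks when earlier tries are merged on top. The sketch below is a minimal, hypothetical stand-in written to illustrate that pattern; FakeResult, FakeTestRunResults, and the keep-the-existing-entry merge semantics are assumptions for illustration, not the real base_test_result API.

# Illustrative sketch only: a toy stand-in for a result-set type,
# assuming results are deduplicated by test name and that a merge
# keeps whichever entry is already present.

class FakeResult(object):
  def __init__(self, name, passed):
    self._name = name
    self._passed = passed

  def GetName(self):
    return self._name

  def DidPass(self):
    return self._passed


class FakeTestRunResults(object):
  def __init__(self):
    self._results_by_name = {}

  def AddResult(self, result):
    self._results_by_name.setdefault(result.GetName(), result)

  def AddTestRunResults(self, other):
    # Keep whatever is already recorded for a test name; entries from
    # `other` only fill in tests not seen yet.
    for name, result in other._results_by_name.items():
      self._results_by_name.setdefault(name, result)

  def GetAll(self):
    return list(self._results_by_name.values())

  def DidRunPass(self):
    return all(r.DidPass() for r in self.GetAll())


# One FakeTestRunResults per try: the first try fails testFoo, the
# retry passes it.
first_try = FakeTestRunResults()
first_try.AddResult(FakeResult('testFoo', passed=False))
retry = FakeTestRunResults()
retry.AddResult(FakeResult('testFoo', passed=True))
raw_results = [first_try, retry]

# Mirrors the loop in the patch: merging in reverse order makes each
# test's final attempt the result that is kept.
iteration_results = FakeTestRunResults()
for r in reversed(raw_results):
  iteration_results.AddTestRunResults(r)

assert iteration_results.DidRunPass()  # the retry's pass wins

Under those assumptions, the split also explains the two accumulators: all_raw_results keeps every attempt for the JSON results file, while all_iteration_results keeps only the merged per-iteration view used for the pass/fail exit code.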