Chromium Code Reviews
Unified Diff: tools/telemetry/telemetry/unittest/json_results.py

Issue 426123002: Implement auto-retry of failed tests for telemetry{_perf}_unittests. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: update w/ review feedback | Created 6 years, 4 months ago
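
The helpers in this file are consumed by the retry logic in tools/telemetry/telemetry/unittest/run_tests.py (the next file in this change). As rough orientation only, a hypothetical driver loop along the lines below would produce the list of per-attempt result objects that FullResults(), WriteFullResultsIfNecessary() and ExitCodeFromFullResults() expect; the FilterSuite() helper and the retry limit are made up for illustration and are not part of this patch.

# Hypothetical sketch, not code from this CL: collect one result object per
# attempt, retrying only the tests that failed on the previous attempt.
# runner.run() is assumed to return result objects exposing .failures,
# .errors and .successes, as PassingTestNames() in this file assumes.
def RunWithRetries(args, suite, runner, max_retries=3):   # retry limit is arbitrary
  results = []
  result = runner.run(suite)            # attempt 0: run the whole suite
  results.append(result)
  retries = 0
  while (result.failures or result.errors) and retries < max_retries:
    failed_names = FailedTestNames(result)
    retry_suite = FilterSuite(suite, failed_names)   # made-up helper
    result = runner.run(retry_suite)    # re-run only the previous attempt's failures
    results.append(result)
    retries += 1

  full_results = FullResults(args, suite, results)
  WriteFullResultsIfNecessary(args, full_results)
  return ExitCodeFromFullResults(full_results)
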
Index: tools/telemetry/telemetry/unittest/json_results.py
diff --git a/tools/telemetry/telemetry/unittest/json_results.py b/tools/telemetry/telemetry/unittest/json_results.py
index 76a5c28ccb09f61186f309449376cacf0b419ac1..3dbb238c904f2d3f2ea5ac227366c45ade7de634 100644
--- a/tools/telemetry/telemetry/unittest/json_results.py
+++ b/tools/telemetry/telemetry/unittest/json_results.py
@@ -27,22 +27,19 @@ def ValidateArgs(parser, args):
       parser.error('Error: malformed metadata "%s"' % val)
 
 
-def WriteandUploadResultsIfNecessary(args, test_suite, result):
+def WriteFullResultsIfNecessary(args, full_results):
   if not args.write_full_results_to:
     return
 
-  full_results = _FullResults(test_suite, result, args.metadata)
-
   with open(args.write_full_results_to, 'w') as fp:
     json.dump(full_results, fp, indent=2)
     fp.write("\n")
 
-  # TODO(dpranke): upload to test-results.appspot.com if requested as well.
 
 
 TEST_SEPARATOR = '.'
 
 
-def _FullResults(suite, result, metadata):
+def FullResults(args, suite, results):
   """Convert the unittest results to the Chromium JSON test result format.
 
   This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
@@ -53,49 +50,79 @@ def _FullResults(suite, result, metadata):
   full_results['path_delimiter'] = TEST_SEPARATOR
   full_results['version'] = 3
   full_results['seconds_since_epoch'] = time.time()
-  for md in metadata:
+  for md in args.metadata:
     key, val = md.split('=', 1)
     full_results[key] = val
 
-  all_test_names = _AllTestNames(suite)
-  failed_test_names = _FailedTestNames(result)
+  # TODO(dpranke): Handle skipped tests as well.
+  all_test_names = AllTestNames(suite)
+  num_failures = NumFailuresAfterRetries(results)
   full_results['num_failures_by_type'] = {
-      'FAIL': len(failed_test_names),
-      'PASS': len(all_test_names) - len(failed_test_names),
+      'FAIL': num_failures,
+      'PASS': len(all_test_names) - num_failures,
   }
 
+  sets_of_passing_test_names = map(PassingTestNames, results)
+  sets_of_failing_test_names = map(FailedTestNames, results)
+
   full_results['tests'] = {}
   for test_name in all_test_names:
-    value = {}
-    value['expected'] = 'PASS'
-    if test_name in failed_test_names:
-      value['actual'] = 'FAIL'
-      value['is_unexpected'] = True
-    else:
-      value['actual'] = 'PASS'
-
+    value = {
+        'expected': 'PASS',
+        'actual': ActualResultsForTest(test_name, sets_of_failing_test_names,
+                                       sets_of_passing_test_names)
+    }
     _AddPathToTrie(full_results['tests'], test_name, value)
 
   return full_results
 
 
-def _AllTestNames(suite):
+def ActualResultsForTest(test_name, sets_of_failing_test_names,
+                         sets_of_passing_test_names):
+  actuals = []
+  for retry_num in range(len(sets_of_failing_test_names)):
+    if test_name in sets_of_failing_test_names[retry_num]:
+      actuals.append('FAIL')
+    elif test_name in sets_of_passing_test_names[retry_num]:
+      assert ((retry_num == 0) or
+              (test_name in sets_of_failing_test_names[retry_num - 1])), (
+          'We should not have run a test that did not fail '
+          'on the previous run.')
+      actuals.append('PASS')
+
+  assert actuals, 'We did not find any result data for %s.' % test_name
+  return ' '.join(actuals)
+
+
+def ExitCodeFromFullResults(full_results):
+  return 1 if full_results['num_failures_by_type']['FAIL'] else 0
+
+
+def AllTestNames(suite):
   test_names = []
   # _tests is protected pylint: disable=W0212
   for test in suite._tests:
     if isinstance(test, unittest.suite.TestSuite):
-      test_names.extend(_AllTestNames(test))
+      test_names.extend(AllTestNames(test))
     else:
       test_names.append(test.id())
   return test_names
 
 
-def _FailedTestNames(result):
+def NumFailuresAfterRetries(results):
+  return len(FailedTestNames(results[-1]))
+
+
+def FailedTestNames(result):
   return set(test.id() for test, _ in result.failures + result.errors)
 
 
+def PassingTestNames(result):
+  return set(test.id() for test in result.successes)
+
+
 def _AddPathToTrie(trie, path, value):
   if TEST_SEPARATOR not in path:
     trie[path] = value
@@ -104,5 +131,3 @@ def _AddPathToTrie(trie, path, value):
   if directory not in trie:
     trie[directory] = {}
   _AddPathToTrie(trie[directory], rest, value)
-
-
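
For readers unfamiliar with the format named in FullResults' docstring: the dictionary it builds (and that WriteFullResultsIfNecessary() serializes) nests test names split on TEST_SEPARATOR into a trie, and ActualResultsForTest() joins one token per attempt into the space-separated 'actual' field. The snippet below is purely illustrative; the test name and timestamp are made up, and only keys visible in this diff are shown.

# Illustrative shape of full_results for a single made-up test,
# telemetry.foo_unittest.FooTest.testBar, which failed on the first attempt
# and passed on the retry ('actual' lists one outcome per attempt).
full_results = {
    'path_delimiter': '.',                # TEST_SEPARATOR
    'version': 3,
    'seconds_since_epoch': 1406851200.0,  # placeholder timestamp
    'num_failures_by_type': {'FAIL': 0, 'PASS': 1},  # FAIL counts only the last attempt
    'tests': {
        'telemetry': {
            'foo_unittest': {
                'FooTest': {
                    'testBar': {'expected': 'PASS', 'actual': 'FAIL PASS'},
                },
            },
        },
    },
}

Because the failure passed on retry, num_failures_by_type['FAIL'] is 0 and ExitCodeFromFullResults() returns 0.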