Chromium Code Reviews

Unified Diff: tools/telemetry/telemetry/unittest/json_results.py

Issue 484333002: Fix handling of skipped tests in telemetry_unittests when uploading results. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 4 months ago
Index: tools/telemetry/telemetry/unittest/json_results.py
diff --git a/tools/telemetry/telemetry/unittest/json_results.py b/tools/telemetry/telemetry/unittest/json_results.py
index c03432e8769c0d0dd011f580b65a81ceddd01250..80d7cbe128401313c369a92c4b814472f7d37f9c 100644
--- a/tools/telemetry/telemetry/unittest/json_results.py
+++ b/tools/telemetry/telemetry/unittest/json_results.py
@@ -85,28 +85,44 @@ def FullResults(args, suite, results):
     key, val = md.split('=', 1)
     full_results[key] = val
-  # TODO(dpranke): Handle skipped tests as well.
-
   all_test_names = AllTestNames(suite)
+  sets_of_passing_test_names = map(PassingTestNames, results)
+  sets_of_failing_test_names = map(FailedTestNames, results)
+
+  # TODO(crbug.com/405379): This handles tests that are skipped via the
+  # unittest skip decorators (like skipUnless). The tests that are skipped via
+  # telemetry's decorators package are not included in the test suite at all so
+  # we need those to be passed in in order to include them.
+  skipped_tests = (set(all_test_names) - sets_of_passing_test_names[0]
+                   - sets_of_failing_test_names[0])
+
+  num_tests = len(all_test_names)
   num_failures = NumFailuresAfterRetries(results)
+  num_skips = len(skipped_tests)
+  num_passes = num_tests - num_failures - num_skips
   full_results['num_failures_by_type'] = {
       'FAIL': num_failures,
-      'PASS': len(all_test_names) - num_failures,
+      'PASS': num_passes,
+      'SKIP': num_skips,
   }
-  sets_of_passing_test_names = map(PassingTestNames, results)
-  sets_of_failing_test_names = map(FailedTestNames, results)
-
   full_results['tests'] = {}
   for test_name in all_test_names:
-    value = {
-        'expected': 'PASS',
-        'actual': ActualResultsForTest(test_name, sets_of_failing_test_names,
-                                       sets_of_passing_test_names)
-    }
-    if value['actual'].endswith('FAIL'):
-      value['is_unexpected'] = True
+    if test_name in skipped_tests:
+      value = {
+          'expected': 'SKIP',
+          'actual': 'SKIP',
+      }
+    else:
+      value = {
+          'expected': 'PASS',
+          'actual': ActualResultsForTest(test_name,
+                                         sets_of_failing_test_names,
+                                         sets_of_passing_test_names),
+      }
+      if value['actual'].endswith('FAIL'):
+        value['is_unexpected'] = True
     _AddPathToTrie(full_results['tests'], test_name, value)
   return full_results
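
Note on the skip detection above (illustration only, not part of this patch): tests skipped with unittest's own decorators are still collected into the suite, so they appear in AllTestNames() but in neither the passing nor the failing sets, which is exactly what the new set difference exploits. The sketch below is a minimal, hypothetical example using only the standard unittest API; ExampleTests and its test names are made up for illustration.

import unittest


class ExampleTests(unittest.TestCase):
  # Hypothetical tests, named only for this illustration.

  def test_passes(self):
    self.assertTrue(True)

  @unittest.skipUnless(False, 'condition never holds, so this test is skipped')
  def test_skipped(self):
    self.fail('never runs')


suite = unittest.defaultTestLoader.loadTestsFromTestCase(ExampleTests)
all_test_names = set(t.id() for t in suite)  # the skipped test is still in the suite

result = unittest.TextTestRunner(verbosity=0).run(suite)
failing_test_names = set(t.id() for t, _ in result.failures + result.errors)
passing_test_names = (all_test_names - failing_test_names
                      - set(t.id() for t, _ in result.skipped))

# Same set arithmetic as skipped_tests in the patch: whatever is in the suite
# but neither passed nor failed must have been skipped by a unittest decorator.
skipped_tests = all_test_names - passing_test_names - failing_test_names
print(sorted(skipped_tests))  # ['__main__.ExampleTests.test_skipped']

With this patch, such a test gets an {'expected': 'SKIP', 'actual': 'SKIP'} entry in full_results['tests'] and is counted under 'SKIP' in num_failures_by_type; tests skipped via telemetry's own decorators never make it into the suite, which is what the TODO(crbug.com/405379) comment calls out.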
