Chromium Code Reviews

Unified Diff: build/android/pylib/base/test_result.py

Issue 11901003: [Android] Reland 'Keep track of unknown test results at the TestRunner layer.' (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 11 months ago
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.


 import json
 import logging
 import os
 import re
 import time
(...skipping 37 matching lines...)

 class TestResults(object):
   """Results of a test run."""

   def __init__(self):
     self.ok = []
     self.failed = []
     self.crashed = []
     self.unknown = []
     self.timed_out = []
-    self.overall_timed_out = False
-    self.overall_fail = False
     self.device_exception = None

   @staticmethod
-  def FromRun(ok=None, failed=None, crashed=None, timed_out=None,
-              overall_timed_out=False, overall_fail=False,
-              device_exception=None):
+  def FromRun(ok=None, failed=None, crashed=None, timed_out=None):
     ret = TestResults()
     ret.ok = ok or []
     ret.failed = failed or []
     ret.crashed = crashed or []
     ret.timed_out = timed_out or []
-    ret.overall_timed_out = overall_timed_out
-    ret.overall_fail = overall_fail
-    ret.device_exception = device_exception
     return ret

   @staticmethod
   def FromTestResults(results):
     """Combines a list of results in a single TestResults object."""
     ret = TestResults()
     for t in results:
       ret.ok += t.ok
       ret.failed += t.failed
       ret.crashed += t.crashed
       ret.unknown += t.unknown
       ret.timed_out += t.timed_out
-      if t.overall_timed_out:
-        ret.overall_timed_out = True
-      if t.overall_fail:
-        ret.overall_fail = True
     return ret

   @staticmethod
   def FromPythonException(test_name, start_date_ms, exc_info):
     """Constructs a TestResults with exception information for the given test.

     Args:
       test_name: name of the test which raised an exception.
       start_date_ms: the starting time for the test.
       exc_info: exception info, ostensibly from sys.exc_info().
(...skipping 24 matching lines...)
   def _Log(self, sorted_list):
     for t in sorted_list:
       logging.critical(t.name)
       if t.log:
         logging.critical(t.log)

   def GetAllBroken(self):
     """Returns the all broken tests."""
     return self.failed + self.crashed + self.unknown + self.timed_out

+  def GetAll(self):
+    """Returns the all tests."""
+    return self.ok + self.GetAllBroken()
+
   def _LogToFile(self, test_type, test_suite, build_type):
     """Log results to local files which can be used for aggregation later."""
     # TODO(frankf): Report tests that failed to run here too.
     log_file_path = os.path.join(constants.CHROME_DIR, 'out',
                                  build_type, 'test_logs')
     if not os.path.exists(log_file_path):
       os.mkdir(log_file_path)
     full_file_name = os.path.join(
         log_file_path, re.sub('\W', '_', test_type).lower() + '.log')
     if not os.path.exists(full_file_name):
(...skipping 50 matching lines...)
       # Downstream server.
       else:
         dashboard_test_type = 'Chromium_Android_Instrumentation'

       flakiness_dashboard_results_uploader.Upload(
           flakiness_server, dashboard_test_type, self)
     except Exception as e:
       logging.error(e)

   def LogFull(self, test_type, test_package, annotation=None,
-              build_type='Debug', all_tests=None, flakiness_server=None):
+              build_type='Debug', flakiness_server=None):
     """Log the tests results for the test suite.

     The results will be logged three different ways:
       1. Log to stdout.
       2. Log to local files for aggregating multiple test steps
          (on buildbots only).
       3. Log to flakiness dashboard (on buildbots only).

     Args:
       test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
       test_package: Test package name (e.g. 'ipc_tests' for gtests,
                     'ContentShellTest' for instrumentation tests)
       annotation: If instrumenation test type, this is a list of annotations
                   (e.g. ['Smoke', 'SmallTest']).
       build_type: Release/Debug
-      all_tests: A list of all tests that were supposed to run.
-                 This is used to determine which tests have failed to run.
-                 If None, we assume all tests ran.
       flakiness_server: If provider, upload the results to flakiness dashboard
                         with this URL.
     """
     # Output all broken tests or 'passed' if none broken.
     logging.critical('*' * 80)
     logging.critical('Final result:')
     if self.failed:
       logging.critical('Failed:')
       self._Log(sorted(self.failed))
     if self.crashed:
       logging.critical('Crashed:')
       self._Log(sorted(self.crashed))
     if self.timed_out:
       logging.critical('Timed out:')
       self._Log(sorted(self.timed_out))
     if self.unknown:
       logging.critical('Unknown:')
       self._Log(sorted(self.unknown))
     if not self.GetAllBroken():
       logging.critical('Passed')

     # Summarize in the test output.
-    logging.critical('*' * 80)
-    summary = ['Summary:\n']
-    if all_tests:
-      summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)]
-    num_tests_ran = (len(self.ok) + len(self.failed) +
-                     len(self.crashed) + len(self.unknown) +
-                     len(self.timed_out))
+    num_tests_ran = len(self.GetAll())
     tests_passed = [t.name for t in self.ok]
     tests_failed = [t.name for t in self.failed]
     tests_crashed = [t.name for t in self.crashed]
+    tests_timed_out = [t.name for t in self.timed_out]
     tests_unknown = [t.name for t in self.unknown]
-    tests_timed_out = [t.name for t in self.timed_out]
+    logging.critical('*' * 80)
+    summary = ['Summary:\n']
     summary += ['RAN=%d\n' % (num_tests_ran),
                 'PASSED=%d\n' % len(tests_passed),
                 'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                 'TIMEDOUT=%d %s\n' % (len(tests_timed_out), tests_timed_out),
                 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
-    if all_tests and num_tests_ran != len(all_tests):
-      # Add the list of tests we failed to run.
-      tests_failed_to_run = list(set(all_tests) - set(tests_passed) -
-                                 set(tests_failed) - set(tests_crashed) -
-                                 set(tests_unknown) - set(tests_timed_out))
-      summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run),
-                                             tests_failed_to_run)]
     summary_string = ''.join(summary)
     logging.critical(summary_string)
     logging.critical('*' * 80)

     if os.environ.get('BUILDBOT_BUILDERNAME'):
       # It is possible to have multiple buildbot steps for the same
       # instrumenation test package using different annotations.
       if annotation and len(annotation) == 1:
         test_suite = annotation[0]
       else:
         test_suite = test_package
       self._LogToFile(test_type, test_suite, build_type)

     if flakiness_server:
       self._LogToFlakinessDashboard(test_type, test_package, flakiness_server)

   def PrintAnnotation(self):
     """Print buildbot annotations for test results."""
-    if (self.failed or self.crashed or self.overall_fail or
-        self.overall_timed_out):
+    if self.GetAllBroken():
       buildbot_report.PrintError()
     else:
       print 'Step success!'  # No annotation needed
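
For context, a minimal usage sketch of the slimmed-down TestResults API after this patch (not part of the change itself). It assumes the patched module is importable as pylib.base.test_result, and that individual result objects expose .name and .log, which is all _Log() reads; FakeResult below is a hypothetical stand-in for whatever result objects the test runners actually produce.

# Usage sketch only; FakeResult and the import path are assumptions, not part
# of this change.
import collections

from pylib.base.test_result import TestResults

# Hypothetical stand-in for a single result; only .name and .log are assumed.
FakeResult = collections.namedtuple('FakeResult', ['name', 'log'])

per_runner = [
    TestResults.FromRun(ok=[FakeResult('FooTest.testA', '')]),
    TestResults.FromRun(failed=[FakeResult('FooTest.testB', 'assert failed')]),
]
combined = TestResults.FromTestResults(per_runner)

# With this patch the overall_fail/overall_timed_out flags are gone; success
# is derived from the per-test lists instead.
print len(combined.GetAll())        # 2 (ok + broken)
print len(combined.GetAllBroken())  # 1 (failed + crashed + unknown + timed_out)

# LogFull() no longer takes all_tests; tests that never reported a result are
# expected to show up in the 'unknown' bucket from the TestRunner layer.
combined.LogFull(test_type='Unit test', test_package='foo_tests',
                 build_type='Release')

Since GetAll() is simply ok + GetAllBroken(), the RAN count in the summary now always equals the number of results actually collected, and the old FAILED_TO_RUN bookkeeping is subsumed by UNKNOWN.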
