# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import json
import logging
import os
import re
import time
import traceback

import buildbot_report
import constants
from pylib.utils import flakiness_dashboard_results_uploader


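# Substring that identifies the staging flakiness dashboard server; it is
# matched against the server URL passed to _LogToFlakinessDashboard below.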
_STAGING_SERVER = 'chrome-android-staging'


class BaseTestResult(object):
  """A single result from a unit test."""

  def __init__(self, name, log):
    self.name = name
    self.log = log.replace('\r', '')


class SingleTestResult(BaseTestResult):
  """Result information for a single test.

  Args:
    full_name: Full name of the test.
    start_date: Date in milliseconds when the test began running.
    dur: Duration of the test run in milliseconds.
    log: An optional string listing any errors.
  """

  def __init__(self, full_name, start_date, dur, log=''):
    BaseTestResult.__init__(self, full_name, log)
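    # Instrumentation test names have the form 'ClassName#testName'; when a
    # '#' is present, split the class and test names apart.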
    name_pieces = full_name.rsplit('#')
    if len(name_pieces) > 1:
      self.test_name = name_pieces[1]
      self.class_name = name_pieces[0]
    else:
      self.class_name = full_name
      self.test_name = full_name
    self.start_date = start_date
    self.dur = dur


class TestResults(object):
  """Results of a test run."""

  def __init__(self):
    self.ok = []
    self.failed = []
    self.crashed = []
    self.unknown = []
    self.timed_out = False
    self.overall_fail = False
    self.device_exception = None

  @staticmethod
  def FromRun(ok=None, failed=None, crashed=None, timed_out=False,
              overall_fail=False, device_exception=None):
    ret = TestResults()
    ret.ok = ok or []
    ret.failed = failed or []
    ret.crashed = crashed or []
    ret.timed_out = timed_out
    ret.overall_fail = overall_fail
    ret.device_exception = device_exception
    return ret

  @staticmethod
  def FromTestResults(results):
    """Combines a list of results into a single TestResults object."""
    ret = TestResults()
    for t in results:
      ret.ok += t.ok
      ret.failed += t.failed
      ret.crashed += t.crashed
      ret.unknown += t.unknown
      if t.timed_out:
        ret.timed_out = True
      if t.overall_fail:
        ret.overall_fail = True
    return ret

  @staticmethod
  def FromPythonException(test_name, start_date_ms, exc_info):
    """Constructs a TestResults with exception information for the given test.

    Args:
      test_name: Name of the test which raised an exception.
      start_date_ms: The starting time for the test, in milliseconds.
      exc_info: Exception info, ostensibly from sys.exc_info().

    Returns:
      A TestResults object with a SingleTestResult in the failed list.
    """
    exc_type, exc_value, exc_traceback = exc_info
    trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
                                                    exc_traceback))
    log_msg = 'Exception:\n' + trace_info
    duration_ms = (int(time.time()) * 1000) - start_date_ms

    exc_result = SingleTestResult(
        full_name='PythonWrapper#' + test_name,
        start_date=start_date_ms,
        dur=duration_ms,
        log=(str(exc_type) + ' ' + log_msg))

    results = TestResults()
    results.failed.append(exc_result)
    return results

  @staticmethod
  def DeviceExceptions(results):
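    """Returns the set of results that have a device exception."""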
    return set(filter(lambda t: t.device_exception, results))

  def _Log(self, sorted_list):
    for t in sorted_list:
      logging.critical(t.name)
      if t.log:
        logging.critical(t.log)

  def GetAllBroken(self):
    """Returns all the broken tests, including failed, crashed and unknown."""
    return self.failed + self.crashed + self.unknown

  def _LogToFile(self, test_type, test_suite, build_type):
    """Log results to local files which can be used for aggregation later."""
    # TODO(frankf): Report tests that failed to run here too.
    log_file_path = os.path.join(constants.CHROME_DIR, 'out',
                                 build_type, 'test_logs')
    if not os.path.exists(log_file_path):
      os.mkdir(log_file_path)
    full_file_name = os.path.join(
        log_file_path, re.sub(r'\W', '_', test_type).lower() + '.log')
    if not os.path.exists(full_file_name):
      with open(full_file_name, 'w') as log_file:
        print >> log_file, '\n%s results for %s build %s:' % (
            test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
            os.environ.get('BUILDBOT_BUILDNUMBER'))
      logging.info('Writing results to %s.' % full_file_name)
    log_contents = [' %s result : %d tests ran' % (test_suite,
                                                   len(self.ok) +
                                                   len(self.failed) +
                                                   len(self.crashed) +
                                                   len(self.unknown))]
    content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)),
                     ('crashed', len(self.crashed))]
    for (result, count) in content_pairs:
      if count:
        log_contents.append(', %d tests %s' % (count, result))
    with open(full_file_name, 'a') as log_file:
      print >> log_file, ''.join(log_contents)
    logging.info('Writing results to %s.' % full_file_name)
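    # Also append a machine-readable JSON record for this test group.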
    content = {'test_group': test_type,
               'ok': [t.name for t in self.ok],
               'failed': [t.name for t in self.failed],
               'crashed': [t.name for t in self.crashed],
               'unknown': [t.name for t in self.unknown],}
    json_file_path = os.path.join(log_file_path, 'results.json')
    with open(json_file_path, 'a') as json_file:
      print >> json_file, json.dumps(content)
    logging.info('Writing results to %s.' % json_file_path)

  def _LogToFlakinessDashboard(self, test_type, test_package, flakiness_server):
    """Upload results to the flakiness dashboard."""
    logging.info('Upload results for test type "%s", test package "%s" to %s' %
                 (test_type, test_package, flakiness_server))

    # TODO(frankf): Enable uploading for gtests.
    if test_type != 'Instrumentation':
      logging.warning('Invalid test type.')
      return

    try:
      # TODO(frankf): Temp server for initial testing upstream.
      # Use http://test-results.appspot.com once we're confident this works.
      if _STAGING_SERVER in flakiness_server:
        assert test_package in ['ContentShellTest',
                                'ChromiumTestShellTest',
                                'AndroidWebViewTest']
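        # Note: rstrip('test') strips any trailing 't'/'e'/'s' characters, so
        # e.g. 'contentshelltest' becomes 'contentshell'.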
        dashboard_test_type = ('%s_instrumentation_tests' %
                               test_package.lower().rstrip('test'))
      # Downstream prod server.
      else:
        dashboard_test_type = 'Chromium_Android_Instrumentation'

      flakiness_dashboard_results_uploader.Upload(
          flakiness_server, dashboard_test_type, self)
    except Exception as e:
      logging.error(e)

  def LogFull(self, test_type, test_package, annotation=None,
              build_type='Debug', all_tests=None, flakiness_server=None):
    """Log the test results for the test suite.

    The results will be logged three different ways:
      1. Log to stdout.
      2. Log to local files for aggregating multiple test steps
         (on buildbots only).
      3. Log to flakiness dashboard (on buildbots only).

    Args:
      test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
      test_package: Test package name (e.g. 'ipc_tests' for gtests,
                    'ContentShellTest' for instrumentation tests).
      annotation: If instrumentation test type, this is a list of annotations
                  (e.g. ['Smoke', 'SmallTest']).
      build_type: Release/Debug.
      all_tests: A list of all tests that were supposed to run.
                 This is used to determine which tests have failed to run.
                 If None, we assume all tests ran.
      flakiness_server: If provided, upload the results to the flakiness
                        dashboard at this URL.
    """
    # Output all broken tests or 'passed' if none broken.
    logging.critical('*' * 80)
    logging.critical('Final result:')
    if self.failed:
      logging.critical('Failed:')
      self._Log(sorted(self.failed))
    if self.crashed:
      logging.critical('Crashed:')
      self._Log(sorted(self.crashed))
    if self.unknown:
      logging.critical('Unknown:')
      self._Log(sorted(self.unknown))
    if not self.GetAllBroken():
      logging.critical('Passed')

    # Summarize in the test output.
    logging.critical('*' * 80)
    summary = ['Summary:\n']
    if all_tests:
      summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)]
    num_tests_ran = (len(self.ok) + len(self.failed) +
                     len(self.crashed) + len(self.unknown))
    tests_passed = [t.name for t in self.ok]
    tests_failed = [t.name for t in self.failed]
    tests_crashed = [t.name for t in self.crashed]
    tests_unknown = [t.name for t in self.unknown]
    summary += ['RAN=%d\n' % (num_tests_ran),
                'PASSED=%d\n' % len(tests_passed),
                'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
    if all_tests and num_tests_ran != len(all_tests):
      # Add the list of tests we failed to run.
      tests_failed_to_run = list(set(all_tests) - set(tests_passed) -
                                 set(tests_failed) - set(tests_crashed) -
                                 set(tests_unknown))
      summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run),
                                             tests_failed_to_run)]
    summary_string = ''.join(summary)
    logging.critical(summary_string)
    logging.critical('*' * 80)

    if os.environ.get('BUILDBOT_BUILDERNAME'):
      # It is possible to have multiple buildbot steps for the same
      # instrumentation test package using different annotations.
      if annotation and len(annotation) == 1:
        test_suite = annotation[0]
      else:
        test_suite = test_package
      self._LogToFile(test_type, test_suite, build_type)

    if flakiness_server:
      self._LogToFlakinessDashboard(test_type, test_package, flakiness_server)

  def PrintAnnotation(self):
    """Print buildbot annotations for test results."""
    if self.failed or self.crashed or self.overall_fail or self.timed_out:
      buildbot_report.PrintError()
    else:
      print 'Step success!'  # No annotation needed