OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 import json | 6 import json |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import time | 9 import time |
10 import traceback | 10 import traceback |
11 | 11 |
12 import buildbot_report | 12 import buildbot_report |
13 import constants | 13 import constants |
| 14 import flakiness_dashboard_results_uploader |
14 | 15 |
15 | 16 |
16 class BaseTestResult(object): | 17 class BaseTestResult(object): |
17 """A single result from a unit test.""" | 18 """A single result from a unit test.""" |
18 | 19 |
19 def __init__(self, name, log): | 20 def __init__(self, name, log): |
20 self.name = name | 21 self.name = name |
21 self.log = log.replace('\r', '') | 22 self.log = log.replace('\r', '') |
22 | 23 |
23 | 24 |
(...skipping 94 matching lines...)
118 def _Log(self, sorted_list): | 119 def _Log(self, sorted_list): |
119 for t in sorted_list: | 120 for t in sorted_list: |
120 logging.critical(t.name) | 121 logging.critical(t.name) |
121 if t.log: | 122 if t.log: |
122 logging.critical(t.log) | 123 logging.critical(t.log) |
123 | 124 |
124 def GetAllBroken(self): | 125 def GetAllBroken(self): |
125 """Returns the all broken tests including failed, crashed, unknown.""" | 126 """Returns the all broken tests including failed, crashed, unknown.""" |
126 return self.failed + self.crashed + self.unknown | 127 return self.failed + self.crashed + self.unknown |
127 | 128 |
128 def LogFull(self, test_group, test_suite, build_type, tests_to_run): | 129 def _LogToFile(self, test_type, test_suite, build_type): |
129 """Output broken test logs, summarize in a log file and the test output.""" | 130 """Log results to local files which can be used for aggregation later.""" |
| 131 # TODO(frankf): Report tests that failed to run here too. |
| 132 log_file_path = os.path.join(constants.CHROME_DIR, 'out', |
| 133 build_type, 'test_logs') |
| 134 if not os.path.exists(log_file_path): |
| 135 os.mkdir(log_file_path) |
| 136 full_file_name = os.path.join(log_file_path, test_type) |
| 137 if not os.path.exists(full_file_name): |
| 138 with open(full_file_name, 'w') as log_file: |
| 139 print >> log_file, '\n%s results for %s build %s:' % ( |
| 140 test_type, os.environ.get('BUILDBOT_BUILDERNAME'), |
| 141 os.environ.get('BUILDBOT_BUILDNUMBER')) |
| 142 logging.info('Writing results to %s.' % full_file_name) |
| 143 log_contents = [' %s result : %d tests ran' % (test_suite, |
| 144 len(self.ok) + |
| 145 len(self.failed) + |
| 146 len(self.crashed) + |
| 147 len(self.unknown))] |
| 148 content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)), |
| 149 ('crashed', len(self.crashed))] |
| 150 for (result, count) in content_pairs: |
| 151 if count: |
| 152 log_contents.append(', %d tests %s' % (count, result)) |
| 153 with open(full_file_name, 'a') as log_file: |
| 154 print >> log_file, ''.join(log_contents) |
| 155 logging.info('Writing results to %s.' % full_file_name) |
| 156 content = {'test_group': test_type, |
| 157 'ok': [t.name for t in self.ok], |
| 158 'failed': [t.name for t in self.failed], |
| 159 'crashed': [t.name for t in self.crashed], |
| 160 'unknown': [t.name for t in self.unknown],} |
| 161 json_file_path = os.path.join(log_file_path, 'results.json') |
| 162 with open(json_file_path, 'a') as json_file: |
| 163 print >> json_file, json.dumps(content) |
| 164 logging.info('Writing results to %s.' % json_file_path) |
| 165 |
| 166 def _LogToFlakinessDashboard(self, test_type, test_package, flakiness_server): |
| 167 """Upload results to the flakiness dashboard""" |
| 168 # TODO(frankf): Fix upstream/downstream reporting for both test types. |
| 169 logging.info('Upload %s %s to %s' % (test_type, test_package, |
| 170 flakiness_server)) |
| 171 flakiness_dashboard_results_uploader.Upload( |
| 172 flakiness_server, 'Chromium_Android_Instrumentation', self) |
| 173 |
| 174 def LogFull(self, test_type, test_package, annotation=None, |
| 175 build_type='Debug', all_tests=None, flakiness_server=None): |
| 176 """Log the tests results for the test suite. |
| 177 |
| 178 The results will be logged in three different ways: |
| 179 1. Log to stdout. |
| 180 2. Log to local files for aggregating multiple test steps |
| 181 (on buildbots only). |
| 182 3. Log to flakiness dashboard (on buildbots only). |
| 183 |
| 184 Args: |
| 185 test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.). |
| 186 test_package: Test package name (e.g. 'ipc_tests' for gtests, |
| 187 'ContentShellTest' for instrumentation tests). |
| 188 annotation: If instrumentation test type, this is a list of annotations |
| 189 (e.g. ['Smoke', 'SmallTest']). |
| 190 build_type: Release/Debug |
| 191 all_tests: A list of all tests that were supposed to run. |
| 192 This is used to determine which tests have failed to run. |
| 193 If None, we assume all tests ran. |
| 194 flakiness_server: If provided, upload the results to the flakiness dashboard |
| 195 with this URL. |
| 196 """ |
130 # Output all broken tests or 'passed' if none broken. | 197 # Output all broken tests or 'passed' if none broken. |
131 logging.critical('*' * 80) | 198 logging.critical('*' * 80) |
132 logging.critical('Final result') | 199 logging.critical('Final result:') |
133 if self.failed: | 200 if self.failed: |
134 logging.critical('Failed:') | 201 logging.critical('Failed:') |
135 self._Log(sorted(self.failed)) | 202 self._Log(sorted(self.failed)) |
136 if self.crashed: | 203 if self.crashed: |
137 logging.critical('Crashed:') | 204 logging.critical('Crashed:') |
138 self._Log(sorted(self.crashed)) | 205 self._Log(sorted(self.crashed)) |
139 if self.unknown: | 206 if self.unknown: |
140 logging.critical('Unknown:') | 207 logging.critical('Unknown:') |
141 self._Log(sorted(self.unknown)) | 208 self._Log(sorted(self.unknown)) |
142 if not self.GetAllBroken(): | 209 if not self.GetAllBroken(): |
143 logging.critical('Passed') | 210 logging.critical('Passed') |
144 logging.critical('*' * 80) | |
145 | |
146 # Summarize in a log file, if tests are running on bots. | |
147 if test_group and test_suite and os.environ.get('BUILDBOT_BUILDERNAME'): | |
148 log_file_path = os.path.join(constants.CHROME_DIR, 'out', | |
149 build_type, 'test_logs') | |
150 if not os.path.exists(log_file_path): | |
151 os.mkdir(log_file_path) | |
152 full_file_name = os.path.join(log_file_path, test_group) | |
153 if not os.path.exists(full_file_name): | |
154 with open(full_file_name, 'w') as log_file: | |
155 print >> log_file, '\n%s results for %s build %s:' % ( | |
156 test_group, os.environ.get('BUILDBOT_BUILDERNAME'), | |
157 os.environ.get('BUILDBOT_BUILDNUMBER')) | |
158 log_contents = [' %s result : %d tests ran' % (test_suite, | |
159 len(self.ok) + | |
160 len(self.failed) + | |
161 len(self.crashed) + | |
162 len(self.unknown))] | |
163 content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)), | |
164 ('crashed', len(self.crashed))] | |
165 for (result, count) in content_pairs: | |
166 if count: | |
167 log_contents.append(', %d tests %s' % (count, result)) | |
168 with open(full_file_name, 'a') as log_file: | |
169 print >> log_file, ''.join(log_contents) | |
170 content = {'test_group': test_group, | |
171 'ok': [t.name for t in self.ok], | |
172 'failed': [t.name for t in self.failed], | |
173 'crashed': [t.name for t in self.failed], | |
174 'unknown': [t.name for t in self.unknown],} | |
175 with open(os.path.join(log_file_path, 'results.json'), 'a') as json_file: | |
176 print >> json_file, json.dumps(content) | |
177 | 211 |
178 # Summarize in the test output. | 212 # Summarize in the test output. |
| 213 logging.critical('*' * 80) |
179 summary = ['Summary:\n'] | 214 summary = ['Summary:\n'] |
180 if tests_to_run: | 215 if all_tests: |
181 summary += ['TESTS_TO_RUN=%d\n' % (len(tests_to_run))] | 216 summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)] |
182 num_tests_ran = (len(self.ok) + len(self.failed) + | 217 num_tests_ran = (len(self.ok) + len(self.failed) + |
183 len(self.crashed) + len(self.unknown)) | 218 len(self.crashed) + len(self.unknown)) |
184 tests_passed = [t.name for t in self.ok] | 219 tests_passed = [t.name for t in self.ok] |
185 tests_failed = [t.name for t in self.failed] | 220 tests_failed = [t.name for t in self.failed] |
186 tests_crashed = [t.name for t in self.crashed] | 221 tests_crashed = [t.name for t in self.crashed] |
187 tests_unknown = [t.name for t in self.unknown] | 222 tests_unknown = [t.name for t in self.unknown] |
188 summary += ['RAN=%d\n' % (num_tests_ran), | 223 summary += ['RAN=%d\n' % (num_tests_ran), |
189 'PASSED=%d\n' % len(tests_passed), | 224 'PASSED=%d\n' % len(tests_passed), |
190 'FAILED=%d %s\n' % (len(tests_failed), tests_failed), | 225 'FAILED=%d %s\n' % (len(tests_failed), tests_failed), |
191 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed), | 226 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed), |
192 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)] | 227 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)] |
193 if tests_to_run and num_tests_ran != len(tests_to_run): | 228 if all_tests and num_tests_ran != len(all_tests): |
194 # Add the list of tests we failed to run. | 229 # Add the list of tests we failed to run. |
195 tests_failed_to_run = list(set(tests_to_run) - set(tests_passed) - | 230 tests_failed_to_run = list(set(all_tests) - set(tests_passed) - |
196 set(tests_failed) - set(tests_crashed) - | 231 set(tests_failed) - set(tests_crashed) - |
197 set(tests_unknown)) | 232 set(tests_unknown)) |
198 summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run), | 233 summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run), |
199 tests_failed_to_run)] | 234 tests_failed_to_run)] |
200 summary_string = ''.join(summary) | 235 summary_string = ''.join(summary) |
201 logging.critical(summary_string) | 236 logging.critical(summary_string) |
202 return summary_string | 237 logging.critical('*' * 80) |
| 238 |
| 239 if os.environ.get('BUILDBOT_BUILDERNAME'): |
| 240 # It is possible to have multiple buildbot steps for the same |
| 241 # instrumentation test package using different annotations. |
| 242 if annotation and len(annotation) == 1: |
| 243 test_suite = annotation[0] |
| 244 else: |
| 245 test_suite = test_package |
| 246 self._LogToFile(test_type, test_suite, build_type) |
| 247 |
| 248 if flakiness_server: |
| 249 self._LogToFlakinessDashboard(test_type, test_package, flakiness_server) |
203 | 250 |
204 def PrintAnnotation(self): | 251 def PrintAnnotation(self): |
205 """Print buildbot annotations for test results.""" | 252 """Print buildbot annotations for test results.""" |
206 if self.failed or self.crashed or self.overall_fail or self.timed_out: | 253 if self.failed or self.crashed or self.overall_fail or self.timed_out: |
207 buildbot_report.PrintError() | 254 buildbot_report.PrintError() |
208 else: | 255 else: |
209 print 'Step success!' # No annotation needed | 256 print 'Step success!' # No annotation needed |
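A minimal usage sketch of the reworked LogFull() entry point, for reviewers
following the new signature (not part of this CL). Only the keyword arguments
come from the docstring above; the results class name, its constructor, and
the module name are assumptions and are marked as such in the comments.

  # Assumes the results class elided in the skipped lines above is named
  # TestResults and that this module is importable as test_result; both are
  # guesses, not confirmed by the diff.
  import test_result

  results = test_result.TestResults()  # assumed zero-argument constructor
  results.failed.append(
      test_result.BaseTestResult('FooTest.testBar', 'failure\r\nlog'))  # '\r' is stripped
  results.LogFull(
      test_type='Instrumentation',
      test_package='ContentShellTest',
      annotation=['Smoke'],  # on bots, a single annotation names the per-step log file
      build_type='Debug',
      all_tests=['FooTest.testBar', 'FooTest.testBaz'],  # enables FAILED_TO_RUN reporting
      flakiness_server=None)  # pass a dashboard URL to also upload the results

With BUILDBOT_BUILDERNAME unset and flakiness_server left as None, this only
logs the summary to stdout; on bots it would additionally append a summary
line to out/Debug/test_logs/Instrumentation and one JSON line per step to
out/Debug/test_logs/results.json for later aggregation.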