# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import json
import logging
import os
import re
import time
import traceback

from pylib import buildbot_report
from pylib import constants
from pylib.utils import flakiness_dashboard_results_uploader


class BaseTestResult(object):
  """A single result from a unit test."""

  def __init__(self, name, log):
    self.name = name
    self.log = log.replace('\r', '')


class SingleTestResult(BaseTestResult):
  """Result information for a single test.

  Args:
    full_name: Full name of the test.
    start_date: Date in milliseconds when the test began running.
    dur: Duration of the test run in milliseconds.
    log: An optional string listing any errors.
  """

  def __init__(self, full_name, start_date, dur, log=''):
    BaseTestResult.__init__(self, full_name, log)
    # Split the test method off the class name. Use maxsplit=1 so any
    # extra '#' stays with the class name.
    name_pieces = full_name.rsplit('#', 1)
    if len(name_pieces) > 1:
      self.test_name = name_pieces[1]
      self.class_name = name_pieces[0]
    else:
      self.class_name = full_name
      self.test_name = full_name
    self.start_date = start_date
    self.dur = dur


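# A minimal sketch of how SingleTestResult parses a full test name (the
# test name below is hypothetical, not taken from any real suite):
#
#   result = SingleTestResult('org.chromium.FooTest#testBar',
#                             start_date=1346000000000, dur=42)
#   result.class_name  # -> 'org.chromium.FooTest'
#   result.test_name   # -> 'testBar'

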
class TestResults(object):
  """Results of a test run."""

  def __init__(self):
    self.ok = []
    self.failed = []
    self.crashed = []
    self.unknown = []
    self.timed_out = []
    self.device_exception = None

  @staticmethod
  def FromRun(ok=None, failed=None, crashed=None, timed_out=None):
    ret = TestResults()
    ret.ok = ok or []
    ret.failed = failed or []
    ret.crashed = crashed or []
    ret.timed_out = timed_out or []
    return ret

  @staticmethod
  def FromTestResults(results):
    """Combines a list of results into a single TestResults object."""
    ret = TestResults()
    for t in results:
      ret.ok += t.ok
      ret.failed += t.failed
      ret.crashed += t.crashed
      ret.unknown += t.unknown
      ret.timed_out += t.timed_out
    return ret

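  # A minimal sketch of combining per-run results into one object (the
  # variable names here are illustrative only):
  #
  #   per_device = [TestResults.FromRun(ok=[result]),
  #                 TestResults.FromRun(failed=[other_result])]
  #   combined = TestResults.FromTestResults(per_device)
  #   len(combined.ok), len(combined.failed)  # -> 1, 1
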
  @staticmethod
  def FromPythonException(test_name, start_date_ms, exc_info):
    """Constructs a TestResults with exception information for the given test.

    Args:
      test_name: name of the test which raised an exception.
      start_date_ms: the starting time for the test.
      exc_info: exception info, ostensibly from sys.exc_info().

    Returns:
      A TestResults object with a SingleTestResult in the failed list.
    """
    exc_type, exc_value, exc_traceback = exc_info
    trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
                                                    exc_traceback))
    log_msg = 'Exception:\n' + trace_info
    duration_ms = (int(time.time()) * 1000) - start_date_ms

    exc_result = SingleTestResult(
        full_name='PythonWrapper#' + test_name,
        start_date=start_date_ms,
        dur=duration_ms,
        log=(str(exc_type) + ' ' + log_msg))

    results = TestResults()
    results.failed.append(exc_result)
    return results

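  # A minimal sketch of wrapping a Python exception as a failed result,
  # assuming the caller recorded start_ms beforehand (RunTheTest is a
  # hypothetical entry point):
  #
  #   import sys, time
  #   start_ms = int(time.time()) * 1000
  #   try:
  #     RunTheTest()
  #   except Exception:
  #     results = TestResults.FromPythonException(
  #         'MyTest', start_ms, sys.exc_info())
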
  @staticmethod
  def DeviceExceptions(results):
    """Returns the set of results that carry a device exception."""
    return set(filter(lambda t: t.device_exception, results))

  def _Log(self, sorted_list):
    for t in sorted_list:
      logging.critical(t.name)
      if t.log:
        logging.critical(t.log)

  def GetAllBroken(self):
    """Returns all the broken tests."""
    return self.failed + self.crashed + self.unknown + self.timed_out

  def GetAll(self):
    """Returns all the tests."""
    return self.ok + self.GetAllBroken()

  def _LogToFile(self, test_type, test_suite, build_type):
    """Log results to local files which can be used for aggregation later."""
    # TODO(frankf): Report tests that failed to run here too.
    log_file_path = os.path.join(constants.CHROME_DIR, 'out',
                                 build_type, 'test_logs')
    if not os.path.exists(log_file_path):
      os.mkdir(log_file_path)
    full_file_name = os.path.join(
        log_file_path, re.sub(r'\W', '_', test_type).lower() + '.log')
    if not os.path.exists(full_file_name):
      with open(full_file_name, 'w') as log_file:
        print >> log_file, '\n%s results for %s build %s:' % (
            test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
            os.environ.get('BUILDBOT_BUILDNUMBER'))
      logging.info('Writing results to %s.' % full_file_name)
    log_contents = ['  %s result : %d tests ran' % (test_suite,
                                                    len(self.ok) +
                                                    len(self.failed) +
                                                    len(self.crashed) +
                                                    len(self.timed_out) +
                                                    len(self.unknown))]
    content_pairs = [('passed', len(self.ok)),
                     ('failed', len(self.failed)),
                     ('crashed', len(self.crashed)),
                     ('timed_out', len(self.timed_out)),
                     ('unknown', len(self.unknown))]
    for (result, count) in content_pairs:
      if count:
        log_contents.append(', %d tests %s' % (count, result))
    with open(full_file_name, 'a') as log_file:
      print >> log_file, ''.join(log_contents)
    logging.info('Writing results to %s.' % full_file_name)
    content = {'test_group': test_type,
               'ok': [t.name for t in self.ok],
               'failed': [t.name for t in self.failed],
               'crashed': [t.name for t in self.crashed],
               'timed_out': [t.name for t in self.timed_out],
               'unknown': [t.name for t in self.unknown]}
    json_file_path = os.path.join(log_file_path, 'results.json')
    with open(json_file_path, 'a') as json_file:
      print >> json_file, json.dumps(content)
    logging.info('Writing results to %s.' % json_file_path)

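  # Each call above appends one JSON object per line to results.json,
  # shaped roughly like this (values and key order are illustrative):
  #
  #   {"test_group": "Instrumentation", "ok": ["FooTest#testBar"],
  #    "failed": [], "crashed": [], "timed_out": [], "unknown": []}
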
  def _LogToFlakinessDashboard(self, test_type, test_package,
                               flakiness_server):
    """Uploads results to the flakiness dashboard."""
    logging.info('Upload results for test type "%s", test package "%s" to %s' %
                 (test_type, test_package, flakiness_server))

    # TODO(frankf): Enable uploading for gtests.
    if test_type != 'Instrumentation':
      logging.warning('Invalid test type.')
      return

    try:
      if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER:
        assert test_package in ['ContentShellTest',
                                'ChromiumTestShellTest',
                                'AndroidWebViewTest']
        # E.g. 'ContentShellTest' -> 'contentshell_instrumentation_tests'.
        dashboard_test_type = ('%s_instrumentation_tests' %
                               test_package.lower().rstrip('test'))
      # Downstream server.
      else:
        dashboard_test_type = 'Chromium_Android_Instrumentation'

      flakiness_dashboard_results_uploader.Upload(
          flakiness_server, dashboard_test_type, self)
    except Exception as e:
      logging.error(e)

  def LogFull(self, test_type, test_package, annotation=None,
              build_type='Debug', flakiness_server=None):
    """Logs the test results for the test suite.

    The results will be logged three different ways:
      1. Log to stdout.
      2. Log to local files for aggregating multiple test steps
         (on buildbots only).
      3. Log to flakiness dashboard (on buildbots only).

    Args:
      test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
      test_package: Test package name (e.g. 'ipc_tests' for gtests,
                    'ContentShellTest' for instrumentation tests).
      annotation: If instrumentation test type, this is a list of annotations
                  (e.g. ['Smoke', 'SmallTest']).
      build_type: Release/Debug.
      flakiness_server: If provided, upload the results to the flakiness
                        dashboard at this URL.
    """
    # Output all broken tests or 'passed' if none broken.
    logging.critical('*' * 80)
    logging.critical('Final result:')
    if self.failed:
      logging.critical('Failed:')
      self._Log(sorted(self.failed))
    if self.crashed:
      logging.critical('Crashed:')
      self._Log(sorted(self.crashed))
    if self.timed_out:
      logging.critical('Timed out:')
      self._Log(sorted(self.timed_out))
    if self.unknown:
      logging.critical('Unknown:')
      self._Log(sorted(self.unknown))
    if not self.GetAllBroken():
      logging.critical('Passed')

    # Summarize in the test output.
    num_tests_ran = len(self.GetAll())
    tests_passed = [t.name for t in self.ok]
    tests_failed = [t.name for t in self.failed]
    tests_crashed = [t.name for t in self.crashed]
    tests_timed_out = [t.name for t in self.timed_out]
    tests_unknown = [t.name for t in self.unknown]
    logging.critical('*' * 80)
    summary = ['Summary:\n']
    summary += ['RAN=%d\n' % (num_tests_ran),
                'PASSED=%d\n' % len(tests_passed),
                'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                'TIMEDOUT=%d %s\n' % (len(tests_timed_out), tests_timed_out),
                'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
    summary_string = ''.join(summary)
    logging.critical(summary_string)
    logging.critical('*' * 80)

    if os.environ.get('BUILDBOT_BUILDERNAME'):
      # It is possible to have multiple buildbot steps for the same
      # instrumentation test package using different annotations.
      if annotation and len(annotation) == 1:
        test_suite = annotation[0]
      else:
        test_suite = test_package
      self._LogToFile(test_type, test_suite, build_type)

    if flakiness_server:
      self._LogToFlakinessDashboard(test_type, test_package, flakiness_server)

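  # A minimal sketch of the typical reporting sequence at the end of a
  # test run (argument values are illustrative):
  #
  #   results.LogFull(test_type='Instrumentation',
  #                   test_package='ContentShellTest',
  #                   annotation=['Smoke'],
  #                   build_type='Debug')
  #   results.PrintAnnotation()
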
  def PrintAnnotation(self):
    """Print buildbot annotations for test results."""
    if self.GetAllBroken():
      buildbot_report.PrintError()
    else:
      print 'Step success!'  # No annotation needed