| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 | 5 |
| 6 import json | 6 import json |
| 7 import logging | 7 import logging |
| 8 import os | 8 import os |
| 9 import re | 9 import re |
| 10 import time | 10 import time |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 48 | 48 |
| 49 class TestResults(object): | 49 class TestResults(object): |
| 50 """Results of a test run.""" | 50 """Results of a test run.""" |
| 51 | 51 |
| 52 def __init__(self): | 52 def __init__(self): |
| 53 self.ok = [] | 53 self.ok = [] |
| 54 self.failed = [] | 54 self.failed = [] |
| 55 self.crashed = [] | 55 self.crashed = [] |
| 56 self.unknown = [] | 56 self.unknown = [] |
| 57 self.timed_out = [] | 57 self.timed_out = [] |
| 58 self.overall_timed_out = False |
| 59 self.overall_fail = False |
| 58 self.device_exception = None | 60 self.device_exception = None |
| 59 | 61 |
| 60 @staticmethod | 62 @staticmethod |
| 61 def FromRun(ok=None, failed=None, crashed=None, timed_out=None): | 63 def FromRun(ok=None, failed=None, crashed=None, timed_out=None, |
| 64 overall_timed_out=False, overall_fail=False, |
| 65 device_exception=None): |
| 62 ret = TestResults() | 66 ret = TestResults() |
| 63 ret.ok = ok or [] | 67 ret.ok = ok or [] |
| 64 ret.failed = failed or [] | 68 ret.failed = failed or [] |
| 65 ret.crashed = crashed or [] | 69 ret.crashed = crashed or [] |
| 66 ret.timed_out = timed_out or [] | 70 ret.timed_out = timed_out or [] |
| 71 ret.overall_timed_out = overall_timed_out |
| 72 ret.overall_fail = overall_fail |
| 73 ret.device_exception = device_exception |
| 67 return ret | 74 return ret |
| 68 | 75 |
| 69 @staticmethod | 76 @staticmethod |
| 70 def FromTestResults(results): | 77 def FromTestResults(results): |
| 71 """Combines a list of results in a single TestResults object.""" | 78 """Combines a list of results in a single TestResults object.""" |
| 72 ret = TestResults() | 79 ret = TestResults() |
| 73 for t in results: | 80 for t in results: |
| 74 ret.ok += t.ok | 81 ret.ok += t.ok |
| 75 ret.failed += t.failed | 82 ret.failed += t.failed |
| 76 ret.crashed += t.crashed | 83 ret.crashed += t.crashed |
| 77 ret.unknown += t.unknown | 84 ret.unknown += t.unknown |
| 78 ret.timed_out += t.timed_out | 85 ret.timed_out += t.timed_out |
| 86 if t.overall_timed_out: |
| 87 ret.overall_timed_out = True |
| 88 if t.overall_fail: |
| 89 ret.overall_fail = True |
| 79 return ret | 90 return ret |
| 80 | 91 |
| 81 @staticmethod | 92 @staticmethod |
| 82 def FromPythonException(test_name, start_date_ms, exc_info): | 93 def FromPythonException(test_name, start_date_ms, exc_info): |
| 83 """Constructs a TestResults with exception information for the given test. | 94 """Constructs a TestResults with exception information for the given test. |
| 84 | 95 |
| 85 Args: | 96 Args: |
| 86 test_name: name of the test which raised an exception. | 97 test_name: name of the test which raised an exception. |
| 87 start_date_ms: the starting time for the test. | 98 start_date_ms: the starting time for the test. |
| 88 exc_info: exception info, ostensibly from sys.exc_info(). | 99 exc_info: exception info, ostensibly from sys.exc_info(). |
| (...skipping 21 matching lines...) Expand all Loading... |
| 110 def DeviceExceptions(results): | 121 def DeviceExceptions(results): |
| 111 return set(filter(lambda t: t.device_exception, results)) | 122 return set(filter(lambda t: t.device_exception, results)) |
| 112 | 123 |
| 113 def _Log(self, sorted_list): | 124 def _Log(self, sorted_list): |
| 114 for t in sorted_list: | 125 for t in sorted_list: |
| 115 logging.critical(t.name) | 126 logging.critical(t.name) |
| 116 if t.log: | 127 if t.log: |
| 117 logging.critical(t.log) | 128 logging.critical(t.log) |
| 118 | 129 |
| 119 def GetAllBroken(self): | 130 def GetAllBroken(self): |
| 120 """Returns all the broken tests.""" | 131 """Returns all the broken tests.""" |
| 121 return self.failed + self.crashed + self.unknown + self.timed_out | 132 return self.failed + self.crashed + self.unknown + self.timed_out |
| 122 | 133 |
| 123 def GetAll(self): | |
| 124 """Returns all the tests.""" | |
| 125 return self.ok + self.GetAllBroken() | |
| 126 | |
| 127 def _LogToFile(self, test_type, test_suite, build_type): | 134 def _LogToFile(self, test_type, test_suite, build_type): |
| 128 """Log results to local files which can be used for aggregation later.""" | 135 """Log results to local files which can be used for aggregation later.""" |
| 129 # TODO(frankf): Report tests that failed to run here too. | 136 # TODO(frankf): Report tests that failed to run here too. |
| 130 log_file_path = os.path.join(constants.CHROME_DIR, 'out', | 137 log_file_path = os.path.join(constants.CHROME_DIR, 'out', |
| 131 build_type, 'test_logs') | 138 build_type, 'test_logs') |
| 132 if not os.path.exists(log_file_path): | 139 if not os.path.exists(log_file_path): |
| 133 os.mkdir(log_file_path) | 140 os.mkdir(log_file_path) |
| 134 full_file_name = os.path.join( | 141 full_file_name = os.path.join( |
| 135 log_file_path, re.sub('\W', '_', test_type).lower() + '.log') | 142 log_file_path, re.sub('\W', '_', test_type).lower() + '.log') |
| 136 if not os.path.exists(full_file_name): | 143 if not os.path.exists(full_file_name): |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 187 # Downstream server. | 194 # Downstream server. |
| 188 else: | 195 else: |
| 189 dashboard_test_type = 'Chromium_Android_Instrumentation' | 196 dashboard_test_type = 'Chromium_Android_Instrumentation' |
| 190 | 197 |
| 191 flakiness_dashboard_results_uploader.Upload( | 198 flakiness_dashboard_results_uploader.Upload( |
| 192 flakiness_server, dashboard_test_type, self) | 199 flakiness_server, dashboard_test_type, self) |
| 193 except Exception as e: | 200 except Exception as e: |
| 194 logging.error(e) | 201 logging.error(e) |
| 195 | 202 |
| 196 def LogFull(self, test_type, test_package, annotation=None, | 203 def LogFull(self, test_type, test_package, annotation=None, |
| 197 build_type='Debug', flakiness_server=None): | 204 build_type='Debug', all_tests=None, flakiness_server=None): |
| 198 """Log the test results for the test suite. | 205 """Log the test results for the test suite. |
| 199 | 206 |
| 200 The results will be logged three different ways: | 207 The results will be logged three different ways: |
| 201 1. Log to stdout. | 208 1. Log to stdout. |
| 202 2. Log to local files for aggregating multiple test steps | 209 2. Log to local files for aggregating multiple test steps |
| 203 (on buildbots only). | 210 (on buildbots only). |
| 204 3. Log to flakiness dashboard (on buildbots only). | 211 3. Log to flakiness dashboard (on buildbots only). |
| 205 | 212 |
| 206 Args: | 213 Args: |
| 207 test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.). | 214 test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.). |
| 208 test_package: Test package name (e.g. 'ipc_tests' for gtests, | 215 test_package: Test package name (e.g. 'ipc_tests' for gtests, |
| 209 'ContentShellTest' for instrumentation tests) | 216 'ContentShellTest' for instrumentation tests) |
| 210 annotation: If instrumentation test type, this is a list of annotations | 217 annotation: If instrumentation test type, this is a list of annotations |
| 211 (e.g. ['Smoke', 'SmallTest']). | 218 (e.g. ['Smoke', 'SmallTest']). |
| 212 build_type: Release/Debug | 219 build_type: Release/Debug |
| 220 all_tests: A list of all tests that were supposed to run. |
| 221 This is used to determine which tests have failed to run. |
| 222 If None, we assume all tests ran. |
| 213 flakiness_server: If provided, upload the results to flakiness dashboard | 223 flakiness_server: If provided, upload the results to flakiness dashboard |
| 214 with this URL. | 224 with this URL. |
| 215 """ | 225 """ |
| 216 # Output all broken tests or 'passed' if none broken. | 226 # Output all broken tests or 'passed' if none broken. |
| 217 logging.critical('*' * 80) | 227 logging.critical('*' * 80) |
| 218 logging.critical('Final result:') | 228 logging.critical('Final result:') |
| 219 if self.failed: | 229 if self.failed: |
| 220 logging.critical('Failed:') | 230 logging.critical('Failed:') |
| 221 self._Log(sorted(self.failed)) | 231 self._Log(sorted(self.failed)) |
| 222 if self.crashed: | 232 if self.crashed: |
| 223 logging.critical('Crashed:') | 233 logging.critical('Crashed:') |
| 224 self._Log(sorted(self.crashed)) | 234 self._Log(sorted(self.crashed)) |
| 225 if self.timed_out: | 235 if self.timed_out: |
| 226 logging.critical('Timed out:') | 236 logging.critical('Timed out:') |
| 227 self._Log(sorted(self.timed_out)) | 237 self._Log(sorted(self.timed_out)) |
| 228 if self.unknown: | 238 if self.unknown: |
| 229 logging.critical('Unknown:') | 239 logging.critical('Unknown:') |
| 230 self._Log(sorted(self.unknown)) | 240 self._Log(sorted(self.unknown)) |
| 231 if not self.GetAllBroken(): | 241 if not self.GetAllBroken(): |
| 232 logging.critical('Passed') | 242 logging.critical('Passed') |
| 233 | 243 |
| 234 # Summarize in the test output. | 244 # Summarize in the test output. |
| 235 num_tests_ran = len(self.GetAll()) | 245 logging.critical('*' * 80) |
| 246 summary = ['Summary:\n'] |
| 247 if all_tests: |
| 248 summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)] |
| 249 num_tests_ran = (len(self.ok) + len(self.failed) + |
| 250 len(self.crashed) + len(self.unknown) + |
| 251 len(self.timed_out)) |
| 236 tests_passed = [t.name for t in self.ok] | 252 tests_passed = [t.name for t in self.ok] |
| 237 tests_failed = [t.name for t in self.failed] | 253 tests_failed = [t.name for t in self.failed] |
| 238 tests_crashed = [t.name for t in self.crashed] | 254 tests_crashed = [t.name for t in self.crashed] |
| 255 tests_unknown = [t.name for t in self.unknown] |
| 239 tests_timed_out = [t.name for t in self.timed_out] | 256 tests_timed_out = [t.name for t in self.timed_out] |
| 240 tests_unknown = [t.name for t in self.unknown] | |
| 241 logging.critical('*' * 80) | |
| 242 summary = ['Summary:\n'] | |
| 243 summary += ['RAN=%d\n' % (num_tests_ran), | 257 summary += ['RAN=%d\n' % (num_tests_ran), |
| 244 'PASSED=%d\n' % len(tests_passed), | 258 'PASSED=%d\n' % len(tests_passed), |
| 245 'FAILED=%d %s\n' % (len(tests_failed), tests_failed), | 259 'FAILED=%d %s\n' % (len(tests_failed), tests_failed), |
| 246 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed), | 260 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed), |
| 247 'TIMEDOUT=%d %s\n' % (len(tests_timed_out), tests_timed_out), | 261 'TIMEDOUT=%d %s\n' % (len(tests_timed_out), tests_timed_out), |
| 248 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)] | 262 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)] |
| 263 if all_tests and num_tests_ran != len(all_tests): |
| 264 # Add the list of tests we failed to run. |
| 265 tests_failed_to_run = list(set(all_tests) - set(tests_passed) - |
| 266 set(tests_failed) - set(tests_crashed) - |
| 267 set(tests_unknown) - set(tests_timed_out)) |
| 268 summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run), |
| 269 tests_failed_to_run)] |
| 249 summary_string = ''.join(summary) | 270 summary_string = ''.join(summary) |
| 250 logging.critical(summary_string) | 271 logging.critical(summary_string) |
| 251 logging.critical('*' * 80) | 272 logging.critical('*' * 80) |
| 252 | 273 |
| 253 if os.environ.get('BUILDBOT_BUILDERNAME'): | 274 if os.environ.get('BUILDBOT_BUILDERNAME'): |
| 254 # It is possible to have multiple buildbot steps for the same | 275 # It is possible to have multiple buildbot steps for the same |
| 255 # instrumentation test package using different annotations. | 276 # instrumentation test package using different annotations. |
| 256 if annotation and len(annotation) == 1: | 277 if annotation and len(annotation) == 1: |
| 257 test_suite = annotation[0] | 278 test_suite = annotation[0] |
| 258 else: | 279 else: |
| 259 test_suite = test_package | 280 test_suite = test_package |
| 260 self._LogToFile(test_type, test_suite, build_type) | 281 self._LogToFile(test_type, test_suite, build_type) |
| 261 | 282 |
| 262 if flakiness_server: | 283 if flakiness_server: |
| 263 self._LogToFlakinessDashboard(test_type, test_package, flakiness_server) | 284 self._LogToFlakinessDashboard(test_type, test_package, flakiness_server) |
| 264 | 285 |
| 265 def PrintAnnotation(self): | 286 def PrintAnnotation(self): |
| 266 """Print buildbot annotations for test results.""" | 287 """Print buildbot annotations for test results.""" |
| 267 if self.GetAllBroken(): | 288 if (self.failed or self.crashed or self.overall_fail or |
| 289 self.overall_timed_out): |
| 268 buildbot_report.PrintError() | 290 buildbot_report.PrintError() |
| 269 else: | 291 else: |
| 270 print 'Step success!' # No annotation needed | 292 print 'Step success!' # No annotation needed |
| OLD | NEW |