Chromium Code Reviews

Unified Diff: build/android/pylib/test_result.py

Issue 11616010: Refactor android test results logging. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years ago
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.


 import json
 import logging
 import os
 import time
 import traceback

 import buildbot_report
 import constants
+import flakiness_dashboard_results_uploader


 class BaseTestResult(object):
   """A single result from a unit test."""

   def __init__(self, name, log):
     self.name = name
     self.log = log.replace('\r', '')

(...skipping 94 matching lines...)
   def _Log(self, sorted_list):
     for t in sorted_list:
       logging.critical(t.name)
       if t.log:
         logging.critical(t.log)

   def GetAllBroken(self):
     """Returns all broken tests, including failed, crashed, and unknown."""
     return self.failed + self.crashed + self.unknown

-  def LogFull(self, test_group, test_suite, build_type, tests_to_run):
-    """Output broken test logs, summarize in a log file and the test output."""
+  def _LogToFile(self, test_type, test_suite, build_type):
+    """Log results to local files which can be used for aggregation later."""
+    # TODO(frankf): Report tests that failed to run here too.
+    log_file_path = os.path.join(constants.CHROME_DIR, 'out',
+                                 build_type, 'test_logs')
+    if not os.path.exists(log_file_path):
+      os.mkdir(log_file_path)
craigdh 2012/12/18 17:41:59: Consider os.makedirs if you want this to always succeed.
frankf 2012/12/18 21:33:50: Yes, it needs to exist. Even on the bots where we…
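A minimal sketch of the os.makedirs variant discussed above (my illustration, not part of this CL; the helper name is hypothetical). On Python 2, os.makedirs raises OSError if the directory already exists, so a guard is still needed:

  import errno
  import os

  def _EnsureDirExists(path):
    # Hypothetical helper: creates the whole directory tree and tolerates
    # the case where another step created it already.
    try:
      os.makedirs(path)
    except OSError, e:
      if e.errno != errno.EEXIST:
        raise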
+    full_file_name = os.path.join(log_file_path, test_type)
+    if not os.path.exists(full_file_name):
+      with open(full_file_name, 'w') as log_file:
+        print >> log_file, '\n%s results for %s build %s:' % (
+            test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
+            os.environ.get('BUILDBOT_BUILDNUMBER'))
+      logging.info('Writing results to %s.' % full_file_name)
+    log_contents = ['  %s result : %d tests ran' % (test_suite,
+                                                    len(self.ok) +
+                                                    len(self.failed) +
+                                                    len(self.crashed) +
+                                                    len(self.unknown))]
+    content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)),
+                     ('crashed', len(self.crashed))]
+    for (result, count) in content_pairs:
+      if count:
+        log_contents.append(', %d tests %s' % (count, result))
+    with open(full_file_name, 'a') as log_file:
+      print >> log_file, ''.join(log_contents)
+    logging.info('Writing results to %s.' % full_file_name)
+    content = {'test_group': test_type,
+               'ok': [t.name for t in self.ok],
+               'failed': [t.name for t in self.failed],
+               'crashed': [t.name for t in self.crashed],
+               'unknown': [t.name for t in self.unknown],}
+    json_file_path = os.path.join(log_file_path, 'results.json')
+    with open(json_file_path, 'a') as json_file:
+      print >> json_file, json.dumps(content)
+    logging.info('Writing results to %s.' % json_file_path)
+
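Because results.json is appended one JSON object per line, a later aggregation step could parse it back like this (an illustrative sketch, not part of this CL; the function name is made up):

  import json

  def _ReadResultsLog(json_file_path):
    # Each line is an independent JSON object written by one test step;
    # collect them all so multiple steps can be summarized together.
    results = []
    with open(json_file_path) as json_file:
      for line in json_file:
        line = line.strip()
        if line:
          results.append(json.loads(line))
    return results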
+  def _LogToFlakinessDashboard(self, test_type, test_package, flakiness_server):
+    """Upload results to the flakiness dashboard."""
+    # TODO(frankf): Fix upstream/downstream reporting for both test types.
+    logging.info('Upload %s %s to %s' % (test_type, test_package,
+                                         flakiness_server))
+    flakiness_dashboard_results_uploader.Upload(
+        flakiness_server, 'Chromium_Android_Instrumentation', self)
+
+  def LogFull(self, test_type, test_package, annotation=None,
+              build_type='Debug', all_tests=None, flakiness_server=None):
+    """Log the test results for the test suite.
+
+    The results will be logged three different ways:
+      1. Log to stdout.
+      2. Log to local files for aggregating multiple test steps
+         (on buildbots only).
+      3. Log to flakiness dashboard (on buildbots only).
+
+    Args:
+      test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
+      test_package: Test package name (e.g. 'ipc_tests' for gtests,
+                    'ContentShellTest' for instrumentation tests).
+      annotation: If instrumentation test type, this is a list of annotations
+                  (e.g. ['Smoke', 'SmallTest']).
+      build_type: Release/Debug
+      all_test: A list of all tests in this test suite. This is used to
nilesh 2012/12/18 18:08:31: s/all_test/all_tests. The comment is not true. all…
frankf 2012/12/18 21:33:50: Reworded the description. all_tests is the final f…
+                determine which tests have unknown results.
+      flakiness_server: Flakiness dashboard server URL.
+    """
     # Output all broken tests or 'passed' if none broken.
     logging.critical('*' * 80)
-    logging.critical('Final result')
+    logging.critical('Final result:')
     if self.failed:
       logging.critical('Failed:')
       self._Log(sorted(self.failed))
     if self.crashed:
       logging.critical('Crashed:')
       self._Log(sorted(self.crashed))
     if self.unknown:
       logging.critical('Unknown:')
       self._Log(sorted(self.unknown))
     if not self.GetAllBroken():
       logging.critical('Passed')
-    logging.critical('*' * 80)
-
-    # Summarize in a log file, if tests are running on bots.
-    if test_group and test_suite and os.environ.get('BUILDBOT_BUILDERNAME'):
-      log_file_path = os.path.join(constants.CHROME_DIR, 'out',
-                                   build_type, 'test_logs')
-      if not os.path.exists(log_file_path):
-        os.mkdir(log_file_path)
-      full_file_name = os.path.join(log_file_path, test_group)
-      if not os.path.exists(full_file_name):
-        with open(full_file_name, 'w') as log_file:
-          print >> log_file, '\n%s results for %s build %s:' % (
-              test_group, os.environ.get('BUILDBOT_BUILDERNAME'),
-              os.environ.get('BUILDBOT_BUILDNUMBER'))
-      log_contents = ['  %s result : %d tests ran' % (test_suite,
-                                                      len(self.ok) +
-                                                      len(self.failed) +
-                                                      len(self.crashed) +
-                                                      len(self.unknown))]
-      content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)),
-                       ('crashed', len(self.crashed))]
-      for (result, count) in content_pairs:
-        if count:
-          log_contents.append(', %d tests %s' % (count, result))
-      with open(full_file_name, 'a') as log_file:
-        print >> log_file, ''.join(log_contents)
-      content = {'test_group': test_group,
-                 'ok': [t.name for t in self.ok],
-                 'failed': [t.name for t in self.failed],
-                 'crashed': [t.name for t in self.failed],
-                 'unknown': [t.name for t in self.unknown],}
-      with open(os.path.join(log_file_path, 'results.json'), 'a') as json_file:
-        print >> json_file, json.dumps(content)

     # Summarize in the test output.
+    logging.critical('*' * 80)
     summary = ['Summary:\n']
-    if tests_to_run:
-      summary += ['TESTS_TO_RUN=%d\n' % (len(tests_to_run))]
+    if all_tests:
+      summary += ['TESTS_TO_RUN=%d\n' % (len(all_tests))]
craigdh 2012/12/18 17:41:59: nit: no need for the () around len()
frankf 2012/12/18 21:33:50: Done.
     num_tests_ran = (len(self.ok) + len(self.failed) +
                      len(self.crashed) + len(self.unknown))
     tests_passed = [t.name for t in self.ok]
     tests_failed = [t.name for t in self.failed]
     tests_crashed = [t.name for t in self.crashed]
     tests_unknown = [t.name for t in self.unknown]
     summary += ['RAN=%d\n' % (num_tests_ran),
                 'PASSED=%d\n' % len(tests_passed),
                 'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
-    if tests_to_run and num_tests_ran != len(tests_to_run):
+    if all_tests and num_tests_ran != len(all_tests):
       # Add the list of tests we failed to run.
-      tests_failed_to_run = list(set(tests_to_run) - set(tests_passed) -
+      tests_failed_to_run = list(set(all_tests) - set(tests_passed) -
                                  set(tests_failed) - set(tests_crashed) -
                                  set(tests_unknown))
       summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run),
                                              tests_failed_to_run)]
     summary_string = ''.join(summary)
     logging.critical(summary_string)
-    return summary_string
+    logging.critical('*' * 80)
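To make the FAILED_TO_RUN arithmetic just above concrete: any name in all_tests that landed in no result bucket is reported as having failed to run. A tiny illustration with made-up test names (not part of this CL):

  all_tests = ['TestA', 'TestB', 'TestC', 'TestD']
  tests_passed = ['TestA']
  tests_failed = ['TestB']
  tests_crashed = []
  tests_unknown = ['TestC']
  print list(set(all_tests) - set(tests_passed) - set(tests_failed) -
             set(tests_crashed) - set(tests_unknown))
  # -> ['TestD']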
+
+    if os.environ.get('BUILDBOT_BUILDERNAME'):
+      # It is possible to have multiple buildbot steps for the same
+      # instrumentation test package using different annotations.
+      if annotation and len(annotation) == 1:
+        test_suite = annotation[0]
+      else:
+        test_suite = test_package
+      self._LogToFile(test_type, test_suite, build_type)
+
+    if flakiness_server:
+      self._LogToFlakinessDashboard(test_type, test_package, flakiness_server)
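A hypothetical call site tying the pieces together (the argument values, the results object name, and the server URL are illustrative; only the signature comes from this CL):

  results.LogFull(test_type='Instrumentation',
                  test_package='ContentShellTest',
                  annotation=['SmallTest'],
                  build_type='Debug',
                  all_tests=['TestA', 'TestB'],
                  flakiness_server='test-results.example.com')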

   def PrintAnnotation(self):
     """Print buildbot annotations for test results."""
     if self.failed or self.crashed or self.overall_fail or self.timed_out:
       buildbot_report.PrintError()
     else:
       print 'Step success!'  # No annotation needed