OLD | NEW |
| (Empty) |
1 #!/usr/bin/python | |
2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 | |
7 """Parses and displays the contents of one or more autoserv result directories. | |
8 | |
9 This script parses the contents of one or more autoserv results folders and | |
10 generates test reports. | |
11 """ | |
12 | |
13 | |
14 import glob | |
15 import optparse | |
16 import os | |
17 import re | |
18 import sys | |
19 | |
20 sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) | |
21 from cros_build_lib import Color, Die | |
22 | |
# True when stdout is attached to a terminal; used as the default for --color.
_STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()

# List of crashes which are okay to ignore. This list should almost always be
# empty. If you add an entry, mark it with a TODO(<your name>) and the issue
# filed for the crash.
_CRASH_WHITELIST = {}
29 | |
30 class ReportGenerator(object): | |
31 """Collects and displays data from autoserv results directories. | |
32 | |
33 This class collects status and performance data from one or more autoserv | |
34 result directories and generates test reports. | |
35 """ | |
36 | |
37 _KEYVAL_INDENT = 2 | |
38 | |
39 def __init__(self, options, args): | |
40 self._options = options | |
41 self._args = args | |
42 self._color = Color(options.color) | |
43 | |
44 def _CollectPerf(self, testdir): | |
45 """Parses keyval file under testdir. | |
46 | |
47 If testdir contains a result folder, process the keyval file and return | |
48 a dictionary of perf keyval pairs. | |
49 | |
50 Args: | |
51 testdir: The autoserv test result directory. | |
52 | |
53 Returns: | |
54 If the perf option is disabled or the there's no keyval file under | |
55 testdir, returns an empty dictionary. Otherwise, returns a dictionary of | |
56 parsed keyvals. Duplicate keys are uniquified by their instance number. | |
57 """ | |
58 | |
59 perf = {} | |
60 if not self._options.perf: | |
61 return perf | |
62 | |
63 keyval_file = os.path.join(testdir, 'results', 'keyval') | |
64 if not os.path.isfile(keyval_file): | |
65 return perf | |
66 | |
67 instances = {} | |
68 | |
69 for line in open(keyval_file): | |
70 match = re.search(r'^(.+){perf}=(.+)$', line) | |
71 if match: | |
72 key = match.group(1) | |
73 val = match.group(2) | |
74 | |
75 # If the same key name was generated multiple times, uniquify all | |
76 # instances other than the first one by adding the instance count | |
77 # to the key name. | |
78 key_inst = key | |
79 instance = instances.get(key, 0) | |
80 if instance: | |
81 key_inst = '%s{%d}' % (key, instance) | |
82 instances[key] = instance + 1 | |
83 | |
84 perf[key_inst] = val | |
85 | |
86 return perf | |
87 | |
88 def _CollectResult(self, testdir): | |
89 """Adds results stored under testdir to the self._results dictionary. | |
90 | |
91 If testdir contains 'status.log' or 'status' files, assume it's a test | |
92 result directory and add the results data to the self._results dictionary. | |
93 The test directory name is used as a key into the results dictionary. | |
94 | |
95 Args: | |
96 testdir: The autoserv test result directory. | |
97 """ | |
98 | |
99 status_file = os.path.join(testdir, 'status.log') | |
100 if not os.path.isfile(status_file): | |
101 status_file = os.path.join(testdir, 'status') | |
102 if not os.path.isfile(status_file): | |
103 return | |
104 | |
105 # Remove false positives that are missing a debug dir. | |
106 if not os.path.exists(os.path.join(testdir, 'debug')): | |
107 return | |
108 | |
109 status_raw = open(status_file, 'r').read() | |
110 status = 'FAIL' | |
111 if (re.search(r'GOOD.+completed successfully', status_raw) and | |
112 not re.search(r'ABORT|ERROR|FAIL|TEST_NA', status_raw)): | |
113 status = 'PASS' | |
114 | |
115 perf = self._CollectPerf(testdir) | |
116 | |
117 if testdir.startswith(self._options.strip): | |
118 testdir = testdir.replace(self._options.strip, '', 1) | |
119 | |
120 crashes = [] | |
121 regex = re.compile('Received crash notification for ([-\w]+).+ (sig \d+)') | |
122 for match in regex.finditer(status_raw): | |
123 if (match.group(1) in _CRASH_WHITELIST and | |
124 match.group(2) in _CRASH_WHITELIST[match.group(1)]): | |
125 continue | |
126 crashes.append('%s %s' % match.groups()) | |
127 | |
128 self._results[testdir] = {'crashes': crashes, | |
129 'status': status, | |
130 'perf': perf} | |
131 | |
132 def _CollectResultsRec(self, resdir): | |
133 """Recursively collect results into the self._results dictionary. | |
134 | |
135 Args: | |
136 resdir: results/test directory to parse results from and recurse into. | |
137 """ | |
138 | |
139 self._CollectResult(resdir) | |
140 for testdir in glob.glob(os.path.join(resdir, '*')): | |
141 self._CollectResultsRec(testdir) | |
142 | |
143 def _CollectResults(self): | |
144 """Parses results into the self._results dictionary. | |
145 | |
146 Initializes a dictionary (self._results) with test folders as keys and | |
147 result data (status, perf keyvals) as values. | |
148 """ | |
149 self._results = {} | |
150 for resdir in self._args: | |
151 if not os.path.isdir(resdir): | |
152 Die('\'%s\' does not exist' % resdir) | |
153 self._CollectResultsRec(resdir) | |
154 | |
155 if not self._results: | |
156 Die('no test directories found') | |
157 | |
158 def GetTestColumnWidth(self): | |
159 """Returns the test column width based on the test data. | |
160 | |
161 Aligns the test results by formatting the test directory entry based on | |
162 the longest test directory or perf key string stored in the self._results | |
163 dictionary. | |
164 | |
165 Returns: | |
166 The width for the test columnt. | |
167 """ | |
168 width = len(max(self._results, key=len)) | |
169 for result in self._results.values(): | |
170 perf = result['perf'] | |
171 if perf: | |
172 perf_key_width = len(max(perf, key=len)) | |
173 width = max(width, perf_key_width + self._KEYVAL_INDENT) | |
174 return width + 1 | |
175 | |
176 def _GenerateReportText(self): | |
177 """Prints a result report to stdout. | |
178 | |
179 Prints a result table to stdout. Each row of the table contains the test | |
180 result directory and the test result (PASS, FAIL). If the perf option is | |
181 enabled, each test entry is followed by perf keyval entries from the test | |
182 results. | |
183 """ | |
184 tests = self._results.keys() | |
185 tests.sort() | |
186 | |
187 tests_with_errors = [] | |
188 | |
189 width = self.GetTestColumnWidth() | |
190 line = ''.ljust(width + 5, '-') | |
191 | |
192 crashes = {} | |
193 tests_pass = 0 | |
194 print line | |
195 for test in tests: | |
196 # Emit the test/status entry first | |
197 test_entry = test.ljust(width) | |
198 result = self._results[test] | |
199 status_entry = result['status'] | |
200 if status_entry == 'PASS': | |
201 color = Color.GREEN | |
202 tests_pass += 1 | |
203 else: | |
204 color = Color.RED | |
205 tests_with_errors.append(test) | |
206 | |
207 status_entry = self._color.Color(color, status_entry) | |
208 print test_entry + status_entry | |
209 | |
210 # Emit the perf keyvals entries. There will be no entries if the | |
211 # --no-perf option is specified. | |
212 perf = result['perf'] | |
213 perf_keys = perf.keys() | |
214 perf_keys.sort() | |
215 | |
216 for perf_key in perf_keys: | |
217 perf_key_entry = perf_key.ljust(width - self._KEYVAL_INDENT) | |
218 perf_key_entry = perf_key_entry.rjust(width) | |
219 perf_value_entry = self._color.Color(Color.BOLD, perf[perf_key]) | |
220 print perf_key_entry + perf_value_entry | |
221 | |
222 # Ignore top-level entry, since it's just a combination of all the | |
223 # individual results. | |
224 if result['crashes'] and test != tests[0]: | |
225 for crash in result['crashes']: | |
226 if not crash in crashes: | |
227 crashes[crash] = set([]) | |
228 crashes[crash].add(test) | |
229 | |
230 print line | |
231 | |
232 total_tests = len(tests) | |
233 percent_pass = 100 * tests_pass / total_tests | |
234 pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass) | |
235 print 'Total PASS: ' + self._color.Color(Color.BOLD, pass_str) | |
236 | |
237 if self._options.crash_detection: | |
238 print '' | |
239 if crashes: | |
240 print self._color.Color(Color.RED, 'Crashes detected during testing:') | |
241 print line | |
242 | |
243 for crash_name, crashed_tests in sorted(crashes.iteritems()): | |
244 print self._color.Color(Color.RED, crash_name) | |
245 for crashed_test in crashed_tests: | |
246 print ' '*self._KEYVAL_INDENT + crashed_test | |
247 | |
248 print line | |
249 print 'Total unique crashes: ' + self._color.Color(Color.BOLD, | |
250 str(len(crashes))) | |
251 else: | |
252 print self._color.Color(Color.GREEN, | |
253 'No crashes detected during testing.') | |
254 | |
255 # Print out error log for failed tests. | |
256 if self._options.print_debug: | |
257 for test in tests_with_errors: | |
258 debug_file_regex = os.path.join(self._options.strip, test, 'debug', | |
259 '%s*.ERROR' % os.path.basename(test)) | |
260 for path in glob.glob(debug_file_regex): | |
261 try: | |
262 fh = open(path) | |
263 print >> sys.stderr, ( | |
264 '\n========== ERROR FILE %s FOR TEST %s ==============\n' % ( | |
265 path, test)) | |
266 out = fh.read() | |
267 while out: | |
268 print >> sys.stderr, out | |
269 out = fh.read() | |
270 print >> sys.stderr, ( | |
271 '\n=========== END ERROR FILE %s FOR TEST %s ===========\n' % ( | |
272 path, test)) | |
273 fh.close() | |
274 except: | |
275 print 'Could not open %s' % path | |
276 | |
277 # Sometimes the builders exit before these buffers are flushed. | |
278 sys.stderr.flush() | |
279 sys.stdout.flush() | |
280 | |
281 def Run(self): | |
282 """Runs report generation.""" | |
283 self._CollectResults() | |
284 self._GenerateReportText() | |
285 for v in self._results.itervalues(): | |
286 if v['status'] != 'PASS' or (self._options.crash_detection | |
287 and v['crashes']): | |
288 sys.exit(1) | |
289 | |
290 | |
def main():
  """Parses command-line arguments and runs report generation."""
  parser = optparse.OptionParser(
      usage='Usage: %prog [options] result-directories...')
  parser.add_option('--color', action='store_true', dest='color',
                    default=_STDOUT_IS_TTY,
                    help='Use color for text reports [default if TTY stdout]')
  parser.add_option('--no-color', action='store_false', dest='color',
                    help="Don't use color for text reports")
  parser.add_option('--no-crash-detection', action='store_false',
                    dest='crash_detection', default=True,
                    help="Don't report crashes or error out when detected")
  parser.add_option('--perf', action='store_true', dest='perf', default=True,
                    help='Include perf keyvals in the report [default]')
  parser.add_option('--no-perf', action='store_false', dest='perf',
                    help="Don't include perf keyvals in the report")
  parser.add_option('--strip', action='store', type='string', dest='strip',
                    default='results.',
                    help="Strip a prefix from test directory names"
                         " [default: 'results.']")
  parser.add_option('--no-strip', action='store_const', const='', dest='strip',
                    help="Don't strip a prefix from test directory names")
  parser.add_option('--no-debug', action='store_false', dest='print_debug',
                    default=True,
                    help="Don't print out logs when tests fail.")
  options, args = parser.parse_args()

  # At least one result directory is required.
  if not args:
    parser.print_help()
    Die('no result directories provided')

  ReportGenerator(options, args).Run()
324 | |
325 | |
# Standard script entry point guard: run report generation when invoked
# directly, but not when imported as a module.
if __name__ == '__main__':
  main()
OLD | NEW |