Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/python | 1 #!/usr/bin/python |
| 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 | 6 |
| 7 """Parses and displays the contents of one or more autoserv result directories. | 7 """Parses and displays the contents of one or more autoserv result directories. |
| 8 | 8 |
| 9 This script parses the contents of one or more autoserv results folders and | 9 This script parses the contents of one or more autoserv results folders and |
| 10 generates test reports. | 10 generates test reports. |
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 106 status = 'FAIL' | 106 status = 'FAIL' |
| 107 if (re.search(r'GOOD.+completed successfully', status_raw) and | 107 if (re.search(r'GOOD.+completed successfully', status_raw) and |
| 108 not re.search(r'ABORT|ERROR|FAIL|TEST_NA', status_raw)): | 108 not re.search(r'ABORT|ERROR|FAIL|TEST_NA', status_raw)): |
| 109 status = 'PASS' | 109 status = 'PASS' |
| 110 | 110 |
| 111 perf = self._CollectPerf(testdir) | 111 perf = self._CollectPerf(testdir) |
| 112 | 112 |
| 113 if testdir.startswith(self._options.strip): | 113 if testdir.startswith(self._options.strip): |
| 114 testdir = testdir.replace(self._options.strip, '', 1) | 114 testdir = testdir.replace(self._options.strip, '', 1) |
| 115 | 115 |
| 116 self._results[testdir] = {'status': status, | 116 crashes = [] |
| 117 regex = re.compile('(Received crash notification for .+)') | |
| 118 for match in regex.finditer(status_raw): | |
| 119 crashes.append(match.group(1).split('\t')[0]) | |
|
petkov
2011/02/16 00:53:56
what does this do? group(1) is the whole string, r
DaleCurtis
2011/02/16 00:59:00
Autotest attaches the timestamp and a couple other
petkov
2011/02/16 01:01:17
Sorry, I still don't get what this does... Can you
DaleCurtis
2011/02/16 01:03:54
Line in status.log is this:
"INFO desktopui_Chr
petkov
2011/02/16 01:06:03
Thanks. It seems "Received crash notification for
DaleCurtis
2011/02/16 01:18:49
Done.
| |
| 120 | |
| 121 self._results[testdir] = {'crashes': crashes, | |
| 122 'status': status, | |
| 117 'perf': perf} | 123 'perf': perf} |
| 118 | 124 |
| 119 def _CollectResultsRec(self, resdir): | 125 def _CollectResultsRec(self, resdir): |
| 120 """Recursively collect results into the self._results dictionary. | 126 """Recursively collect results into the self._results dictionary. |
| 121 | 127 |
| 122 Args: | 128 Args: |
| 123 resdir: results/test directory to parse results from and recurse into. | 129 resdir: results/test directory to parse results from and recurse into. |
| 124 """ | 130 """ |
| 125 | 131 |
| 126 self._CollectResult(resdir) | 132 self._CollectResult(resdir) |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 169 results. | 175 results. |
| 170 """ | 176 """ |
| 171 tests = self._results.keys() | 177 tests = self._results.keys() |
| 172 tests.sort() | 178 tests.sort() |
| 173 | 179 |
| 174 tests_with_errors = [] | 180 tests_with_errors = [] |
| 175 | 181 |
| 176 width = self.GetTestColumnWidth() | 182 width = self.GetTestColumnWidth() |
| 177 line = ''.ljust(width + 5, '-') | 183 line = ''.ljust(width + 5, '-') |
| 178 | 184 |
| 185 crashes_detected = False | |
| 179 tests_pass = 0 | 186 tests_pass = 0 |
| 180 print line | 187 print line |
| 181 for test in tests: | 188 for test in tests: |
| 182 # Emit the test/status entry first | 189 # Emit the test/status entry first |
| 183 test_entry = test.ljust(width) | 190 test_entry = test.ljust(width) |
| 184 result = self._results[test] | 191 result = self._results[test] |
| 185 status_entry = result['status'] | 192 status_entry = result['status'] |
| 186 if status_entry == 'PASS': | 193 if status_entry == 'PASS': |
| 187 color = Color.GREEN | 194 color = Color.GREEN |
| 188 tests_pass += 1 | 195 tests_pass += 1 |
| 189 else: | 196 else: |
| 190 color = Color.RED | 197 color = Color.RED |
| 191 tests_with_errors.append(test) | 198 tests_with_errors.append(test) |
| 192 | 199 |
| 193 status_entry = self._color.Color(color, status_entry) | 200 status_entry = self._color.Color(color, status_entry) |
| 194 print test_entry + status_entry | 201 print test_entry + status_entry |
| 195 | 202 |
| 196 # Emit the perf keyvals entries. There will be no entries if the | 203 # Emit the perf keyvals entries. There will be no entries if the |
| 197 # --no-perf option is specified. | 204 # --no-perf option is specified. |
| 198 perf = result['perf'] | 205 perf = result['perf'] |
| 199 perf_keys = perf.keys() | 206 perf_keys = perf.keys() |
| 200 perf_keys.sort() | 207 perf_keys.sort() |
| 201 | 208 |
| 202 for perf_key in perf_keys: | 209 for perf_key in perf_keys: |
| 203 perf_key_entry = perf_key.ljust(width - self._KEYVAL_INDENT) | 210 perf_key_entry = perf_key.ljust(width - self._KEYVAL_INDENT) |
| 204 perf_key_entry = perf_key_entry.rjust(width) | 211 perf_key_entry = perf_key_entry.rjust(width) |
| 205 perf_value_entry = self._color.Color(Color.BOLD, perf[perf_key]) | 212 perf_value_entry = self._color.Color(Color.BOLD, perf[perf_key]) |
| 206 print perf_key_entry + perf_value_entry | 213 print perf_key_entry + perf_value_entry |
| 207 | 214 |
| 215 if result['crashes']: | |
| 216 crashes_detected = True | |
| 217 | |
| 208 print line | 218 print line |
| 209 | 219 |
| 210 total_tests = len(tests) | 220 total_tests = len(tests) |
| 211 percent_pass = 100 * tests_pass / total_tests | 221 percent_pass = 100 * tests_pass / total_tests |
| 212 pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass) | 222 pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass) |
| 213 print 'Total PASS: ' + self._color.Color(Color.BOLD, pass_str) | 223 print 'Total PASS: ' + self._color.Color(Color.BOLD, pass_str) |
| 214 | 224 |
| 225 if self._options.crash_detection: | |
| 226 print '' | |
|
petkov
2011/02/16 00:53:56
Isn't it better to interleave the crash results in
DaleCurtis
2011/02/16 00:59:00
I thought about this, but cmasone indicated he'd l
| |
| 227 if crashes_detected: | |
| 228 num_crashes = 0 | |
| 229 print self._color.Color(Color.RED, 'Crashes detected during testing:') | |
| 230 print line | |
| 231 | |
| 232 # Ignore top-level entry, since it's just a combination of all the | |
| 233 # individual results. | |
| 234 for test in tests[1:]: | |
| 235 crashes = self._results[test]['crashes'] | |
| 236 if not crashes: | |
| 237 continue | |
| 238 print test | |
| 239 for crash in crashes: | |
| 240 num_crashes += 1 | |
| 241 print ' '*self._KEYVAL_INDENT + self._color.Color(Color.RED, crash) | |
| 242 | |
| 243 print line | |
| 244 print 'Total crashes: ' + self._color.Color(Color.BOLD, | |
| 245 str(num_crashes)) | |
| 246 else: | |
| 247 print self._color.Color(Color.GREEN, | |
| 248 'No crashes detected during testing.') | |
| 249 | |
| 215 # Print out the client debug information for failed tests. | 250 # Print out the client debug information for failed tests. |
| 216 if self._options.print_debug: | 251 if self._options.print_debug: |
| 217 for test in tests_with_errors: | 252 for test in tests_with_errors: |
| 218 debug_file_regex = os.path.join(self._options.strip, test, 'debug', | 253 debug_file_regex = os.path.join(self._options.strip, test, 'debug', |
| 219 '%s*.DEBUG' % os.path.basename(test)) | 254 '%s*.DEBUG' % os.path.basename(test)) |
| 220 for path in glob.glob(debug_file_regex): | 255 for path in glob.glob(debug_file_regex): |
| 221 try: | 256 try: |
| 222 fh = open(path) | 257 fh = open(path) |
| 223 print >> sys.stderr, ( | 258 print >> sys.stderr, ( |
| 224 '\n========== DEBUG FILE %s FOR TEST %s ==============\n' % ( | 259 '\n========== DEBUG FILE %s FOR TEST %s ==============\n' % ( |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 236 | 271 |
| 237 # Sometimes the builders exit before these buffers are flushed. | 272 # Sometimes the builders exit before these buffers are flushed. |
| 238 sys.stderr.flush() | 273 sys.stderr.flush() |
| 239 sys.stdout.flush() | 274 sys.stdout.flush() |
| 240 | 275 |
| 241 def Run(self): | 276 def Run(self): |
| 242 """Runs report generation.""" | 277 """Runs report generation.""" |
| 243 self._CollectResults() | 278 self._CollectResults() |
| 244 self._GenerateReportText() | 279 self._GenerateReportText() |
| 245 for v in self._results.itervalues(): | 280 for v in self._results.itervalues(): |
| 246 if v['status'] != 'PASS': | 281 if v['status'] != 'PASS' or (self._options.crash_detection |
| 282 and v['crashes']): | |
| 247 sys.exit(1) | 283 sys.exit(1) |
| 248 | 284 |
| 249 | 285 |
def main():
  """Parses command-line options and runs report generation.

  Expects one or more autoserv result directories as positional arguments;
  prints usage and dies when none are given.
  """
  usage = 'Usage: %prog [options] result-directories...'
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('--color', dest='color', action='store_true',
                    default=_STDOUT_IS_TTY,
                    help='Use color for text reports [default if TTY stdout]')
  parser.add_option('--no-color', dest='color', action='store_false',
                    help="Don't use color for text reports")
  parser.add_option('--no-crash-detection', dest='crash_detection',
                    action='store_false', default=True,
                    help="Don't report crashes or error out when detected")
  parser.add_option('--perf', dest='perf', action='store_true',
                    default=True,
                    help='Include perf keyvals in the report [default]')
  parser.add_option('--no-perf', dest='perf', action='store_false',
                    help="Don't include perf keyvals in the report")
  parser.add_option('--strip', dest='strip', type='string', action='store',
                    default='results.',
                    help='Strip a prefix from test directory names'
                         " [default: '%default']")
  parser.add_option('--no-strip', dest='strip', const='', action='store_const',
                    help="Don't strip a prefix from test directory names")
  parser.add_option('--no-debug', dest='print_debug', action='store_false',
                    default=True,
                    help="Don't print out the debug log when a test fails.")
  (options, args) = parser.parse_args()

  if not args:
    parser.print_help()
    Die('no result directories provided')

  generator = ReportGenerator(options, args)
  generator.Run()


if __name__ == '__main__':
  main()
| OLD | NEW |