OLD | NEW |
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 2 # Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 | 6 |
7 """Parses and displays the contents of one or more autoserv result directories. | 7 """Parses and displays the contents of one or more autoserv result directories. |
8 | 8 |
9 This script parses the contents of one or more autoserv results folders and | 9 This script parses the contents of one or more autoserv results folders and |
10 generates test reports. | 10 generates test reports. |
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
160 """Prints a result report to stdout. | 160 """Prints a result report to stdout. |
161 | 161 |
162 Prints a result table to stdout. Each row of the table contains the test | 162 Prints a result table to stdout. Each row of the table contains the test |
163 result directory and the test result (PASS, FAIL). If the perf option is | 163 result directory and the test result (PASS, FAIL). If the perf option is |
164 enabled, each test entry is followed by perf keyval entries from the test | 164 enabled, each test entry is followed by perf keyval entries from the test |
165 results. | 165 results. |
166 """ | 166 """ |
167 tests = self._results.keys() | 167 tests = self._results.keys() |
168 tests.sort() | 168 tests.sort() |
169 | 169 |
| 170 tests_with_errors = [] |
| 171 |
170 width = self.GetTestColumnWidth() | 172 width = self.GetTestColumnWidth() |
171 line = ''.ljust(width + 5, '-') | 173 line = ''.ljust(width + 5, '-') |
172 | 174 |
173 tests_pass = 0 | 175 tests_pass = 0 |
174 print line | 176 print line |
175 for test in tests: | 177 for test in tests: |
176 # Emit the test/status entry first | 178 # Emit the test/status entry first |
177 test_entry = test.ljust(width) | 179 test_entry = test.ljust(width) |
178 result = self._results[test] | 180 result = self._results[test] |
179 status_entry = result['status'] | 181 status_entry = result['status'] |
180 if status_entry == 'PASS': | 182 if status_entry == 'PASS': |
181 color = Color.GREEN | 183 color = Color.GREEN |
182 tests_pass += 1 | 184 tests_pass += 1 |
183 else: | 185 else: |
184 color = Color.RED | 186 color = Color.RED |
| 187 tests_with_errors.append(test) |
| 188 |
185 status_entry = self._color.Color(color, status_entry) | 189 status_entry = self._color.Color(color, status_entry) |
186 print test_entry + status_entry | 190 print test_entry + status_entry |
187 | 191 |
188 # Emit the perf keyvals entries. There will be no entries if the | 192 # Emit the perf keyvals entries. There will be no entries if the |
189 # --no-perf option is specified. | 193 # --no-perf option is specified. |
190 perf = result['perf'] | 194 perf = result['perf'] |
191 perf_keys = perf.keys() | 195 perf_keys = perf.keys() |
192 perf_keys.sort() | 196 perf_keys.sort() |
193 | 197 |
194 for perf_key in perf_keys: | 198 for perf_key in perf_keys: |
195 perf_key_entry = perf_key.ljust(width - self._KEYVAL_INDENT) | 199 perf_key_entry = perf_key.ljust(width - self._KEYVAL_INDENT) |
196 perf_key_entry = perf_key_entry.rjust(width) | 200 perf_key_entry = perf_key_entry.rjust(width) |
197 perf_value_entry = self._color.Color(Color.BOLD, perf[perf_key]) | 201 perf_value_entry = self._color.Color(Color.BOLD, perf[perf_key]) |
198 print perf_key_entry + perf_value_entry | 202 print perf_key_entry + perf_value_entry |
199 | 203 |
200 print line | 204 print line |
201 | 205 |
202 total_tests = len(tests) | 206 total_tests = len(tests) |
203 percent_pass = 100 * tests_pass / total_tests | 207 percent_pass = 100 * tests_pass / total_tests |
204 pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass) | 208 pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass) |
205 print 'Total PASS: ' + self._color.Color(Color.BOLD, pass_str) | 209 print 'Total PASS: ' + self._color.Color(Color.BOLD, pass_str) |
206 | 210 |
| 211 # Print out the client debug information for failed tests. |
| 212 if self._options.print_debug: |
| 213 for test in tests_with_errors: |
| 214 debug_file_regex = os.path.join(self._options.strip, test, 'debug', |
| 215 'client.*.DEBUG') |
| 216 for path in glob.glob(debug_file_regex): |
| 217 try: |
| 218 fh = open(path) |
| 219 print ('\n========== DEBUG FILE %s FOR TEST %s ==============\n' % ( |
| 220 path, test)) |
| 221 print fh.read() |
| 222 print('\n=========== END DEBUG %s FOR TEST %s ===============\n' % ( |
| 223 path, test)) |
| 224 fh.close() |
| 225 except: |
| 226 print 'Could not open %s' % path |
| 227 |
207 def Run(self): | 228 def Run(self): |
208 """Runs report generation.""" | 229 """Runs report generation.""" |
209 self._CollectResults() | 230 self._CollectResults() |
210 self._GenerateReportText() | 231 self._GenerateReportText() |
211 for v in self._results.itervalues(): | 232 for v in self._results.itervalues(): |
212 if v['status'] != 'PASS': | 233 if v['status'] != 'PASS': |
213 sys.exit(1) | 234 sys.exit(1) |
214 | 235 |
215 | 236 |
def main():
  """Parses command-line options and generates reports for each result dir."""
  usage = 'Usage: %prog [options] result-directories...'
  parser = optparse.OptionParser(usage=usage)

  # Option specs as (args, kwargs) pairs; registered in order so --help
  # output lists them exactly as before.
  option_specs = [
      (('--color',),
       dict(dest='color', action='store_true', default=_STDOUT_IS_TTY,
            help='Use color for text reports [default if TTY stdout]')),
      (('--no-color',),
       dict(dest='color', action='store_false',
            help="Don't use color for text reports")),
      (('--perf',),
       dict(dest='perf', action='store_true', default=True,
            help='Include perf keyvals in the report [default]')),
      (('--no-perf',),
       dict(dest='perf', action='store_false',
            help="Don't include perf keyvals in the report")),
      (('--strip',),
       dict(dest='strip', type='string', action='store', default='results.',
            help='Strip a prefix from test directory names'
                 " [default: '%default']")),
      (('--no-strip',),
       dict(dest='strip', const='', action='store_const',
            help="Don't strip a prefix from test directory names")),
      (('--no-debug',),
       dict(dest='print_debug', action='store_false', default=True,
            help='Do not print out the debug log when a test fails.')),
  ]
  for opt_args, opt_kwargs in option_specs:
    parser.add_option(*opt_args, **opt_kwargs)

  options, args = parser.parse_args()

  if not args:
    parser.print_help()
    Die('no result directories provided')

  ReportGenerator(options, args).Run()


if __name__ == '__main__':
  main()
OLD | NEW |