# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generates annotated output.

TODO(stip): Move the gtest_utils gtest parser selection code from runtest.py
to here.
TODO(stip): Move the perf dashboard code from runtest.py to here.
"""

import re

from slave import performance_log_processor
from slave import slave_utils


def getText(result, observer, name):
  """Generate a text summary for the waterfall.

  Updates the waterfall with any unusual test output, with a link to logs of
  failed test steps.
  """
  GTEST_DASHBOARD_BASE = ('http://test-results.appspot.com'
                          '/dashboards/flakiness_dashboard.html')

  # TODO(xusydoc): unify this with gtest reporting below so getText() is
  # less confusing
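  # Performance log processors expose a PerformanceSummary() method; wrap its
  # lines in a BuildResultInfo div so the perf results are shown inline on
  # the waterfall.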
  if hasattr(observer, 'PerformanceSummary'):
    basic_info = [name]
    summary_text = ['<div class="BuildResultInfo">']
    summary_text.extend(observer.PerformanceSummary())
    summary_text.append('</div>')
    return basic_info + summary_text

  # basic_info is an array of lines to display on the waterfall.
  basic_info = [name]

  disabled = observer.DisabledTests()
  if disabled:
    basic_info.append('%s disabled' % str(disabled))

  flaky = observer.FlakyTests()
  if flaky:
    basic_info.append('%s flaky' % str(flaky))

  failed_test_count = len(observer.FailedTests())
  if failed_test_count == 0:
    if result == performance_log_processor.SUCCESS:
      return basic_info
    elif result == performance_log_processor.WARNINGS:
      return basic_info + ['warnings']

  if observer.RunningTests():
    basic_info += ['did not complete']

  # TODO(xusydoc): see if 'crashed or hung' should be tracked by
  # RunningTests().
  if failed_test_count:
    failure_text = ['failed %d' % failed_test_count]
    if observer.master_name:
      # Include the link to the flakiness dashboard.
      failure_text.append('<div class="BuildResultInfo">')
      failure_text.append('<a href="%s#testType=%s'
                          '&tests=%s">' % (GTEST_DASHBOARD_BASE,
                                           name,
                                           ','.join(observer.FailedTests())))
      failure_text.append('Flakiness dashboard')
      failure_text.append('</a>')
      failure_text.append('</div>')
  else:
    failure_text = ['crashed or hung']
  return basic_info + failure_text


def annotate(test_name, result, log_processor, perf_dashboard_id=None):
| 76 """Given a test result and tracker, update the waterfall with test results.""" |
| 77 |
| 78 # Always print raw exit code of the subprocess. This is very helpful |
| 79 # for debugging, especially when one gets the "crashed or hung" message |
| 80 # with no output (exit code can have some clues, especially on Windows). |
| 81 print 'exit code (as seen by runtest.py): %d' % result |
| 82 |
| 83 get_text_result = performance_log_processor.SUCCESS |
| 84 |
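  # Emit a separate log for each failed test (under a sanitized name) and for
  # each memory tool report, so each failure gets its own link on the
  # waterfall.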
  for failure in sorted(log_processor.FailedTests()):
    clean_test_name = re.sub(r'[^\w\.\-]', '_', failure)
    slave_utils.WriteLogLines(clean_test_name,
                              log_processor.FailureDescription(failure))
  for report_hash in sorted(log_processor.MemoryToolReportHashes()):
    slave_utils.WriteLogLines(report_hash,
                              log_processor.MemoryToolReport(report_hash))

  if log_processor.ParsingErrors():
    # Generate a log file containing the list of errors.
    slave_utils.WriteLogLines('log parsing error(s)',
                              log_processor.ParsingErrors())

    log_processor.ClearParsingErrors()

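  # Give the log processor a chance to escalate the overall result based on
  # what it parsed; the larger (worse) of the two values wins.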
  if hasattr(log_processor, 'evaluateCommand'):
    parser_result = log_processor.evaluateCommand('command')
    if parser_result > result:
      result = parser_result

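  # Print annotator directives so the step status shown on the waterfall
  # matches the final result.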
  if result == performance_log_processor.SUCCESS:
    if (len(log_processor.ParsingErrors()) or
        len(log_processor.FailedTests()) or
        len(log_processor.MemoryToolReportHashes())):
      print '@@@STEP_WARNINGS@@@'
      get_text_result = performance_log_processor.WARNINGS
  elif result == slave_utils.WARNING_EXIT_CODE:
    print '@@@STEP_WARNINGS@@@'
    get_text_result = performance_log_processor.WARNINGS
  else:
    print '@@@STEP_FAILURE@@@'
    get_text_result = performance_log_processor.FAILURE

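  # Each summary line from getText() becomes step text on the waterfall.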
  for desc in getText(get_text_result, log_processor, test_name):
    print '@@@STEP_TEXT@%s@@@' % desc

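  # Perf log processors also expose PerformanceLogs(); write out each graph
  # data log so it can be picked up for the perf dashboard.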
  if hasattr(log_processor, 'PerformanceLogs'):
    if not perf_dashboard_id:
      raise Exception('runtest.py error: perf step specified but '
                      'no test_id in factory_properties!')
    for logname, log in log_processor.PerformanceLogs().iteritems():
      lines = [str(l).rstrip() for l in log]
      slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id)
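

# Illustrative usage (a sketch only, not part of this module): the real call
# sites live in runtest.py. The parser class and ProcessLine() method below
# are assumptions about the gtest_utils helpers mentioned in the module
# docstring, and test_output/exit_code are placeholders.
#
#   log_processor = gtest_utils.GTestLogParser()
#   for line in test_output.splitlines():
#     log_processor.ProcessLine(line)
#   annotate('base_unittests', exit_code, log_processor)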