OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (C) 2012 Google Inc. All rights reserved. | 2 # Copyright (C) 2012 Google Inc. All rights reserved. |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 44 matching lines...) |
55 self.stream.write(msg + '\n') | 55 self.stream.write(msg + '\n') |
56 | 56 |
57 def print_run_results(self, run_results): | 57 def print_run_results(self, run_results): |
58 failed = run_results.total_failures | 58 failed = run_results.total_failures |
59 total = run_results.total | 59 total = run_results.total |
60 passed = total - failed - run_results.remaining | 60 passed = total - failed - run_results.remaining |
61 percent_passed = 0.0 | 61 percent_passed = 0.0 |
62 if total > 0: | 62 if total > 0: |
63 percent_passed = float(passed) * 100 / total | 63 percent_passed = float(passed) * 100 / total |
64 | 64 |
65 self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total,
percent_passed)) | 65 self._print('=> Results: %d/%d tests passed (%.1f%%)' % (passed, total,
percent_passed)) |
66 self._print("") | 66 self._print('') |
67 self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed") | 67 self._print_run_results_entry(run_results, test_expectations.NOW, 'Tests to be fixed') |
68 | 68 |
69 self._print("") | 69 self._print('') |
70 # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats. | 70 # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats. |
71 self._print_run_results_entry(run_results, test_expectations.WONTFIX, | 71 self._print_run_results_entry(run_results, test_expectations.WONTFIX, |
72 "Tests that will only be fixed if they crash (WONTFIX)") | 72 'Tests that will only be fixed if they cra
sh (WONTFIX)') |
73 self._print("") | 73 self._print('') |
74 | 74 |
75 def _print_run_results_entry(self, run_results, timeline, heading): | 75 def _print_run_results_entry(self, run_results, timeline, heading): |
76 total = len(run_results.tests_by_timeline[timeline]) | 76 total = len(run_results.tests_by_timeline[timeline]) |
77 not_passing = (total - | 77 not_passing = (total - |
78 len(run_results.tests_by_expectation[test_expectations.PASS] & | 78 len(run_results.tests_by_expectation[test_expectations.PASS] & |
79 run_results.tests_by_timeline[timeline])) | 79 run_results.tests_by_timeline[timeline])) |
80 self._print("=> %s (%d):" % (heading, not_passing)) | 80 self._print('=> %s (%d):' % (heading, not_passing)) |
81 | 81 |
82 for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys(): | 82 for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys(): |
83 if result in (test_expectations.PASS, test_expectations.SKIP): | 83 if result in (test_expectations.PASS, test_expectations.SKIP): |
84 continue | 84 continue |
85 results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline]) | 85 results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline]) |
86 desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result] | 86 desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result] |
87 if not_passing and len(results): | 87 if not_passing and len(results): |
88 pct = len(results) * 100.0 / not_passing | 88 pct = len(results) * 100.0 / not_passing |
89 self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct)) | 89 self._print(' %5d %-24s (%4.1f%%)' % (len(results), desc, pct)) |
90 | 90 |
91 def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False): | 91 def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False): |
92 passes = {} | 92 passes = {} |
93 flaky = {} | 93 flaky = {} |
94 regressions = {} | 94 regressions = {} |
95 | 95 |
96 def add_to_dict_of_lists(dict, key, value): | 96 def add_to_dict_of_lists(dict, key, value): |
97 dict.setdefault(key, []).append(value) | 97 dict.setdefault(key, []).append(value) |
98 | 98 |
99 def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions): | 99 def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions): |
100 actual = results['actual'].split(" ") | 100 actual = results['actual'].split(' ') |
101 expected = results['expected'].split(" ") | 101 expected = results['expected'].split(' ') |
102 | 102 |
103 if 'is_unexpected' not in results or not results['is_unexpected']: | 103 if 'is_unexpected' not in results or not results['is_unexpected']: |
104 # Don't print anything for tests that ran as expected. | 104 # Don't print anything for tests that ran as expected. |
105 return | 105 return |
106 | 106 |
107 if actual == ['PASS']: | 107 if actual == ['PASS']: |
108 if 'CRASH' in expected: | 108 if 'CRASH' in expected: |
109 add_to_dict_of_lists(passes, 'Expected to crash, but passed', test) | 109 add_to_dict_of_lists(passes, 'Expected to crash, but passed', test) |
110 elif 'TIMEOUT' in expected: | 110 elif 'TIMEOUT' in expected: |
111 add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test) | 111 add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test) |
112 else: | 112 else: |
113 add_to_dict_of_lists(passes, 'Expected to fail, but passed', test) | 113 add_to_dict_of_lists(passes, 'Expected to fail, but passed', test) |
114 elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']: | 114 elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']: |
115 add_to_dict_of_lists(regressions, actual[0], test) | 115 add_to_dict_of_lists(regressions, actual[0], test) |
116 elif len(actual) > 1: | 116 elif len(actual) > 1: |
117 # We group flaky tests by the first actual result we got. | 117 # We group flaky tests by the first actual result we got. |
118 add_to_dict_of_lists(flaky, actual[0], test) | 118 add_to_dict_of_lists(flaky, actual[0], test) |
119 else: | 119 else: |
120 add_to_dict_of_lists(regressions, results['actual'], test) | 120 add_to_dict_of_lists(regressions, results['actual'], test) |
121 | 121 |
122 layouttestresults.for_each_test(summarized_results['tests'], add_result) | 122 layouttestresults.for_each_test(summarized_results['tests'], add_result) |
123 | 123 |
124 if len(passes) or len(flaky) or len(regressions): | 124 if len(passes) or len(flaky) or len(regressions): |
125 self._print("") | 125 self._print('') |
126 if len(passes): | 126 if len(passes): |
127 for key, tests in passes.iteritems(): | 127 for key, tests in passes.iteritems(): |
128 self._print("%s: (%d)" % (key, len(tests))) | 128 self._print('%s: (%d)' % (key, len(tests))) |
129 tests.sort() | 129 tests.sort() |
130 for test in tests: | 130 for test in tests: |
131 self._print(" %s" % test) | 131 self._print(' %s' % test) |
132 self._print("") | 132 self._print('') |
133 self._print("") | 133 self._print('') |
134 | 134 |
135 if len(flaky): | 135 if len(flaky): |
136 descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS | 136 descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS |
137 for key, tests in flaky.iteritems(): | 137 for key, tests in flaky.iteritems(): |
138 result = TestExpectations.EXPECTATIONS[key.lower()] | 138 result = TestExpectations.EXPECTATIONS[key.lower()] |
139 self._print("Unexpected flakiness: %s (%d)" % (descriptions[resu
lt], len(tests))) | 139 self._print('Unexpected flakiness: %s (%d)' % (descriptions[resu
lt], len(tests))) |
140 tests.sort() | 140 tests.sort() |
141 | 141 |
142 for test in tests: | 142 for test in tests: |
143 result = layouttestresults.result_for_test(summarized_results['tests'], test) | 143 result = layouttestresults.result_for_test(summarized_results['tests'], test) |
144 actual = result['actual'].split(" ") | 144 actual = result['actual'].split(' ') |
145 expected = result['expected'].split(" ") | 145 expected = result['expected'].split(' ') |
146 result = TestExpectations.EXPECTATIONS[key.lower()] | 146 result = TestExpectations.EXPECTATIONS[key.lower()] |
147 # FIXME: clean this up once the old syntax is gone | 147 # FIXME: clean this up once the old syntax is gone |
148 new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))] | 148 new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] |
149 self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list))) | 149 for exp in list(set(actual) | set(expected))] |
150 self._print("") | 150 self._print(' %s [ %s ]' % (test, ' '.join(new_expectations_list))) |
151 self._print("") | 151 self._print('') |
| 152 self._print('') |
152 | 153 |
153 if len(regressions): | 154 if len(regressions): |
154 descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS | 155 descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS |
155 for key, tests in regressions.iteritems(): | 156 for key, tests in regressions.iteritems(): |
156 result = TestExpectations.EXPECTATIONS[key.lower()] | 157 result = TestExpectations.EXPECTATIONS[key.lower()] |
157 self._print("Regressions: Unexpected %s (%d)" % (descriptions[re
sult], len(tests))) | 158 self._print('Regressions: Unexpected %s (%d)' % (descriptions[re
sult], len(tests))) |
158 tests.sort() | 159 tests.sort() |
159 for test in tests: | 160 for test in tests: |
160 self._print(" %s [ %s ]" % (test, TestExpectationParser._in
verted_expectation_tokens[key])) | 161 self._print(' %s [ %s ]' % (test, TestExpectationParser._in
verted_expectation_tokens[key])) |
161 self._print("") | 162 self._print('') |
162 | 163 |
163 if len(summarized_results['tests']) and self.debug_logging: | 164 if len(summarized_results['tests']) and self.debug_logging: |
164 self._print("%s" % ("-" * 78)) | 165 self._print('%s' % ('-' * 78)) |
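
Reviewer note, not part of the patch: _print_run_results_entry leans on set arithmetic. Both tests_by_expectation and tests_by_timeline map keys to sets of test names, and '&' intersects them so only tests in the given timeline are counted. A minimal sketch of that counting logic, with invented test names and plain dicts of sets standing in for run_results:

    # Stand-ins for run_results.tests_by_expectation / tests_by_timeline;
    # the test names are made up for illustration.
    tests_by_expectation = {'PASS': {'a.html', 'b.html'}, 'TEXT': {'c.html'}}
    tests_by_timeline = {'NOW': {'a.html', 'b.html', 'c.html'}}

    total = len(tests_by_timeline['NOW'])
    # '&' is set intersection: passing tests that are also in this timeline.
    not_passing = total - len(tests_by_expectation['PASS'] & tests_by_timeline['NOW'])
    print('=> Tests to be fixed (%d):' % not_passing)
    if not_passing:
        failing = tests_by_expectation['TEXT'] & tests_by_timeline['NOW']
        pct = len(failing) * 100.0 / not_passing
        print('  %5d %-24s (%4.1f%%)' % (len(failing), 'text failures', pct))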
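Similarly, the add_result helper builds dicts of lists via dict.setdefault, and the reporting loops iterate with iteritems(), which exists only on Python 2. A self-contained sketch of the same grouping-and-printing pattern (the test data is invented; items() is used so the sketch also runs under Python 3):

    def add_to_dict_of_lists(d, key, value):
        # setdefault creates the list on first use, then appends to it.
        d.setdefault(key, []).append(value)

    flaky = {}
    # Invented (test, actual-results) pairs; more than one actual result
    # means the test was retried, i.e. it is flaky.
    runs = [('fast/dom/a.html', ['TEXT', 'PASS']),
            ('fast/dom/b.html', ['TEXT', 'CRASH'])]
    for test, actual in runs:
        if len(actual) > 1:
            # Group flaky tests by the first actual result, as add_result does.
            add_to_dict_of_lists(flaky, actual[0], test)

    for key, tests in flaky.items():
        print('Unexpected flakiness: %s (%d)' % (key, len(tests)))
        for test in sorted(tests):
            print('  %s' % test)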