OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 49 matching lines...)
60 SYS_DEPS_EXIT_STATUS, | 60 SYS_DEPS_EXIT_STATUS, |
61 NO_TESTS_EXIT_STATUS, | 61 NO_TESTS_EXIT_STATUS, |
62 NO_DEVICES_EXIT_STATUS, | 62 NO_DEVICES_EXIT_STATUS, |
63 UNEXPECTED_ERROR_EXIT_STATUS, | 63 UNEXPECTED_ERROR_EXIT_STATUS, |
64 ) | 64 ) |
65 | 65 |
66 # In order to avoid colliding with the above codes, we put a ceiling on | 66 # In order to avoid colliding with the above codes, we put a ceiling on |
67 # the value returned by num_regressions | 67 # the value returned by num_regressions |
68 MAX_FAILURES_EXIT_STATUS = 101 | 68 MAX_FAILURES_EXIT_STATUS = 101 |
69 | 69 |
| 70 |
70 class TestRunException(Exception): | 71 class TestRunException(Exception): |
| 72 |
71 def __init__(self, code, msg): | 73 def __init__(self, code, msg): |
72 self.code = code | 74 self.code = code |
73 self.msg = msg | 75 self.msg = msg |
74 | 76 |
75 | 77 |
76 class TestRunResults(object): | 78 class TestRunResults(object): |
| 79 |
77 def __init__(self, expectations, num_tests): | 80 def __init__(self, expectations, num_tests): |
78 self.total = num_tests | 81 self.total = num_tests |
79 self.remaining = self.total | 82 self.remaining = self.total |
80 self.expectations = expectations | 83 self.expectations = expectations |
81 self.expected = 0 | 84 self.expected = 0 |
82 self.expected_failures = 0 | 85 self.expected_failures = 0 |
83 self.unexpected = 0 | 86 self.unexpected = 0 |
84 self.unexpected_failures = 0 | 87 self.unexpected_failures = 0 |
85 self.unexpected_crashes = 0 | 88 self.unexpected_crashes = 0 |
86 self.unexpected_timeouts = 0 | 89 self.unexpected_timeouts = 0 |
(...skipping 40 matching lines...)
127 self.unexpected_failures += 1 | 130 self.unexpected_failures += 1 |
128 if test_result.type == test_expectations.CRASH: | 131 if test_result.type == test_expectations.CRASH: |
129 self.unexpected_crashes += 1 | 132 self.unexpected_crashes += 1 |
130 elif test_result.type == test_expectations.TIMEOUT: | 133 elif test_result.type == test_expectations.TIMEOUT: |
131 self.unexpected_timeouts += 1 | 134 self.unexpected_timeouts += 1 |
132 if test_is_slow: | 135 if test_is_slow: |
133 self.slow_tests.add(test_result.test_name) | 136 self.slow_tests.add(test_result.test_name) |
134 | 137 |
135 | 138 |
136 class RunDetails(object): | 139 class RunDetails(object): |
137 def __init__(self, exit_code, summarized_full_results=None, summarized_failing_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False): | 140 |
| 141 def __init__(self, exit_code, summarized_full_results=None, summarized_failing_results=None, |
| 142 initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False): |
138 self.exit_code = exit_code | 143 self.exit_code = exit_code |
139 self.summarized_full_results = summarized_full_results | 144 self.summarized_full_results = summarized_full_results |
140 self.summarized_failing_results = summarized_failing_results | 145 self.summarized_failing_results = summarized_failing_results |
141 self.initial_results = initial_results | 146 self.initial_results = initial_results |
142 self.retry_results = retry_results | 147 self.retry_results = retry_results |
143 self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry | 148 self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry |
144 | 149 |
145 | 150 |
146 def _interpret_test_failures(failures): | 151 def _interpret_test_failures(failures): |
147 test_dict = {} | 152 test_dict = {} |
148 failure_types = [type(failure) for failure in failures] | 153 failure_types = [type(failure) for failure in failures] |
149 # FIXME: get rid of all this is_* values once there is a 1:1 map between | 154 # FIXME: get rid of all this is_* values once there is a 1:1 map between |
150 # TestFailure type and test_expectations.EXPECTATION. | 155 # TestFailure type and test_expectations.EXPECTATION. |
151 if test_failures.FailureMissingAudio in failure_types: | 156 if test_failures.FailureMissingAudio in failure_types: |
152 test_dict['is_missing_audio'] = True | 157 test_dict['is_missing_audio'] = True |
153 | 158 |
154 if test_failures.FailureMissingResult in failure_types: | 159 if test_failures.FailureMissingResult in failure_types: |
155 test_dict['is_missing_text'] = True | 160 test_dict['is_missing_text'] = True |
156 | 161 |
157 if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types: | 162 if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types: |
158 test_dict['is_missing_image'] = True | 163 test_dict['is_missing_image'] = True |
159 | 164 |
160 if test_failures.FailureTestHarnessAssertion in failure_types: | 165 if test_failures.FailureTestHarnessAssertion in failure_types: |
161 test_dict['is_testharness_test'] = True | 166 test_dict['is_testharness_test'] = True |
162 | 167 |
163 return test_dict | 168 return test_dict |
164 | 169 |
165 | 170 |
166 def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=False): | 171 def summarize_results(port_obj, expectations, initial_results, retry_results, |
| 172 enabled_pixel_tests_in_retry, only_include_failing=False): |
167 """Returns a dictionary containing a summary of the test runs, with the following fields: | 173 """Returns a dictionary containing a summary of the test runs, with the following fields: |
168 'version': a version indicator | 174 'version': a version indicator |
169 'fixable': The number of fixable tests (NOW - PASS) | 175 'fixable': The number of fixable tests (NOW - PASS) |
170 'skipped': The number of skipped tests (NOW & SKIPPED) | 176 'skipped': The number of skipped tests (NOW & SKIPPED) |
171 'num_regressions': The number of non-flaky failures | 177 'num_regressions': The number of non-flaky failures |
172 'num_flaky': The number of flaky failures | 178 'num_flaky': The number of flaky failures |
173 'num_passes': The number of unexpected passes | 179 'num_passes': The number of unexpected passes |
174 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} | 180 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} |
175 """ | 181 """ |
176 results = {} | 182 results = {} |
(...skipping 32 matching lines...)
209 continue | 215 continue |
210 | 216 |
211 if result_type == test_expectations.PASS: | 217 if result_type == test_expectations.PASS: |
212 num_passes += 1 | 218 num_passes += 1 |
213 if not result.has_stderr and only_include_failing: | 219 if not result.has_stderr and only_include_failing: |
214 continue | 220 continue |
215 elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name: | 221 elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name: |
216 if retry_results: | 222 if retry_results: |
217 if test_name not in retry_results.unexpected_results_by_name: | 223 if test_name not in retry_results.unexpected_results_by_name: |
218 # The test failed unexpectedly at first, but ran as expected the second time -> flaky. | 224 # The test failed unexpectedly at first, but ran as expected the second time -> flaky. |
219 actual.extend(expectations.get_expectations_string(test_name).split(" ")) | 225 actual.extend(expectations.get_expectations_string(test_name).split(' ')) |
220 num_flaky += 1 | 226 num_flaky += 1 |
221 else: | 227 else: |
222 retry_result_type = retry_results.unexpected_results_by_name[test_name].type | 228 retry_result_type = retry_results.unexpected_results_by_name[test_name].type |
223 if retry_result_type == test_expectations.PASS: | 229 if retry_result_type == test_expectations.PASS: |
224 # The test failed unexpectedly at first, then passed unexpectedly -> unexpected pass. | 230 # The test failed unexpectedly at first, then passed unexpectedly -> unexpected pass. |
225 num_passes += 1 | 231 num_passes += 1 |
226 if not result.has_stderr and only_include_failing: | 232 if not result.has_stderr and only_include_failing: |
227 continue | 233 continue |
228 else: | 234 else: |
229 # The test failed unexpectedly both times -> regression. | 235 # The test failed unexpectedly both times -> regression. |
(...skipping 14 matching lines...)
244 test_dict['has_stderr'] = True | 250 test_dict['has_stderr'] = True |
245 | 251 |
246 bugs = expectations.model().get_expectation_line(test_name).bugs | 252 bugs = expectations.model().get_expectation_line(test_name).bugs |
247 if bugs: | 253 if bugs: |
248 test_dict['bugs'] = bugs | 254 test_dict['bugs'] = bugs |
249 | 255 |
250 if result.reftest_type: | 256 if result.reftest_type: |
251 test_dict.update(reftest_type=list(result.reftest_type)) | 257 test_dict.update(reftest_type=list(result.reftest_type)) |
252 | 258 |
253 test_dict['expected'] = expected | 259 test_dict['expected'] = expected |
254 test_dict['actual'] = " ".join(actual) | 260 test_dict['actual'] = ' '.join(actual) |
255 | 261 |
256 def is_expected(actual_result): | 262 def is_expected(actual_result): |
257 return expectations.matches_an_expected_result(test_name, result_type, | 263 return expectations.matches_an_expected_result(test_name, result_type, |
258 port_obj.get_option('pixel_tests') or result.reftest_type, | 264 port_obj.get_option('pixel_tests') or result.reftest_type, |
259 port_obj.get_option('enable_sanitizer')) | 265 port_obj.get_option('enable_sanitizer')) |
260 | 266 |
261 # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected. | 267 # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected. |
262 if not all(is_expected(actual_result) for actual_result in actual): | 268 if not all(is_expected(actual_result) for actual_result in actual): |
263 test_dict['is_unexpected'] = True | 269 test_dict['is_unexpected'] = True |
264 | 270 |
265 test_dict.update(_interpret_test_failures(result.failures)) | 271 test_dict.update(_interpret_test_failures(result.failures)) |
266 | 272 |
267 if retry_results: | 273 if retry_results: |
268 retry_result = retry_results.unexpected_results_by_name.get(test_name) | 274 retry_result = retry_results.unexpected_results_by_name.get(test_name) |
269 if retry_result: | 275 if retry_result: |
(...skipping 22 matching lines...)
292 if part not in current_map: | 298 if part not in current_map: |
293 current_map[part] = {} | 299 current_map[part] = {} |
294 current_map = current_map[part] | 300 current_map = current_map[part] |
295 | 301 |
296 results['tests'] = tests | 302 results['tests'] = tests |
297 # FIXME: Remove this. It is redundant with results['num_failures_by_type']. | 303 # FIXME: Remove this. It is redundant with results['num_failures_by_type']. |
298 results['num_passes'] = num_passes | 304 results['num_passes'] = num_passes |
299 results['num_flaky'] = num_flaky | 305 results['num_flaky'] = num_flaky |
300 # FIXME: Remove this. It is redundant with results['num_failures_by_type']. | 306 # FIXME: Remove this. It is redundant with results['num_failures_by_type']. |
301 results['num_regressions'] = num_regressions | 307 results['num_regressions'] = num_regressions |
302 results['interrupted'] = initial_results.interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?) | 308 # Does results.html have enough information to compute this itself? (by |
| 309 # checking total number of results vs. total number of tests?) |
| 310 results['interrupted'] = initial_results.interrupted |
303 results['layout_tests_dir'] = port_obj.layout_tests_dir() | 311 results['layout_tests_dir'] = port_obj.layout_tests_dir() |
304 results['has_wdiff'] = port_obj.wdiff_available() | 312 results['has_wdiff'] = port_obj.wdiff_available() |
305 results['has_pretty_patch'] = port_obj.pretty_patch_available() | 313 results['has_pretty_patch'] = port_obj.pretty_patch_available() |
306 results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests') | 314 results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests') |
307 results['seconds_since_epoch'] = int(time.time()) | 315 results['seconds_since_epoch'] = int(time.time()) |
308 results['build_number'] = port_obj.get_option('build_number') | 316 results['build_number'] = port_obj.get_option('build_number') |
309 results['builder_name'] = port_obj.get_option('builder_name') | 317 results['builder_name'] = port_obj.get_option('builder_name') |
310 | 318 |
311 # Don't do this by default since it takes >100ms. | 319 # Don't do this by default since it takes >100ms. |
312 # It's only used for uploading data to the flakiness dashboard. | 320 # It's only used for uploading data to the flakiness dashboard. |
313 results['chromium_revision'] = '' | 321 results['chromium_revision'] = '' |
314 results['blink_revision'] = '' | 322 results['blink_revision'] = '' |
315 if port_obj.get_option('builder_name'): | 323 if port_obj.get_option('builder_name'): |
316 for (name, path) in port_obj.repository_paths(): | 324 for (name, path) in port_obj.repository_paths(): |
317 scm = port_obj.host.scm_for_path(path) | 325 scm = port_obj.host.scm_for_path(path) |
318 if scm: | 326 if scm: |
319 rev = scm.svn_revision(path) | 327 rev = scm.svn_revision(path) |
320 if rev: | 328 if rev: |
321 results[name.lower() + '_revision'] = rev | 329 results[name.lower() + '_revision'] = rev |
322 else: | 330 else: |
323 _log.warn('Failed to determine svn revision for %s, ' | 331 _log.warn('Failed to determine svn revision for %s, ' |
324 'leaving "%s_revision" key blank in full_results.json.' | 332 'leaving "%s_revision" key blank in full_results.json.' |
325 % (path, name)) | 333 % (path, name)) |
326 | 334 |
327 return results | 335 return results |
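
For context on the MAX_FAILURES_EXIT_STATUS ceiling near the top of this file: a minimal sketch of how a runner might clamp its exit code, assuming (as the comment implies, though this CL does not show it) that the reserved status codes imported above all sit at or above 101. The helper name is hypothetical.

    MAX_FAILURES_EXIT_STATUS = 101

    def exit_code_from_regressions(num_regressions):
        # Clamp so a large regression count can never collide with a
        # reserved status code (assumed >= MAX_FAILURES_EXIT_STATUS).
        return min(num_regressions, MAX_FAILURES_EXIT_STATUS)

    assert exit_code_from_regressions(3) == 3
    assert exit_code_from_regressions(5000) == 101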
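The nested 'tests' map built near the end of summarize_results (the current_map loop above) is effectively a path trie: each '/'-separated component of a test name becomes a dict key, with the per-test result dict at the leaf. A minimal sketch under that reading; add_result and the sample test are illustrative only, not part of this CL.

    def add_result(tests, test_name, test_dict):
        parts = test_name.split('/')
        current_map = tests
        for part in parts[:-1]:
            # Create intermediate directory nodes on demand.
            current_map = current_map.setdefault(part, {})
        current_map[parts[-1]] = test_dict

    tests = {}
    add_result(tests, 'fast/dom/example.html',
               {'expected': 'PASS', 'actual': 'TEXT'})
    # tests == {'fast': {'dom': {'example.html':
    #                    {'expected': 'PASS', 'actual': 'TEXT'}}}}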