#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

6 """Script to read in updates in JSON form from the layout test dashboard | |
7 and apply them to test_expectations.txt. | |
8 | |
9 Usage: | |
10 1. Go to http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/ | |
11 layout_tests/flakiness_dashboard.html#expectationsUpdate=true | |
12 2. Copy-paste that JSON into a local file. | |
13 3. python update_expectations_from_dashboard.py path/to/local/file | |
14 """ | |

import logging
import os
import sys

from layout_package import path_utils
from layout_package import test_expectations

sys.path.append(path_utils.PathFromBase('third_party'))
import simplejson


def UpdateExpectations(expectations, updates):
    """Applies the given updates to the expectations text and returns the
    updated test_expectations.txt contents."""
    updater = ExpectationsUpdater(None, None, 'WIN', False, False,
                                  expectations, True)
    return updater.UpdateBasedOnJSON(updates)


class OptionsAndExpectationsHolder(object):
    """Container for a list of options and a list of expectations for a given
    test."""

    def __init__(self, options, expectations):
        self.options = options
        self.expectations = expectations


class BuildInfo(OptionsAndExpectationsHolder):
    """Container for a list of options and expectations for a given test, as
    well as a map from build_type (e.g. debug/release) to a list of platforms
    (e.g. ["win", "linux"]).
    """

    def __init__(self, options, expectations, build_info):
        OptionsAndExpectationsHolder.__init__(self, options, expectations)
        self.build_info = build_info


class ExpectationsUpdater(test_expectations.TestExpectationsFile):
    """Class to update test_expectations.txt based on updates in the
    following form:
    {"test1.html": {
        "WIN RELEASE": {"missing": "FAIL TIMEOUT", "extra": "CRASH"},
        "WIN DEBUG": {"missing": "FAIL TIMEOUT"}},
     "test2.html": ...
    }
    """

    def _GetBuildTypesAndPlatforms(self, options):
        """Splits the options list into three lists: platforms, build_types
        and other_options."""
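        # For example (illustrative; the lower-case names and the PLATFORMS /
        # BUILD_TYPES / BASE_PLATFORMS sets come from TestExpectationsFile):
        #   ["win", "release", "slow"] -> (["win"], ["release"], ["slow"])
        #   ["slow"] -> (BASE_PLATFORMS, BUILD_TYPES, ["slow"])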
        platforms = []
        build_types = []
        other_options = []
        for option in options:
            if option in self.PLATFORMS:
                platforms.append(option)
            elif option in self.BUILD_TYPES:
                build_types.append(option)
            else:
                other_options.append(option)

        if not build_types:
            build_types = self.BUILD_TYPES

        if not platforms:
            # If there are no platforms specified, use the most generic
            # version of each platform name so we don't have to dedup them
            # later.
            platforms = self.BASE_PLATFORMS

        return (platforms, build_types, other_options)

    def _ApplyUpdatesToResults(self, test, results, update_json, expectations,
                               other_options):
        """Applies the updates from the JSON to the existing results in
        test_expectations.

        Args:
            test: The test to update.
            results: The results object to update.
            update_json: The parsed JSON object with the updates.
            expectations: The existing expectations for this test.
            other_options: The existing modifiers for this test, excluding
                platforms and build_types.
        """
        updates = update_json[test]
        for build_info in updates:
            platform, build_type = build_info.lower().split(' ')

            # If the platform/build_type is not currently listed for the test,
            # skip it, as this platform/build_type may be listed in another
            # line.
            if platform not in results or build_type not in results[platform]:
                continue

            these_results = results[platform][build_type]
            these_updates = updates[build_info]
            these_expectations = these_results.expectations
            these_options = these_results.options

            self._ApplyExtraUpdates(these_updates, these_options,
                                    these_expectations)
            self._ApplyMissingUpdates(test, these_updates, these_options,
                                      these_expectations)

    def _ApplyExtraUpdates(self, updates, options, expectations):
        """Removes the extraneous expectations/options listed in the updates
        object from the given options/expectations lists.
        """
        if "extra" not in updates:
            return

        items = updates["extra"].lower().split(' ')
        for item in items:
            if item in self.EXPECTATIONS:
                if item in expectations:
                    expectations.remove(item)
            elif item in options:
                options.remove(item)

    def _ApplyMissingUpdates(self, test, updates, options, expectations):
        """Applies any additional expectations/options in the updates object
        to the given options/expectations lists.
        """
        if "missing" not in updates:
            return

        items = updates["missing"].lower().split(' ')
        for item in items:
            if item == 'other':
                continue

            # Don't add TIMEOUT to SLOW tests. Automating that is too
            # complicated; instead, log tests that need manual attention.
            if ((item == "timeout" and
                 ("slow" in options or "slow" in items)) or
                (item == "slow" and
                 ("timeout" in expectations or "timeout" in items))):
                logging.info("NEEDS MANUAL ATTENTION: %s may need "
                             "to be marked TIMEOUT or SLOW." % test)
            elif item in self.EXPECTATIONS:
                if item not in expectations:
                    expectations.append(item)
                if ("fail" in expectations and
                        item in ("image+text", "image", "text")):
                    expectations.remove("fail")
            elif item not in options:
                options.append(item)

    def _AppendPlatform(self, item, build_type, platform):
        """Appends the given build_type and platform to the BuildInfo item.
        """
        build_info = item.build_info
        if build_type not in build_info:
            build_info[build_type] = []
        build_info[build_type].append(platform)

    def _GetUpdatesDedupedByMatchingOptionsAndExpectations(self, results):
        """Converts the results, which map
        results[platform][build_type] -> OptionsAndExpectationsHolder,
        to BuildInfo objects, deduping platform/build_type pairs that have
        the same expectations and options.
        """
        updates = []
        for platform in results:
            for build_type in results[platform]:
                options = results[platform][build_type].options
                expectations = results[platform][build_type].expectations

                found_match = False
                for update in updates:
                    if (update.options == options and
                            update.expectations == expectations):
                        self._AppendPlatform(update, build_type, platform)
                        found_match = True
                        break

                if found_match:
                    continue

                update = BuildInfo(options, expectations, {})
                self._AppendPlatform(update, build_type, platform)
                updates.append(update)

        return self._RoundUpFlakyUpdates(updates)

    def _HasMajorityBuildConfigurations(self, candidate, candidate2):
        """Returns the set of platforms covered by the candidate BuildInfo if
        it represents all build configurations except the single one listed
        in candidate2, and None otherwise.
        For example, a test that is FAIL TIMEOUT on all bots except
        Win-Release, where it is just FAIL; or a test that is FAIL TIMEOUT on
        Mac-Release, Mac-Debug and Linux-Release, but only FAIL on
        Linux-Debug.
        """
        build_info = candidate.build_info
        if "release" not in build_info or "debug" not in build_info:
            return None

        release_set = set(build_info["release"])
        debug_set = set(build_info["debug"])
        if len(release_set - debug_set) == 1:
            full_set = release_set
            partial_set = debug_set
            needed_build_type = "debug"
        elif len(debug_set - release_set) == 1:
            full_set = debug_set
            partial_set = release_set
            needed_build_type = "release"
        else:
            return None

        build_info2 = candidate2.build_info
        if needed_build_type not in build_info2:
            return None

        build_type = None
        for this_build_type in build_info2:
            # This can only work if candidate2 has a single build_type.
            if build_type:
                return None
            build_type = this_build_type

        if set(build_info2[needed_build_type]) == full_set - partial_set:
            return full_set
        else:
            return None

    def _RoundUpFlakyUpdates(self, updates):
        """Consolidates the updates into one update if 5/6 results are flaky
        and the 6th has a subset of the flaky results (i.e. it just didn't
        happen to flake), or if 3/4 results are flaky and the 4th has a
        subset of the flaky results.
        """
        if len(updates) != 2:
            return updates

        item1, item2 = updates
        candidate = None
        candidate_platforms = self._HasMajorityBuildConfigurations(item1,
                                                                   item2)
        if candidate_platforms:
            candidate = item1
        else:
            candidate_platforms = self._HasMajorityBuildConfigurations(item2,
                                                                       item1)
            if candidate_platforms:
                candidate = item2

        if candidate:
            options1 = set(item1.options)
            options2 = set(item2.options)
            expectations1 = set(item1.expectations)
            if not expectations1:
                expectations1.add("pass")
            expectations2 = set(item2.expectations)
            if not expectations2:
                expectations2.add("pass")

            options_union = options1 | options2
            expectations_union = expectations1 | expectations2
            # If the candidate's options and expectations are equal to their
            # respective unions, then we can round up to include the
            # remaining configuration.
            if ((candidate == item1 and options1 == options_union and
                 expectations1 == expectations_union and expectations2) or
                (candidate == item2 and options2 == options_union and
                 expectations2 == expectations_union and expectations1)):
                for build_type in self.BUILD_TYPES:
                    candidate.build_info[build_type] = list(
                        candidate_platforms)
                updates = [candidate]
        return updates

    def UpdateBasedOnJSON(self, update_json):
        """Updates the expectations based on the update_json, which is of the
        following form:
        {"1.html": {
            "WIN DEBUG": {"extra": "FAIL", "missing": "PASS"},
            "WIN RELEASE": {"extra": "FAIL"}
        }}
        """
        output = []

        comment_lines = []
        removed_test_on_previous_line = False
        lineno = 0
        for line in self._GetIterableExpectations():
            lineno += 1
            test, options, expectations = self.ParseExpectationsLine(line,
                                                                     lineno)

            # If there are no updates for this test, then output the line
            # unmodified.
            if test not in update_json:
                if test:
                    self._WriteCompletedLines(output, comment_lines, line)
                else:
                    if removed_test_on_previous_line:
                        removed_test_on_previous_line = False
                        comment_lines = []
                    comment_lines.append(line)
                continue

            platforms, build_types, other_options = \
                self._GetBuildTypesAndPlatforms(options)

            updates = update_json[test]
            has_updates_for_this_line = False
            for build_info in updates:
                platform, build_type = build_info.lower().split(' ')
                if platform in platforms and build_type in build_types:
                    has_updates_for_this_line = True

            # If the updates for this test don't apply to the platforms /
            # build_types listed in this line, then output the line
            # unmodified.
            if not has_updates_for_this_line:
                self._WriteCompletedLines(output, comment_lines, line)
                continue

            results = {}
            for platform in platforms:
                results[platform] = {}
                for build_type in build_types:
                    results[platform][build_type] = \
                        OptionsAndExpectationsHolder(other_options[:],
                                                     expectations[:])

            self._ApplyUpdatesToResults(test, results, update_json,
                                        expectations, other_options)

            deduped_updates = \
                self._GetUpdatesDedupedByMatchingOptionsAndExpectations(
                    results)
            removed_test_on_previous_line = not self._WriteUpdates(
                output, comment_lines, test, deduped_updates)

        # Append any comment/whitespace lines at the end of test_expectations.
        output.extend(comment_lines)
        return "".join(output)

    def _WriteUpdates(self, output, comment_lines, test, updates):
        """Writes the updates to the output.

        Args:
            output: List to append updates to.
            comment_lines: Comments that come before this test that should be
                prepended iff any test lines are written out.
            test: The test being updated.
            updates: List of BuildInfo instances that represent the final
                values for this test line.
        """
        wrote_any_lines = False
        for update in updates:
            options = update.options
            expectations = update.expectations

            has_meaningful_modifier = False
            for option in options:
                if option in self.MODIFIERS:
                    has_meaningful_modifier = True
                    break

            has_non_pass_expectation = False
            for expectation in expectations:
                if expectation != "pass":
                    has_non_pass_expectation = True
                    break

            # If this test is only left with platform, build_type, bug number
            # and a PASS or no expectation, then we can exclude it from
            # test_expectations.
            if not has_meaningful_modifier and not has_non_pass_expectation:
                continue

            if not has_non_pass_expectation:
                expectations = ["pass"]

            missing_build_types = list(self.BUILD_TYPES)
            sentinel = None
            for build_type in update.build_info:
                if not sentinel:
                    sentinel = update.build_info[build_type]
                # Remove build_types whose list of platforms matches the
                # sentinel's.
                if sentinel == update.build_info[build_type]:
                    missing_build_types.remove(build_type)

            has_all_build_types = not missing_build_types
            if has_all_build_types:
                # All build_types share the same platform list, so one line
                # (with the build_type omitted) covers them all.
                self._WriteLine(output, comment_lines, update, options,
                                build_type, expectations, test,
                                has_all_build_types)
                wrote_any_lines = True
            else:
                for build_type in update.build_info:
                    self._WriteLine(output, comment_lines, update, options,
                                    build_type, expectations, test,
                                    has_all_build_types)
                    wrote_any_lines = True

        return wrote_any_lines

    def _WriteCompletedLines(self, output, comment_lines, test_line=None):
        """Writes the comment_lines and test_line to the output and empties
        out the comment_lines."""
        output.extend(comment_lines)
        del comment_lines[:]
        if test_line:
            output.append(test_line)

    def _GetPlatform(self, platforms):
        """Returns the platform to use. If all platforms are listed, returns
        the empty string, as that's what we want to list in
        test_expectations.txt.

        Args:
            platforms: List of lower-case platform names.
        """
        platforms.sort()
        if platforms == list(self.BASE_PLATFORMS):
            return ""
        else:
            return " ".join(platforms)

    def _WriteLine(self, output, comment_lines, update, options, build_type,
                   expectations, test, exclude_build_type):
        """Writes a test_expectations.txt line.

        Args:
            output: List to append new lines to.
            comment_lines: List of lines to prepend before the new line.
            update: The BuildInfo object for this line.
            options: The modifiers to list, excluding platform and build_type.
            build_type: The build_type to list on the line.
            expectations: The expectations to list.
            test: The test name.
            exclude_build_type: Whether to omit the build_type because the
                line applies to all build types.
        """
        line = options[:]

        platforms = self._GetPlatform(update.build_info[build_type])
        if platforms:
            line.append(platforms)
        if not exclude_build_type:
            line.append(build_type)

        line = [x.upper() for x in line]
        expectations = [x.upper() for x in expectations]

        line = line + [":", test, "="] + expectations
        self._WriteCompletedLines(output, comment_lines, " ".join(line) + "\n")


def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(message)s')

    updates = simplejson.load(open(sys.argv[1]))

    path_to_expectations = path_utils.GetAbsolutePath(
        os.path.dirname(sys.argv[0]))
    path_to_expectations = os.path.join(path_to_expectations,
                                        "test_expectations.txt")

    old_expectations = open(path_to_expectations).read()
    new_expectations = UpdateExpectations(old_expectations, updates)
    open(path_to_expectations, 'w').write(new_expectations)


if __name__ == '__main__':
    main()