| OLD | NEW |
| (Empty) |
| 1 # Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. | |
| 2 # Use of this source code is governed by a BSD-style license that can be | |
| 3 # found in the LICENSE file. | |
| 4 | |
| 5 """A helper class for reading in and dealing with tests expectations | |
| 6 for layout tests. | |
| 7 """ | |
| 8 | |
| 9 import logging | |
| 10 import os | |
| 11 import re | |
| 12 import sys | |
| 13 import time | |
| 14 import path_utils | |
| 15 | |
| 16 sys.path.append(path_utils.PathFromBase('third_party')) | |
| 17 import simplejson | |
| 18 | |
# Test expectation and modifier constants.
# Each name is bound to a distinct small integer (0-15); the values are used
# as keys in the lookup tables built by TestExpectationsFile below.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
 DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)

# Test expectation file update action constants
# (returned by TestExpectationsFile._GetPlatformUpdateAction).
(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
| 25 | |
| 26 | |
class TestExpectations:
    """Facade over TestExpectationsFile: loads the expectations file for a
    given platform/build and answers queries about individual tests."""
    TEST_LIST = "test_expectations.txt"

    def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode,
                 tests_are_present=True):
        """Reads the test expectations files from the given directory."""
        path = os.path.join(directory, self.TEST_LIST)
        self._expected_failures = TestExpectationsFile(path, tests, platform,
            is_debug_mode, is_lint_mode, tests_are_present=tests_are_present)

    # TODO(ojan): Allow for removing skipped tests when getting the list of
    # tests to run, but not when getting metrics.
    # TODO(ojan): Replace the Get* calls here with the more sane API exposed
    # by TestExpectationsFile below. Maybe merge the two classes entirely?

    def GetExpectationsJsonForAllPlatforms(self):
        return self._expected_failures.GetExpectationsJsonForAllPlatforms()

    def GetRebaseliningFailures(self):
        """Returns the union of REBASELINE-marked tests across every
        failing expectation kind."""
        rebaselining = set()
        for expectation in (FAIL, IMAGE, TEXT, IMAGE_PLUS_TEXT):
            rebaselining |= self._expected_failures.GetTestSet(REBASELINE,
                                                               expectation)
        return rebaselining

    def GetOptions(self, test):
        return self._expected_failures.GetOptions(test)

    def GetExpectations(self, test):
        return self._expected_failures.GetExpectations(test)

    def GetExpectationsString(self, test):
        """Returns the expectations for the given test as an uppercase
        string. If there are no expectations for the test, then "PASS"
        is returned."""
        names = []
        for expectation in self.GetExpectations(test):
            # Reverse-map the constant back to its keyword; the values in
            # EXPECTATIONS are unique, so the first hit is the only one.
            for keyword, value in TestExpectationsFile.EXPECTATIONS.items():
                if value == expectation:
                    names.append(keyword)
                    break

        return " ".join(names).upper()

    def GetTimelineForTest(self, test):
        return self._expected_failures.GetTimelineForTest(test)

    def GetTestsWithResultType(self, result_type):
        return self._expected_failures.GetTestsWithResultType(result_type)

    def GetTestsWithTimeline(self, timeline):
        return self._expected_failures.GetTestsWithTimeline(timeline)

    def MatchesAnExpectedResult(self, test, result):
        """Returns whether we got one of the expected results for this test."""
        expected = self._expected_failures.GetExpectations(test)
        if result in expected:
            return True
        # FAIL acts as a catch-all for the specific failure kinds.
        if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected:
            return True
        if result == MISSING and self.IsRebaselining(test):
            return True
        return (result == SKIP and
                self._expected_failures.HasModifier(test, SKIP))

    def IsRebaselining(self, test):
        return self._expected_failures.HasModifier(test, REBASELINE)

    def HasModifier(self, test, modifier):
        return self._expected_failures.HasModifier(test, modifier)

    def RemovePlatformFromFile(self, tests, platform, backup=False):
        return self._expected_failures.RemovePlatformFromFile(tests, platform,
                                                              backup)
| 100 | |
| 101 | |
def StripComments(line):
    """Strips a '//' comment from a line.

    Returns None if the remaining line is empty, otherwise the line with
    leading and trailing spaces removed and all other runs of whitespace
    collapsed to a single space.
    """

    commentIndex = line.find('//')
    # Bug fix: this previously used 'commentIndex is -1', which compares
    # object identity and only works because CPython caches small ints.
    if commentIndex == -1:
        commentIndex = len(line)

    line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
    if line == '':
        return None
    else:
        return line
| 116 | |
| 117 | |
class ModifiersAndExpectations:
    """Pair of a test's modifier string and expectation string; serialized
    to JSON by ExpectationsJsonEncoder."""

    def __init__(self, modifiers, expectations):
        self.modifiers, self.expectations = modifiers, expectations
| 125 | |
| 126 | |
class ExpectationsJsonEncoder(simplejson.JSONEncoder):
    """JSON encoder that can handle ModifiersAndExpectations objects."""

    def default(self, obj):
        """Serializes ModifiersAndExpectations as a plain dict; defers
        anything else to the base class, which raises TypeError."""
        if isinstance(obj, ModifiersAndExpectations):
            return {"modifiers": obj.modifiers,
                    "expectations": obj.expectations}
        else:
            # Bug fix: the fallback previously referenced the bare name
            # JSONEncoder, which is undefined in this module and raised
            # NameError instead of the intended TypeError.
            return simplejson.JSONEncoder.default(self, obj)
| 137 | |
| 138 | |
class TestExpectationsFile:
    """Test expectation files consist of lines with specifications of what
    to expect from layout test cases. The test cases can be directories
    in which case the expectations apply to all test cases in that
    directory and any subdirectory. The format of the file is along the
    lines of:

      LayoutTests/fast/js/fixme.js = FAIL
      LayoutTests/fast/js/flaky.js = FAIL PASS
      LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS
      ...

    To add other options:
      SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
      DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
      DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
      LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
      DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS

    SKIP: Doesn't run the test.
    SLOW: The test takes a long time to run, but does not timeout
      indefinitely.
    WONTFIX: For tests that we never intend to pass on a given platform.
    DEFER: Test does not count in our statistics for the current release.
    DEBUG: Expectations apply only to the debug build.
    RELEASE: Expectations apply only to release build.
    LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these
      platforms.

    Notes:
      -A test cannot be both SLOW and TIMEOUT
      -A test cannot be both DEFER and WONTFIX
      -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
       a migratory state that currently means either IMAGE, TEXT, or
       IMAGE+TEXT. Once we have finished migrating the expectations, we will
       change FAIL to have the meaning of IMAGE+TEXT and remove the
       IMAGE+TEXT identifier.
      -A test can be included twice, but not via the same path.
      -If a test is included twice, then the more precise path wins.
      -CRASH tests cannot be DEFER or WONTFIX
    """

    # Maps the lowercase expectation keywords that may appear on the right
    # of '=' in the file to their integer constants.
    EXPECTATIONS = {'pass': PASS,
                    'fail': FAIL,
                    'text': TEXT,
                    'image': IMAGE,
                    'image+text': IMAGE_PLUS_TEXT,
                    'timeout': TIMEOUT,
                    'crash': CRASH,
                    'missing': MISSING}

    # (singular, plural) human-readable descriptions for each expectation.
    EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
                                PASS: ('pass', 'passes'),
                                FAIL: ('failure', 'failures'),
                                TEXT: ('text diff mismatch',
                                       'text diff mismatch'),
                                IMAGE: ('image mismatch', 'image mismatch'),
                                IMAGE_PLUS_TEXT: ('image and text mismatch',
                                                  'image and text mismatch'),
                                CRASH: ('test shell crash',
                                        'test shell crashes'),
                                TIMEOUT: ('test timed out',
                                          'tests timed out'),
                                MISSING: ('no expected result found',
                                          'no expected results found')}

    EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
                         TEXT, IMAGE, FAIL, SKIP)

    # The win-* entries are more specific variants of 'win'.
    BASE_PLATFORMS = ('linux', 'mac', 'win')
    PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7')

    BUILD_TYPES = ('debug', 'release')

    # Maps the lowercase modifier keywords that may appear to the left of
    # ':' in the file to their integer constants.
    MODIFIERS = {'skip': SKIP,
                 'wontfix': WONTFIX,
                 'defer': DEFER,
                 'slow': SLOW,
                 'rebaseline': REBASELINE,
                 'none': NONE}

    TIMELINES = {'wontfix': WONTFIX,
                 'now': NOW,
                 'defer': DEFER}

    RESULT_TYPES = {'skip': SKIP,
                    'pass': PASS,
                    'fail': FAIL,
                    'flaky': FLAKY}
| 226 | |
    def __init__(self, path, full_test_list, platform, is_debug_mode,
                 is_lint_mode, expectations_as_str=None, suppress_errors=False,
                 tests_are_present=True):
        """
        path: The path to the expectation file. An error is thrown if a test
            is listed more than once.
        full_test_list: The list of all tests to be run pending processing of
            the expectations for those tests.
        platform: Which platform from self.PLATFORMS to filter tests for.
        is_debug_mode: Whether we are testing a test_shell built in debug
            mode.
        is_lint_mode: Whether this is just linting test_expectations.txt.
        expectations_as_str: Contents of the expectations file. Used instead
            of the path. This makes unittesting sane.
        suppress_errors: Whether to suppress lint errors.
        tests_are_present: Whether the test files are present in the local
            filesystem. The LTTF Dashboard uses False here to avoid having to
            keep a local copy of the tree.
        """

        self._path = path
        self._expectations_as_str = expectations_as_str
        self._is_lint_mode = is_lint_mode
        self._tests_are_present = tests_are_present
        self._full_test_list = full_test_list
        self._suppress_errors = suppress_errors
        self._errors = []
        self._non_fatal_errors = []
        # Normalize the platform name (e.g. 'chromium-win' -> 'win'); fail
        # fast on anything not listed in self.PLATFORMS.
        self._platform = self.ToTestPlatformName(platform)
        if self._platform is None:
            raise Exception("Unknown platform '%s'" % (platform))
        self._is_debug_mode = is_debug_mode

        # Maps relative test paths as listed in the expectations file to a
        # list of maps containing modifiers and expectations for each time
        # the test is listed in the expectations file.
        self._all_expectations = {}

        # Maps a test to its list of expectations.
        self._test_to_expectations = {}

        # Maps a test to its list of options (string values)
        self._test_to_options = {}

        # Maps a test to its list of modifiers: the constants associated with
        # the options minus any bug or platform strings
        self._test_to_modifiers = {}

        # Maps a test to the base path that it was listed with in the list.
        self._test_list_paths = {}

        # Reverse indices: each maps a constant to the set of tests tagged
        # with it.
        self._modifier_to_tests = self._DictOfSets(self.MODIFIERS)
        self._expectation_to_tests = self._DictOfSets(self.EXPECTATIONS)
        self._timeline_to_tests = self._DictOfSets(self.TIMELINES)
        self._result_type_to_tests = self._DictOfSets(self.RESULT_TYPES)

        # Parse immediately; errors are aggregated and raised from _Read.
        self._Read(self._GetIterableExpectations())
| 283 | |
| 284 def _DictOfSets(self, strings_to_constants): | |
| 285 """Takes a dict of strings->constants and returns a dict mapping | |
| 286 each constant to an empty set.""" | |
| 287 d = {} | |
| 288 for c in strings_to_constants.values(): | |
| 289 d[c] = set() | |
| 290 return d | |
| 291 | |
| 292 def _GetIterableExpectations(self): | |
| 293 """Returns an object that can be iterated over. Allows for not caring | |
| 294 about whether we're iterating over a file or a new-line separated | |
| 295 string.""" | |
| 296 if self._expectations_as_str: | |
| 297 iterable = [x + "\n" for x in | |
| 298 self._expectations_as_str.split("\n")] | |
| 299 # Strip final entry if it's empty to avoid added in an extra | |
| 300 # newline. | |
| 301 if iterable[len(iterable) - 1] == "\n": | |
| 302 return iterable[:len(iterable) - 1] | |
| 303 return iterable | |
| 304 else: | |
| 305 return open(self._path) | |
| 306 | |
| 307 def ToTestPlatformName(self, name): | |
| 308 """Returns the test expectation platform that will be used for a | |
| 309 given platform name, or None if there is no match.""" | |
| 310 chromium_prefix = 'chromium-' | |
| 311 name = name.lower() | |
| 312 if name.startswith(chromium_prefix): | |
| 313 name = name[len(chromium_prefix):] | |
| 314 if name in self.PLATFORMS: | |
| 315 return name | |
| 316 return None | |
| 317 | |
| 318 def GetTestSet(self, modifier, expectation=None, include_skips=True): | |
| 319 if expectation is None: | |
| 320 tests = self._modifier_to_tests[modifier] | |
| 321 else: | |
| 322 tests = (self._expectation_to_tests[expectation] & | |
| 323 self._modifier_to_tests[modifier]) | |
| 324 | |
| 325 if not include_skips: | |
| 326 tests = tests - self.GetTestSet(SKIP, expectation) | |
| 327 | |
| 328 return tests | |
| 329 | |
    def GetTestsWithResultType(self, result_type):
        """Returns the set of tests classified under result_type (one of
        the RESULT_TYPES constants: SKIP, PASS, FAIL, FLAKY)."""
        return self._result_type_to_tests[result_type]
| 332 | |
    def GetTestsWithTimeline(self, timeline):
        """Returns the set of tests on the given timeline (one of the
        TIMELINES constants: WONTFIX, NOW, DEFER)."""
        return self._timeline_to_tests[timeline]
| 335 | |
    def GetOptions(self, test):
        """This returns the entire set of options for the given test
        (the modifiers plus the BUGXXXX identifier). This is used by the
        LTTF dashboard. Raises KeyError for an unknown test."""
        return self._test_to_options[test]
| 341 | |
    def HasModifier(self, test, modifier):
        """Returns True if the test is tagged with the given modifier
        constant (e.g. SKIP, WONTFIX, REBASELINE)."""
        return test in self._modifier_to_tests[modifier]
| 344 | |
    def GetExpectations(self, test):
        """Returns the set of expectation constants for the given test.
        Raises KeyError for a test that was never registered."""
        return self._test_to_expectations[test]
| 347 | |
    def GetExpectationsJsonForAllPlatforms(self):
        """Serializes self._all_expectations (every parsed line, regardless
        of platform filtering) to a compact JSON string."""
        # Specify separators in order to get compact encoding.
        return ExpectationsJsonEncoder(separators=(',', ':')).encode(
            self._all_expectations)
| 352 | |
    def Contains(self, test):
        """Returns True if any expectations were recorded for the test."""
        return test in self._test_to_expectations
| 355 | |
| 356 def RemovePlatformFromFile(self, tests, platform, backup=False): | |
| 357 """Remove the platform option from test expectations file. | |
| 358 | |
| 359 If a test is in the test list and has an option that matches the given | |
| 360 platform, remove the matching platform and save the updated test back | |
| 361 to the file. If no other platforms remaining after removal, delete the | |
| 362 test from the file. | |
| 363 | |
| 364 Args: | |
| 365 tests: list of tests that need to update.. | |
| 366 platform: which platform option to remove. | |
| 367 backup: if true, the original test expectations file is saved as | |
| 368 [self.TEST_LIST].orig.YYYYMMDDHHMMSS | |
| 369 | |
| 370 Returns: | |
| 371 no | |
| 372 """ | |
| 373 | |
| 374 new_file = self._path + '.new' | |
| 375 logging.debug('Original file: "%s"', self._path) | |
| 376 logging.debug('New file: "%s"', new_file) | |
| 377 f_orig = self._GetIterableExpectations() | |
| 378 f_new = open(new_file, 'w') | |
| 379 | |
| 380 tests_removed = 0 | |
| 381 tests_updated = 0 | |
| 382 lineno = 0 | |
| 383 for line in f_orig: | |
| 384 lineno += 1 | |
| 385 action = self._GetPlatformUpdateAction(line, lineno, tests, | |
| 386 platform) | |
| 387 if action == NO_CHANGE: | |
| 388 # Save the original line back to the file | |
| 389 logging.debug('No change to test: %s', line) | |
| 390 f_new.write(line) | |
| 391 elif action == REMOVE_TEST: | |
| 392 tests_removed += 1 | |
| 393 logging.info('Test removed: %s', line) | |
| 394 elif action == REMOVE_PLATFORM: | |
| 395 parts = line.split(':') | |
| 396 new_options = parts[0].replace(platform.upper() + ' ', '', 1) | |
| 397 new_line = ('%s:%s' % (new_options, parts[1])) | |
| 398 f_new.write(new_line) | |
| 399 tests_updated += 1 | |
| 400 logging.info('Test updated: ') | |
| 401 logging.info(' old: %s', line) | |
| 402 logging.info(' new: %s', new_line) | |
| 403 elif action == ADD_PLATFORMS_EXCEPT_THIS: | |
| 404 parts = line.split(':') | |
| 405 new_options = parts[0] | |
| 406 for p in self.PLATFORMS: | |
| 407 if not p == platform: | |
| 408 new_options += p.upper() + ' ' | |
| 409 new_line = ('%s:%s' % (new_options, parts[1])) | |
| 410 f_new.write(new_line) | |
| 411 tests_updated += 1 | |
| 412 logging.info('Test updated: ') | |
| 413 logging.info(' old: %s', line) | |
| 414 logging.info(' new: %s', new_line) | |
| 415 else: | |
| 416 logging.error('Unknown update action: %d; line: %s', | |
| 417 action, line) | |
| 418 | |
| 419 logging.info('Total tests removed: %d', tests_removed) | |
| 420 logging.info('Total tests updated: %d', tests_updated) | |
| 421 | |
| 422 f_orig.close() | |
| 423 f_new.close() | |
| 424 | |
| 425 if backup: | |
| 426 date_suffix = time.strftime('%Y%m%d%H%M%S', | |
| 427 time.localtime(time.time())) | |
| 428 backup_file = ('%s.orig.%s' % (self._path, date_suffix)) | |
| 429 if os.path.exists(backup_file): | |
| 430 os.remove(backup_file) | |
| 431 logging.info('Saving original file to "%s"', backup_file) | |
| 432 os.rename(self._path, backup_file) | |
| 433 else: | |
| 434 os.remove(self._path) | |
| 435 | |
| 436 logging.debug('Saving new file to "%s"', self._path) | |
| 437 os.rename(new_file, self._path) | |
| 438 return True | |
| 439 | |
| 440 def ParseExpectationsLine(self, line, lineno): | |
| 441 """Parses a line from test_expectations.txt and returns a tuple | |
| 442 with the test path, options as a list, expectations as a list.""" | |
| 443 line = StripComments(line) | |
| 444 if not line: | |
| 445 return (None, None, None) | |
| 446 | |
| 447 options = [] | |
| 448 if line.find(":") is -1: | |
| 449 test_and_expectation = line.split("=") | |
| 450 else: | |
| 451 parts = line.split(":") | |
| 452 options = self._GetOptionsList(parts[0]) | |
| 453 test_and_expectation = parts[1].split('=') | |
| 454 | |
| 455 test = test_and_expectation[0].strip() | |
| 456 if (len(test_and_expectation) is not 2): | |
| 457 self._AddError(lineno, "Missing expectations.", | |
| 458 test_and_expectation) | |
| 459 expectations = None | |
| 460 else: | |
| 461 expectations = self._GetOptionsList(test_and_expectation[1]) | |
| 462 | |
| 463 return (test, options, expectations) | |
| 464 | |
    def _GetPlatformUpdateAction(self, line, lineno, tests, platform):
        """Check the platform option and return the action needs to be taken.

        Args:
          line: current line in test expectations file.
          lineno: current line number of line
          tests: list of tests that need to update..
          platform: which platform option to remove.

        Returns:
          NO_CHANGE: no change to the line (comments, test not in the list
              etc)
          REMOVE_TEST: remove the test from file.
          REMOVE_PLATFORM: remove this platform option from the test.
          ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
        """
        test, options, expectations = self.ParseExpectationsLine(line, lineno)
        if not test or test not in tests:
            return NO_CHANGE

        has_any_platform = False
        for option in options:
            if option in self.PLATFORMS:
                has_any_platform = True
                # Another platform is listed alongside the target one, so
                # the line survives with just this platform stripped.
                if not option == platform:
                    return REMOVE_PLATFORM

        # If there is no platform specified, then it means apply to all
        # platforms. Return the action to add all the platforms except this
        # one.
        if not has_any_platform:
            return ADD_PLATFORMS_EXCEPT_THIS

        # The target platform was the only platform listed: drop the line.
        return REMOVE_TEST
| 498 | |
| 499 def _HasValidModifiersForCurrentPlatform(self, options, lineno, | |
| 500 test_and_expectations, modifiers): | |
| 501 """Returns true if the current platform is in the options list or if | |
| 502 no platforms are listed and if there are no fatal errors in the | |
| 503 options list. | |
| 504 | |
| 505 Args: | |
| 506 options: List of lowercase options. | |
| 507 lineno: The line in the file where the test is listed. | |
| 508 test_and_expectations: The path and expectations for the test. | |
| 509 modifiers: The set to populate with modifiers. | |
| 510 """ | |
| 511 has_any_platform = False | |
| 512 has_bug_id = False | |
| 513 for option in options: | |
| 514 if option in self.MODIFIERS: | |
| 515 modifiers.add(option) | |
| 516 elif option in self.PLATFORMS: | |
| 517 has_any_platform = True | |
| 518 elif option.startswith('bug'): | |
| 519 has_bug_id = True | |
| 520 elif option not in self.BUILD_TYPES: | |
| 521 self._AddError(lineno, 'Invalid modifier for test: %s' % | |
| 522 option, test_and_expectations) | |
| 523 | |
| 524 if has_any_platform and not self._MatchPlatform(options): | |
| 525 return False | |
| 526 | |
| 527 if not has_bug_id and 'wontfix' not in options: | |
| 528 # TODO(ojan): Turn this into an AddError call once all the | |
| 529 # tests have BUG identifiers. | |
| 530 self._LogNonFatalError(lineno, 'Test lacks BUG modifier.', | |
| 531 test_and_expectations) | |
| 532 | |
| 533 if 'release' in options or 'debug' in options: | |
| 534 if self._is_debug_mode and 'debug' not in options: | |
| 535 return False | |
| 536 if not self._is_debug_mode and 'release' not in options: | |
| 537 return False | |
| 538 | |
| 539 if 'wontfix' in options and 'defer' in options: | |
| 540 self._AddError(lineno, 'Test cannot be both DEFER and WONTFIX.', | |
| 541 test_and_expectations) | |
| 542 | |
| 543 if self._is_lint_mode and 'rebaseline' in options: | |
| 544 self._AddError(lineno, 'REBASELINE should only be used for running' | |
| 545 'rebaseline.py. Cannot be checked in.', test_and_expectations) | |
| 546 | |
| 547 return True | |
| 548 | |
| 549 def _MatchPlatform(self, options): | |
| 550 """Match the list of options against our specified platform. If any | |
| 551 of the options prefix-match self._platform, return True. This handles | |
| 552 the case where a test is marked WIN and the platform is WIN-VISTA. | |
| 553 | |
| 554 Args: | |
| 555 options: list of options | |
| 556 """ | |
| 557 for opt in options: | |
| 558 if self._platform.startswith(opt): | |
| 559 return True | |
| 560 return False | |
| 561 | |
| 562 def _AddToAllExpectations(self, test, options, expectations): | |
| 563 # Make all paths unix-style so the dashboard doesn't need to. | |
| 564 test = test.replace('\\', '/') | |
| 565 if not test in self._all_expectations: | |
| 566 self._all_expectations[test] = [] | |
| 567 self._all_expectations[test].append( | |
| 568 ModifiersAndExpectations(options, expectations)) | |
| 569 | |
    def _Read(self, expectations):
        """For each test in an expectations iterable, generate the
        expectations for it.

        Populates every lookup table (expectations, modifiers, timelines,
        result types), then reports accumulated lint problems: non-fatal
        ones are logged, fatal ones raise a single aggregated SyntaxError.
        Tests never mentioned in the file are finally registered as plain
        PASS.

        Args:
          expectations: iterable of expectation-file lines (an open file or
              a list of newline-terminated strings).
        """
        lineno = 0
        for line in expectations:
            lineno += 1

            # NOTE: this rebinds the 'expectations' parameter inside the
            # loop; iteration is unaffected because the for statement holds
            # its own iterator over the original object.
            test_list_path, options, expectations = \
                self.ParseExpectationsLine(line, lineno)
            if not expectations:
                continue

            self._AddToAllExpectations(test_list_path,
                                       " ".join(options).upper(),
                                       " ".join(expectations).upper())

            modifiers = set()
            if options and not self._HasValidModifiersForCurrentPlatform(
                options, lineno, test_list_path, modifiers):
                continue

            expectations = self._ParseExpectations(expectations, lineno,
                test_list_path)

            if 'slow' in options and TIMEOUT in expectations:
                self._AddError(lineno,
                    'A test can not be both slow and timeout. If it times out '
                    'indefinitely, then it should be just timeout.',
                    test_list_path)

            full_path = os.path.join(path_utils.LayoutTestsDir(),
                                     test_list_path)
            full_path = os.path.normpath(full_path)
            # WebKit's way of skipping tests is to add a -disabled suffix.
            # So we should consider the path existing if the path or the
            # -disabled version exists.
            if (self._tests_are_present and not os.path.exists(full_path)
                and not os.path.exists(full_path + '-disabled')):
                # Log a non fatal error here since you hit this case any
                # time you update test_expectations.txt without syncing
                # the LayoutTests directory
                self._LogNonFatalError(lineno, 'Path does not exist.',
                                       test_list_path)
                continue

            if not self._full_test_list:
                tests = [test_list_path]
            else:
                tests = self._ExpandTests(test_list_path)

            self._AddTests(tests, expectations, test_list_path, lineno,
                           modifiers, options)

        if not self._suppress_errors and (
            len(self._errors) or len(self._non_fatal_errors)):
            if self._is_debug_mode:
                build_type = 'DEBUG'
            else:
                build_type = 'RELEASE'
            print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \
                % (self._platform.upper(), build_type)

            for error in self._non_fatal_errors:
                logging.error(error)
            if len(self._errors):
                raise SyntaxError('\n'.join(map(str, self._errors)))

        # Now add in the tests that weren't present in the expectations file
        expectations = set([PASS])
        options = []
        modifiers = []
        if self._full_test_list:
            for test in self._full_test_list:
                if not test in self._test_list_paths:
                    self._AddTest(test, modifiers, expectations, options)
| 645 | |
| 646 def _GetOptionsList(self, listString): | |
| 647 return [part.strip().lower() for part in listString.strip().split(' ')] | |
| 648 | |
| 649 def _ParseExpectations(self, expectations, lineno, test_list_path): | |
| 650 result = set() | |
| 651 for part in expectations: | |
| 652 if not part in self.EXPECTATIONS: | |
| 653 self._AddError(lineno, 'Unsupported expectation: %s' % part, | |
| 654 test_list_path) | |
| 655 continue | |
| 656 expectation = self.EXPECTATIONS[part] | |
| 657 result.add(expectation) | |
| 658 return result | |
| 659 | |
| 660 def _ExpandTests(self, test_list_path): | |
| 661 """Convert the test specification to an absolute, normalized | |
| 662 path and make sure directories end with the OS path separator.""" | |
| 663 path = os.path.join(path_utils.LayoutTestsDir(), test_list_path) | |
| 664 path = os.path.normpath(path) | |
| 665 path = self._FixDir(path) | |
| 666 | |
| 667 result = [] | |
| 668 for test in self._full_test_list: | |
| 669 if test.startswith(path): | |
| 670 result.append(test) | |
| 671 return result | |
| 672 | |
| 673 def _FixDir(self, path): | |
| 674 """Check to see if the path points to a directory, and if so, append | |
| 675 the directory separator if necessary.""" | |
| 676 if self._tests_are_present: | |
| 677 if os.path.isdir(path): | |
| 678 path = os.path.join(path, '') | |
| 679 else: | |
| 680 # If we can't check the filesystem to see if this is a directory, | |
| 681 # we assume that files w/o an extension are directories. | |
| 682 # TODO(dpranke): What happens w/ LayoutTests/css2.1 ? | |
| 683 if os.path.splitext(path)[1] == '': | |
| 684 path = os.path.join(path, '') | |
| 685 return path | |
| 686 | |
| 687 def _AddTests(self, tests, expectations, test_list_path, lineno, modifiers, | |
| 688 options): | |
| 689 for test in tests: | |
| 690 if self._AlreadySeenTest(test, test_list_path, lineno): | |
| 691 continue | |
| 692 | |
| 693 self._ClearExpectationsForTest(test, test_list_path) | |
| 694 self._AddTest(test, modifiers, expectations, options) | |
| 695 | |
| 696 def _AddTest(self, test, modifiers, expectations, options): | |
| 697 """Sets the expected state for a given test. | |
| 698 | |
| 699 This routine assumes the test has not been added before. If it has, | |
| 700 use _ClearExpectationsForTest() to reset the state prior to | |
| 701 calling this. | |
| 702 | |
| 703 Args: | |
| 704 test: test to add | |
| 705 modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.) | |
| 706 expectations: sequence of expectations (PASS, IMAGE, etc.) | |
| 707 options: sequence of keywords and bug identifiers.""" | |
| 708 self._test_to_expectations[test] = expectations | |
| 709 for expectation in expectations: | |
| 710 self._expectation_to_tests[expectation].add(test) | |
| 711 | |
| 712 self._test_to_options[test] = options | |
| 713 self._test_to_modifiers[test] = set() | |
| 714 for modifier in modifiers: | |
| 715 mod_value = self.MODIFIERS[modifier] | |
| 716 self._modifier_to_tests[mod_value].add(test) | |
| 717 self._test_to_modifiers[test].add(mod_value) | |
| 718 | |
| 719 if 'wontfix' in modifiers: | |
| 720 self._timeline_to_tests[WONTFIX].add(test) | |
| 721 elif 'defer' in modifiers: | |
| 722 self._timeline_to_tests[DEFER].add(test) | |
| 723 else: | |
| 724 self._timeline_to_tests[NOW].add(test) | |
| 725 | |
| 726 if 'skip' in modifiers: | |
| 727 self._result_type_to_tests[SKIP].add(test) | |
| 728 elif expectations == set([PASS]): | |
| 729 self._result_type_to_tests[PASS].add(test) | |
| 730 elif len(expectations) > 1: | |
| 731 self._result_type_to_tests[FLAKY].add(test) | |
| 732 else: | |
| 733 self._result_type_to_tests[FAIL].add(test) | |
| 734 | |
| 735 def _ClearExpectationsForTest(self, test, test_list_path): | |
| 736 """Remove prexisting expectations for this test. | |
| 737 This happens if we are seeing a more precise path | |
| 738 than a previous listing. | |
| 739 """ | |
| 740 if test in self._test_list_paths: | |
| 741 self._test_to_expectations.pop(test, '') | |
| 742 self._RemoveFromSets(test, self._expectation_to_tests) | |
| 743 self._RemoveFromSets(test, self._modifier_to_tests) | |
| 744 self._RemoveFromSets(test, self._timeline_to_tests) | |
| 745 self._RemoveFromSets(test, self._result_type_to_tests) | |
| 746 | |
| 747 self._test_list_paths[test] = os.path.normpath(test_list_path) | |
| 748 | |
| 749 def _RemoveFromSets(self, test, dict): | |
| 750 """Removes the given test from the sets in the dictionary. | |
| 751 | |
| 752 Args: | |
| 753 test: test to look for | |
| 754 dict: dict of sets of files""" | |
| 755 for set_of_tests in dict.itervalues(): | |
| 756 if test in set_of_tests: | |
| 757 set_of_tests.remove(test) | |
| 758 | |
| 759 def _AlreadySeenTest(self, test, test_list_path, lineno): | |
| 760 """Returns true if we've already seen a more precise path for this test | |
| 761 than the test_list_path. | |
| 762 """ | |
| 763 if not test in self._test_list_paths: | |
| 764 return False | |
| 765 | |
| 766 prev_base_path = self._test_list_paths[test] | |
| 767 if (prev_base_path == os.path.normpath(test_list_path)): | |
| 768 self._AddError(lineno, 'Duplicate expectations.', test) | |
| 769 return True | |
| 770 | |
| 771 # Check if we've already seen a more precise path. | |
| 772 return prev_base_path.startswith(os.path.normpath(test_list_path)) | |
| 773 | |
| 774 def _AddError(self, lineno, msg, path): | |
| 775 """Reports an error that will prevent running the tests. Does not | |
| 776 immediately raise an exception because we'd like to aggregate all the | |
| 777 errors so they can all be printed out.""" | |
| 778 self._errors.append('\nLine:%s %s %s' % (lineno, msg, path)) | |
| 779 | |
| 780 def _LogNonFatalError(self, lineno, msg, path): | |
| 781 """Reports an error that will not prevent running the tests. These are | |
| 782 still errors, but not bad enough to warrant breaking test running.""" | |
| 783 self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path)) | |
| OLD | NEW |