# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# TODO(gwilson): 1. Change text differs to use external utils.
#                2. Change text_expectations parsing to existing
#                   logic in layout_package.test_expectations.

import difflib
import errno
import os
import path_utils
import platform_utils
import re
import shutil
import subprocess
import sys
import urllib2
import zipfile

from failure import Failure

WEBKIT_TRAC_HOSTNAME = "trac.webkit.org"
WEBKIT_LAYOUT_TEST_BASE_URL = ("http://svn.webkit.org/repository/"
                               "webkit/trunk/LayoutTests/")
WEBKIT_PLATFORM_BASELINE_URL = WEBKIT_LAYOUT_TEST_BASE_URL + "platform/%s/"

BUILDBOT_BASE = "http://build.chromium.org/buildbot/"
WEBKIT_BUILDER_BASE = BUILDBOT_BASE + "waterfall/builders/%s"
FYI_BUILDER_BASE = BUILDBOT_BASE + "waterfall.fyi/builders/%s"
RESULTS_URL_BASE = "/builds/%s/steps/webkit_tests/logs/stdio"
ARCHIVE_URL_BASE = "/builds/%s/steps/archive_webkit_tests_results/logs/stdio"
ZIP_FILE_URL_BASE = (BUILDBOT_BASE +
                     "layout_test_results/%s/%s/layout-test-results.zip")
CHROMIUM_SRC_HOME = "http://src.chromium.org/viewvc/chrome/trunk/src/webkit/"
LAYOUT_TEST_REPO_BASE_URL = CHROMIUM_SRC_HOME + "data/layout_tests/"

# TODO(gwilson): Put flaky test dashboard URL here when ready.
FLAKY_TEST_URL = ""
FLAKY_TEST_REGEX = "%s</a></td><td align=right>(\d+)</td>"

TEST_EXPECTATIONS_URL = (CHROMIUM_SRC_HOME +
                         "tools/layout_tests/test_expectations.txt")

# Failure types as found in builder stdio.
TEXT_DIFF_MISMATCH = "Text diff mismatch"
IMAGE_MISMATCH = "Image mismatch"
TEST_TIMED_OUT = "Test timed out"
TEST_SHELL_CRASHED = "Test shell crashed"

CHROMIUM_WIN = "chromium-win"
CHROMIUM_WIN_XP = "chromium-win-xp"
CHROMIUM_WIN_VISTA = "chromium-win-vista"
CHROMIUM_WIN_7 = "chromium-win-7"
CHROMIUM_MAC = "chromium-mac"
CHROMIUM_LINUX = "chromium-linux"
PLATFORM = "platform"
LAYOUTTESTS = "LayoutTests"

# These platform dirs must be in order of their precedence.
# TODO(gwilson): This is not the same fallback order as test_shell. This list
# should be reversed, and we need to add detection for the type of OS that
# the given builder is running.
WEBKIT_MAC_PLATFORM_DIRS = ["mac-leopard", "mac-snowleopard", "mac"]
WEBKIT_WIN_PLATFORM_DIRS = ["win", "mac"]
CHROMIUM_MAC_PLATFORM_DIRS = [CHROMIUM_MAC]
CHROMIUM_WIN_PLATFORM_DIRS = [CHROMIUM_WIN_XP, CHROMIUM_WIN_VISTA,
                              CHROMIUM_WIN_7, CHROMIUM_WIN]
CHROMIUM_LINUX_PLATFORM_DIRS = [CHROMIUM_LINUX, CHROMIUM_WIN]

ARCHIVE_URL_REGEX = "last.*change: (\d+)"
BUILD_NAME_REGEX = "build name: ([^\s]*)"
CHROMIUM_FILE_AGE_REGEX = '<br />\s*Modified\s*<em>.*</em> \((.*)\) by'
TEST_PATH_REGEX = "[^\s]+?"
FAILED_REGEX = ("DEBUG (" + TEST_PATH_REGEX + ") failed:\s*"
                "(" + TEXT_DIFF_MISMATCH + ")?\s*"
                "(" + IMAGE_MISMATCH + ")?\s*"
                "(" + TEST_TIMED_OUT + ")?\s*"
                "(" + TEST_SHELL_CRASHED + ")?")
FAILED_UNEXPECTED_REGEX = " [^\s]+(?: = .*?)?\n"
LAST_BUILD_REGEX = ("<h2>Recent Builds:</h2>"
                    "[\s\S]*?<a href=\"../builders/.*?/builds/(\d+)\">")
# Sometimes the lines of hyphens get interrupted when multiple processes
# output to stdio, so don't rely on them being contiguous.
SUMMARY_REGEX = ("\d+ tests ran as expected, "
                 "\d+ didn't:(.*?)-{78}")  # -{78} --> 78 dashes in a row.
SUMMARY_REGRESSIONS = "Regressions:.*?\n((?: [^\s]+(?: = .*?)?\n)+)"
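# For illustration, SUMMARY_REGEX and SUMMARY_REGRESSIONS are meant to pull
# the regression list out of a summary block shaped roughly like this
# (hypothetical test names):
#   10 tests ran as expected, 2 didn't:
#   Regressions: Unexpected failures
#    fast/dom/foo.html = TEXT
#    fast/css/bar.html = IMAGE
#   ------------...------------ (78 dashes)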
TEST_EXPECTATIONS_PLATFORM_REGEX = "((WONTFIX |BUG.* )+.* %s.* : %s = [^\n]*)"
TEST_EXPECTATIONS_NO_PLATFORM_REGEX = ("((WONTFIX |BUG.* )+.*"
                                       "(?!WIN)(?!LINUX)(?!MAC).* :"
                                       " %s = [^\n]*)")

WEBKIT_FILE_AGE_REGEX = ('<a class="file" title="View File" href="%s">.*?</a>'
                         '.*?<td class="age" .*?>\s*'
                         '<a class="timeline" href=".*?" title=".*?">(.*?)</a>')

LOCAL_BASELINE_REGEXES = [
    ".*/third_party/Webkit/LayoutTests/platform/.*?(/.*)",
    ".*/third_party/Webkit/LayoutTests(/.*)",
    ".*/webkit/data/layout_tests/platform/.*?/LayoutTests(/.*)",
    ".*/webkit/data/layout_tests/platform/.*?(/.*)",
    ".*/webkit/data/layout_tests(/.*)",
    "(/.*)"]

UPSTREAM_IMAGE_FILE_ENDING = "-upstream.png"

TEST_EXPECTATIONS_WONTFIX = "WONTFIX"

TEMP_ZIP_DIR = "temp-zip-dir"

TARGETS = ["Release", "Debug"]


def GetURLBase(use_fyi):
    if use_fyi:
        return FYI_BUILDER_BASE
    return WEBKIT_BUILDER_BASE


def GetResultsURL(build, platform, use_fyi=False):
    return (GetURLBase(use_fyi) + RESULTS_URL_BASE) % (platform, build)


def GetArchiveURL(build, platform, use_fyi=False):
    return (GetURLBase(use_fyi) + ARCHIVE_URL_BASE) % (platform, build)


def GetZipFileURL(build, platform):
    return ZIP_FILE_URL_BASE % (platform, build)


def GetBuilderURL(platform, use_fyi=False):
    return GetURLBase(use_fyi) % platform

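# For illustration, GetResultsURL("1234", "Webkit%20Linux") expands to:
#   http://build.chromium.org/buildbot/waterfall/builders/Webkit%20Linux/
#   builds/1234/steps/webkit_tests/logs/stdio
# (wrapped here for readability; the builder name is a hypothetical example).
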
# TODO(gwilson): Once the new flakiness dashboard is done, connect it here.
def GetFlakyTestURL(platform):
    return ""


# TODO(gwilson): can we refactor these into the resourcegatherer?
def IsLinuxPlatform(platform):
    return (platform and platform.find("Linux") > -1)


def IsMacPlatform(platform):
    return (platform and platform.find("Mac") > -1)


def CreateDirectory(dir):
    """Creates the given directory if it does not exist, recursively
    creating any missing parent directories."""
    if not os.path.exists(dir):
        os.makedirs(dir, 0777)


def ExtractFirstValue(string, regex):
    m = re.search(regex, string)
    if m and m.group(1):
        return m.group(1)
    return None


def ExtractSingleRegexAtURL(url, regex):
    content = ScrapeURL(url)
    m = re.search(regex, content, re.DOTALL)
    if m and m.group(1):
        return m.group(1)
    return None


def ScrapeURL(url):
    return urllib2.urlopen(urllib2.Request(url)).read()


def GetImageDiffExecutable():
    for target in TARGETS:
        try:
            return path_utils.ImageDiffPath(target)
        except Exception:
            # This build target did not exist; try the next one.
            continue
    raise Exception("No image diff executable could be found. You may need "
                    "to build the image diff project under at least one "
                    "build target to create image diffs.")


def GeneratePNGDiff(file1, file2, output_file):
    _compare_available = False
    try:
        executable = GetImageDiffExecutable()
        cmd = [executable, '--diff', file1, file2, output_file]
        _compare_available = True
    except Exception, e:
        print "No command line to compare %s and %s : %s" % (file1, file2, e)

    result = 1
    if _compare_available:
        try:
            result = subprocess.call(cmd)
        except OSError, e:
            if e.errno == errno.ENOENT or e.errno == errno.EACCES:
                _compare_available = False
                print "No possible comparison between %s and %s." % (
                    file1, file2)
            else:
                # Re-raise with the original traceback intact.
                raise
    if not result:
        print "The given PNG images were the same!"
    return _compare_available


# TODO(gwilson): Change this to use the pretty print differs.
def GenerateTextDiff(file1, file2, output_file):
    # Open up expected and actual text files and use difflib to compare them.
    dataA = open(file1, 'r').read()
    dataB = open(file2, 'r').read()
    d = difflib.Differ()
    diffs = list(d.compare(dataA.split("\n"), dataB.split("\n")))
    output = open(output_file, 'w')
    output.write("\n".join(diffs))
    output.close()


class BaselineCandidate(object):
    """Simple data object for holding the URL and local file path of a
    possible baseline. The local file path is meant to refer to the
    locally-cached version of the file at the URL."""

    def __init__(self, local, url):
        self.local_file = local
        self.baseline_url = url

    def IsValid(self):
        return self.local_file is not None and self.baseline_url is not None


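# For illustration (hypothetical paths), a candidate pairs a locally-cached
# copy with the URL it was fetched from:
#   candidate = BaselineCandidate("out/foo-expected.txt",
#                                 WEBKIT_LAYOUT_TEST_BASE_URL +
#                                 "fast/dom/foo-expected.txt")
#   candidate.IsValid()  # True, since both fields are set.
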
class FailureFinder(object):

    def __init__(self,
                 build,
                 builder_name,
                 exclude_known_failures,
                 test_regex,
                 output_dir,
                 max_failures,
                 verbose,
                 builder_output_log_file=None,
                 archive_step_log_file=None,
                 zip_file=None,
                 test_expectations_file=None):
        self.build = build
        # TODO(gwilson): add full url-encoding for the platform.
        self.SetPlatform(builder_name)
        self.exclude_known_failures = exclude_known_failures
        self.exclude_wontfix = True
        self.test_regex = test_regex
        self.output_dir = output_dir
        self.max_failures = max_failures
        self.verbose = verbose
        self.fyi_builder = False
        self._flaky_test_cache = {}
        self._test_expectations_cache = None
        # If true, scraping will still happen but no files will be
        # downloaded.
        self.dont_download = False
        # Local caches of log files. If set, the finder will use these
        # files rather than scraping them from the buildbot.
        self.builder_output_log_file = builder_output_log_file
        self.archive_step_log_file = archive_step_log_file
        self.zip_file = zip_file
        self.test_expectations_file = test_expectations_file
        self.delete_zip_file = True
        # Determines if the script should scrape the baselines from
        # webkit.org and chromium.org, or if it should use local baselines
        # in the current checkout.
        self.use_local_baselines = False

    def SetPlatform(self, platform):
        self.platform = platform.replace(" ", "%20")

    # TODO(gwilson): Change this to get the last build that finished
    # successfully.

    def GetLastBuild(self):
        """
        Returns the last build number for this platform.
        If self.fyi_builder is true, this only looks at the fyi builder.
        """
        try:
            return ExtractSingleRegexAtURL(GetBuilderURL(self.platform,
                                                         self.fyi_builder),
                                           LAST_BUILD_REGEX)
        except urllib2.HTTPError:
            if not self.fyi_builder:
                self.fyi_builder = True
                return self.GetLastBuild()

    def GetFailures(self):
        if not self.build:
            self.build = self.GetLastBuild()
        if self.verbose:
            print "Using build number %s" % self.build

        if self.use_local_baselines:
            self._BuildBaselineIndexes()
        self.failures = self._GetFailuresFromBuilder()
        if (self.failures and
            (self._DownloadResultResources() or self.dont_download)):
            return self.failures
        return None

    def _GetFailuresFromBuilder(self):
        """
        Returns a list of failures for the given build and platform by
        scraping the buildbots and parsing their results.
        The list returned contains Failure class objects.
        """
        if self.verbose:
            print "Fetching failures from buildbot..."

        content = self._ScrapeBuilderOutput()
        if not content:
            return None
        matches = self._FindMatchesInBuilderOutput(content)

        if self.verbose:
            print "%s failures found." % len(matches)

        failures = []
        matches.sort()
        for match in matches:
            if (len(failures) < self.max_failures and
                (not self.test_regex or
                 match[0].find(self.test_regex) > -1)):
                failure = self._CreateFailureFromMatch(match)
                if self.verbose:
                    print failure.test_path
                failures.append(failure)

        return failures

    def _ScrapeBuilderOutput(self):
        # If the build log file is specified, use that instead of scraping.
        if self.builder_output_log_file:
            log = open(self.builder_output_log_file, 'r')
            return log.read()

        # Scrape the failures from the buildbot for this revision.
        try:
            return ScrapeURL(GetResultsURL(self.build,
                                           self.platform,
                                           self.fyi_builder))
        except:
            # If we hit a problem, and we're not on the FYI builder, try it
            # again on the FYI builder.
            if not self.fyi_builder:
                if self.verbose:
                    print ("Could not find builder on waterfall, trying fyi "
                           "waterfall...")
                self.fyi_builder = True
                return self._ScrapeBuilderOutput()
            print "I could not find that builder, or the build did not"
            print "compile. Check that the builder name matches exactly"
            print "(case sensitive), and wrap quotes around builder names"
            print "that have spaces."
            return None

    # TODO(gwilson): The type of failure is now output in the summary, so no
    # matching between the summary and the earlier output is necessary.
    # Change this method and others to derive failure types from summary
    # only.

    def _FindMatchesInBuilderOutput(self, output):
        matches = re.findall(FAILED_REGEX, output, re.MULTILINE)
        if self.exclude_known_failures:
            summary = re.search(SUMMARY_REGEX, output, re.DOTALL)
            regressions = []
            if summary:
                regressions = self._FindRegressionsInSummary(
                    summary.group(1))
            matches = self._MatchRegressionsToFailures(regressions, matches)
        return matches

    def _CreateFailureFromMatch(self, match):
        failure = Failure()
        failure.test_path = match[0]
        failure.text_diff_mismatch = match[1] != ''
        failure.image_mismatch = match[2] != ''
        failure.timeout = match[3] != ''
        failure.crashed = match[4] != ''
        failure.platform = self.platform
        return failure

    def _FindRegressionsInSummary(self, summary):
        regressions = []
        if not summary:
            return regressions
        matches = re.findall(SUMMARY_REGRESSIONS, summary, re.DOTALL)
        for match in matches:
            lines = re.findall(FAILED_UNEXPECTED_REGEX, match, re.DOTALL)
            for line in lines:
                clipped = line.strip()
                if clipped.find("=") > -1:
                    clipped = clipped[:clipped.find("=") - 1]
                regressions.append(clipped)
        return regressions

    def _MatchRegressionsToFailures(self, regressions, failures):
        matches = []
        for regression in regressions:
            for failure in failures:
                if failure[0].find(regression) > -1:
                    matches.append(failure)
                    break
        return matches

    # TODO(gwilson): add support for multiple conflicting build numbers by
    # renaming the zip file and naming the directory appropriately.

    def _DownloadResultResources(self):
        """
        Finds and downloads/extracts all of the test results (pixel/text
        output) for all of the given failures.
        """

        target_zip = "%s/layout-test-results-%s.zip" % (self.output_dir,
                                                        self.build)
        if self.zip_file:
            filename = self.zip_file
            self.delete_zip_file = False
        else:
            revision, build_name = self._GetRevisionAndBuildFromArchiveStep()
            zip_url = GetZipFileURL(revision, build_name)
            if self.verbose:
                print "Downloading zip file from %s to %s" % (zip_url,
                                                              target_zip)
            filename = self._DownloadFile(zip_url, target_zip, "b")
            if not filename:
                if self.verbose:
                    print ("Could not download zip file from %s. "
                           "Does it exist?" % zip_url)
                return False

        if zipfile.is_zipfile(filename):
            zip = zipfile.ZipFile(filename)
            if self.verbose:
                print 'Extracting files...'
            directory = "%s/layout-test-results-%s" % (self.output_dir,
                                                       self.build)
            CreateDirectory(directory)
            self._UnzipZipfile(zip, TEMP_ZIP_DIR)

            # Iterate over a copy of the list: WONTFIX failures are removed
            # from self.failures as we go, and removing items from a list
            # while iterating over it skips elements.
            for failure in self.failures[:]:
                failure.test_expectations_line = (
                    self._GetTestExpectationsLine(failure.test_path))
                if self.exclude_wontfix and failure.IsWontFix():
                    self.failures.remove(failure)
                    continue
                if failure.text_diff_mismatch:
                    self._PopulateTextFailure(failure, directory, zip)
                if failure.image_mismatch:
                    self._PopulateImageFailure(failure, directory, zip)
                if not self.use_local_baselines:
                    failure.test_age = self._GetFileAge(failure.GetTestHome())
                failure.flakiness = self._GetFlakiness(failure.test_path,
                                                       self.platform)
            zip.close()
            if self.verbose:
                print "Files extracted."
            if self.delete_zip_file:
                if self.verbose:
                    print "Cleaning up zip file..."
                path_utils.RemoveDirectory(TEMP_ZIP_DIR)
                os.remove(filename)
            return True
        else:
            if self.verbose:
                print ("Downloaded file '%s' doesn't look like a zip file."
                       % filename)
            return False

    def _UnzipZipfile(self, zip, base_dir):
        for name in zip.namelist():
            if not name.endswith('/'):
                extracted_file_path = os.path.join(base_dir, name)
                try:
                    (path, filename) = os.path.split(extracted_file_path)
                    os.makedirs(path, 0777)
                except OSError:
                    # The directory already exists.
                    pass
                outfile = open(extracted_file_path, 'wb')
                outfile.write(zip.read(name))
                outfile.flush()
                outfile.close()
                os.chmod(extracted_file_path, 0777)

    def _GetRevisionAndBuildFromArchiveStep(self):
        if self.archive_step_log_file:
            log = open(self.archive_step_log_file, 'r')
            content = log.read()
        else:
            content = ScrapeURL(GetArchiveURL(self.build,
                                              self.platform,
                                              self.fyi_builder))
        revision = ExtractFirstValue(content, ARCHIVE_URL_REGEX)
        build_name = ExtractFirstValue(content, BUILD_NAME_REGEX)
        return (revision, build_name)

    def _PopulateTextFailure(self, failure, directory, zip):
        baseline = self._GetBaseline(failure.GetExpectedTextFilename(),
                                     directory)
        failure.text_baseline_local = baseline.local_file
        failure.text_baseline_url = baseline.baseline_url
        failure.text_baseline_age = (
            self._GetFileAge(failure.GetTextBaselineTracHome()))
        failure.text_actual_local = "%s/%s" % (
            directory, failure.GetActualTextFilename())
        if baseline.IsValid() and not self.dont_download:
            self._CopyFileFromZipDir(
                failure.GetTextResultLocationInZipFile(),
                failure.text_actual_local)
            GenerateTextDiff(failure.text_baseline_local,
                             failure.text_actual_local,
                             directory + "/" + failure.GetTextDiffFilename())

    def _PopulateImageFailure(self, failure, directory, zip):
        baseline = self._GetBaseline(failure.GetExpectedImageFilename(),
                                     directory)
        failure.image_baseline_local = baseline.local_file
        failure.image_baseline_url = baseline.baseline_url
        if baseline.IsValid():
            failure.image_baseline_age = (
                self._GetFileAge(failure.GetImageBaselineTracHome()))
            failure.image_actual_local = "%s/%s" % (
                directory, failure.GetActualImageFilename())
            self._CopyFileFromZipDir(
                failure.GetImageResultLocationInZipFile(),
                failure.image_actual_local)
            if (not GeneratePNGDiff(failure.image_baseline_local,
                                    failure.image_actual_local,
                                    "%s/%s" % (directory,
                                               failure.GetImageDiffFilename()))
                and self.verbose):
                print "Could not generate PNG diff for %s" % failure.test_path
            if (failure.IsImageBaselineInChromium() or
                self.use_local_baselines):
                upstream_baseline = self._GetUpstreamBaseline(
                    failure.GetExpectedImageFilename(), directory)
                failure.image_baseline_upstream_local = (
                    upstream_baseline.local_file)
                failure.image_baseline_upstream_url = (
                    upstream_baseline.baseline_url)

    def _GetBaseline(self, filename, directory, upstream_only=False):
        """Searches for and downloads the baseline for the given test,
        putting it in the given directory."""

        local_filename = os.path.join(directory, filename)
        local_directory = local_filename[:local_filename.rfind("/")]
        if upstream_only:
            last_index = local_filename.rfind(".")
            if last_index > -1:
                local_filename = (local_filename[:last_index] +
                                  UPSTREAM_IMAGE_FILE_ENDING)

        download_file_modifiers = ""
        if local_filename.endswith(".png"):
            download_file_modifiers = "b"  # binary file

        if not self.dont_download:
            CreateDirectory(local_directory)

        local_baseline = None
        url_of_baseline = None

        if self.use_local_baselines:
            test_path_key = self._NormalizeBaselineIdentifier(filename)
            dict = self.baseline_dict
            if upstream_only:
                dict = self.webkit_baseline_dict
            if test_path_key in dict:
                local_baseline = dict[test_path_key]
                url_of_baseline = local_baseline
                shutil.copy(local_baseline, local_directory)
            elif self.verbose:
                print ("Baseline %s does not exist in the index." %
                       test_path_key)
        else:
            index = 0
            possible_files = self._GetPossibleFileList(filename,
                                                       upstream_only)
            # Download the baselines from the webkit.org site.
            while local_baseline is None and index < len(possible_files):
                local_baseline = self._DownloadFile(possible_files[index],
                                                    local_filename,
                                                    download_file_modifiers,
                                                    True)
                if local_baseline:
                    url_of_baseline = possible_files[index]
                index += 1

        if not local_baseline:
            if self.verbose:
                print "Could not find any baseline for %s" % filename
        else:
            local_baseline = os.path.normpath(local_baseline)
        if local_baseline and self.verbose:
            print "Found baseline: %s" % url_of_baseline

        return BaselineCandidate(local_baseline, url_of_baseline)

    def _AddBaselinePaths(self, list, base_path, directories):
        for dir in directories:
            list.append(os.path.join(base_path, dir))

    # TODO(gwilson): Refactor this method to use
    # platform_utils_*.BaselineSearchPath instead of custom logic.

    def _BuildBaselineIndexes(self):
        """Builds an index of all the known local baselines in both chromium
        and webkit. Two dictionaries are created: a webkit-specific (no
        chromium baselines) dictionary and an overall (both) dictionary.
        Each has a structure like:
        "/fast/dom/one-expected.txt" ->
            "C:\\path\\to\\fast\\dom\\one-expected.txt"
        """
        if self.verbose:
            print "Building index of all local baselines..."

        self.baseline_dict = {}
        self.webkit_baseline_dict = {}

        base = os.path.abspath(os.path.curdir)
        webkit_base = path_utils.PathFromBase('third_party', 'Webkit',
                                              'LayoutTests')
        chromium_base = path_utils.PathFromBase('webkit', 'data',
                                                'layout_tests')
        chromium_base_platform = os.path.join(chromium_base, PLATFORM)
        webkit_base_platform = os.path.join(webkit_base, PLATFORM)

        possible_chromium_files = []
        possible_webkit_files = []

        if IsMacPlatform(self.platform):
            self._AddBaselinePaths(possible_chromium_files,
                                   chromium_base_platform,
                                   CHROMIUM_MAC_PLATFORM_DIRS)
            self._AddBaselinePaths(possible_chromium_files,
                                   webkit_base_platform,
                                   WEBKIT_MAC_PLATFORM_DIRS)
            self._AddBaselinePaths(possible_webkit_files,
                                   webkit_base_platform,
                                   WEBKIT_MAC_PLATFORM_DIRS)
        elif IsLinuxPlatform(self.platform):
            self._AddBaselinePaths(possible_chromium_files,
                                   chromium_base_platform,
                                   CHROMIUM_LINUX_PLATFORM_DIRS)
        else:
            self._AddBaselinePaths(possible_chromium_files,
                                   chromium_base_platform,
                                   CHROMIUM_WIN_PLATFORM_DIRS)

        if not IsMacPlatform(self.platform):
            self._AddBaselinePaths(possible_webkit_files,
                                   webkit_base_platform,
                                   WEBKIT_WIN_PLATFORM_DIRS)

        possible_webkit_files.append(webkit_base)

        self._PopulateBaselineDict(possible_webkit_files,
                                   self.webkit_baseline_dict)
        self._PopulateBaselineDict(possible_chromium_files,
                                   self.baseline_dict)
        for key in self.webkit_baseline_dict.keys():
            if not key in self.baseline_dict:
                self.baseline_dict[key] = self.webkit_baseline_dict[key]

        return True

    def _PopulateBaselineDict(self, directories, dictionary):
        for dir in directories:
            os.path.walk(dir, self._VisitBaselineDir, dictionary)

    def _VisitBaselineDir(self, dict, dirname, names):
        """Method intended to be called by os.path.walk to build up an
        index of where all the test baselines exist."""
        # Exclude .svn from the walk, since we don't care what is in
        # these dirs.
        if '.svn' in names:
            names.remove('.svn')
        for name in names:
            if name.find("-expected.") > -1:
                test_path_key = os.path.join(dirname, name)
                # Fix path separators to match the separators used on
                # the buildbots.
                test_path_key = test_path_key.replace("\\", "/")
                test_path_key = self._NormalizeBaselineIdentifier(
                    test_path_key)
                if not test_path_key in dict:
                    dict[test_path_key] = os.path.join(dirname, name)

    # TODO(gwilson): Simplify identifier creation to not rely so heavily on
    # directory and path names.

    def _NormalizeBaselineIdentifier(self, test_path):
        """Given either a baseline path (i.e. /LayoutTests/platform/mac/...)
        or a test path (i.e. /LayoutTests/fast/dom/...), normalizes it to a
        unique identifier. This is basically a hashing function for layout
        test paths."""

        for regex in LOCAL_BASELINE_REGEXES:
            value = ExtractFirstValue(test_path, regex)
            if value:
                return value
        return test_path
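
    # For illustration, both of these hypothetical inputs normalize to
    # "/fast/dom/one-expected.txt" via LOCAL_BASELINE_REGEXES:
    #   .../third_party/Webkit/LayoutTests/fast/dom/one-expected.txt
    #   .../webkit/data/layout_tests/platform/chromium-win/LayoutTests
    #      /fast/dom/one-expected.txt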

    def _AddBaselineURLs(self, list, base_url, platforms):
        # If the base URL doesn't contain any platform in its path, only
        # add the base URL to the list. This happens with the chrome/ dir.
        if base_url.find("%s") == -1:
            list.append(base_url)
            return
        for platform in platforms:
            list.append(base_url % platform)

    # TODO(gwilson): Refactor this method to use
    # platform_utils_*.BaselineSearchPath instead of custom logic. This may
    # require some kind of wrapper, since this method looks for URLs
    # instead of local paths.

    def _GetPossibleFileList(self, filename, only_webkit):
        """Returns a list of possible baseline locations (URLs) for the
        given file, ordered by the precedence of this object's platform.
        """

        possible_chromium_files = []
        possible_webkit_files = []

        chromium_platform_url = LAYOUT_TEST_REPO_BASE_URL
        if not filename.startswith("chrome"):
            chromium_platform_url += "platform/%s/"
        chromium_platform_url += filename

        webkit_platform_url = WEBKIT_PLATFORM_BASELINE_URL + filename

        if IsMacPlatform(self.platform):
            self._AddBaselineURLs(possible_chromium_files,
                                  chromium_platform_url,
                                  CHROMIUM_MAC_PLATFORM_DIRS)
            self._AddBaselineURLs(possible_webkit_files,
                                  webkit_platform_url,
                                  WEBKIT_MAC_PLATFORM_DIRS)
        elif IsLinuxPlatform(self.platform):
            self._AddBaselineURLs(possible_chromium_files,
                                  chromium_platform_url,
                                  CHROMIUM_LINUX_PLATFORM_DIRS)
        else:
            self._AddBaselineURLs(possible_chromium_files,
                                  chromium_platform_url,
                                  CHROMIUM_WIN_PLATFORM_DIRS)

        if not IsMacPlatform(self.platform):
            self._AddBaselineURLs(possible_webkit_files,
                                  webkit_platform_url,
                                  WEBKIT_WIN_PLATFORM_DIRS)
        possible_webkit_files.append(WEBKIT_LAYOUT_TEST_BASE_URL + filename)

        if only_webkit:
            return possible_webkit_files
        return possible_chromium_files + possible_webkit_files

    # Like _GetBaseline, but only retrieves the baseline from upstream
    # (skips looking in chromium).

    def _GetUpstreamBaseline(self, filename, directory):
        return self._GetBaseline(filename, directory, upstream_only=True)

    def _GetFileAge(self, url):
        # Check if the given URL is really a local file path.
        if not url or not url.startswith("http"):
            return None
        try:
            if url.find(WEBKIT_TRAC_HOSTNAME) > -1:
                return ExtractSingleRegexAtURL(url[:url.rfind("/")],
                                               WEBKIT_FILE_AGE_REGEX %
                                               url[url.find("/browser"):])
            else:
                return ExtractSingleRegexAtURL(url + "?view=log",
                                               CHROMIUM_FILE_AGE_REGEX)
        except:
            if self.verbose:
                print "Could not find age for %s. Does the file exist?" % url
            return None

    # Returns a flakiness on a scale of 1-50.
    # TODO(gwilson): modify this to also return which of the last 10
    # builds failed for this test.

    def _GetFlakiness(self, test_path, target_platform):
        url = GetFlakyTestURL(target_platform)
        if url == "":
            return None

        if url in self._flaky_test_cache:
            content = self._flaky_test_cache[url]
        else:
            content = ScrapeURL(url)
            self._flaky_test_cache[url] = content

        flakiness = ExtractFirstValue(content, FLAKY_TEST_REGEX % test_path)
        return flakiness

    def _GetTestExpectations(self):
        if not self._test_expectations_cache:
            try:
                if self.test_expectations_file:
                    log = open(self.test_expectations_file, 'r')
                    self._test_expectations_cache = log.read()
                else:
                    self._test_expectations_cache = ScrapeURL(
                        TEST_EXPECTATIONS_URL)
            except urllib2.HTTPError:
                print ("Could not find test_expectations.txt at %s" %
                       TEST_EXPECTATIONS_URL)

        return self._test_expectations_cache

    def _GetTestExpectationsLine(self, test_path):
        content = self._GetTestExpectations()

        if not content:
            return None

        for match in content.splitlines():
            line = re.search(".*? : (.*?) = .*", match)
            if line and test_path.find(line.group(1)) > -1:
                return match

        return None

    def _CopyFileFromZipDir(self, file_in_zip, file_to_create):
        dir = os.path.split(file_to_create)[0]
        CreateDirectory(dir)
        file = os.path.normpath(os.path.join(TEMP_ZIP_DIR, file_in_zip))
        shutil.copy(file, dir)

    def _ExtractFileFromZip(self, zip, file_in_zip, file_to_create):
        modifiers = ""
        if file_to_create.endswith(".png"):
            modifiers = "b"
        try:
            CreateDirectory(file_to_create[0:file_to_create.rfind("/")])
            localFile = open(file_to_create, "w%s" % modifiers)
            localFile.write(zip.read(file_in_zip))
            localFile.close()
            os.chmod(file_to_create, 0777)
            return True
        except KeyError:
            print "File %s does not exist in zip file." % (file_in_zip)
        except AttributeError:
            print "File %s does not exist in zip file." % (file_in_zip)
            print "Is this zip file assembled correctly?"
        return False

    def _DownloadFile(self, url, local_filename=None, modifiers="",
                      force=False):
        """
        Copies the contents of the file at the given URL to a local file.
        """
        try:
            if local_filename is None:
                local_filename = url.split('/')[-1]
            if os.path.isfile(local_filename) and not force:
                if self.verbose:
                    print "File at %s already exists." % local_filename
                return local_filename
            if self.dont_download:
                return local_filename
            webFile = urllib2.urlopen(url)
            localFile = open(local_filename, ("w%s" % modifiers))
            localFile.write(webFile.read())
            webFile.close()
            localFile.close()
            os.chmod(local_filename, 0777)
        except urllib2.HTTPError:
            return None
        except urllib2.URLError:
            print "Could not open the URL %s. Is it malformed?" % url
            return None
        return localFile.name
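

# A minimal usage sketch (an assumed driver, not part of this module): build
# a finder for one builder and build, then collect its failures. The builder
# name and output directory below are hypothetical.
#
#   finder = FailureFinder(build=None,  # None means "use the last build".
#                          builder_name="Webkit Linux",
#                          exclude_known_failures=True,
#                          test_regex=None,
#                          output_dir="/tmp/failures",
#                          max_failures=20,
#                          verbose=True)
#   failures = finder.GetFailures()
#   if failures:
#       for failure in failures:
#           print failure.test_path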