Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/python | 1 #!/usr/bin/python |
| 2 | 2 |
| 3 ''' | 3 ''' |
| 4 Copyright 2012 Google Inc. | 4 Copyright 2012 Google Inc. |
| 5 | 5 |
| 6 Use of this source code is governed by a BSD-style license that can be | 6 Use of this source code is governed by a BSD-style license that can be |
| 7 found in the LICENSE file. | 7 found in the LICENSE file. |
| 8 ''' | 8 ''' |
| 9 | 9 |
| 10 ''' | 10 ''' |
| 11 Rebaselines the given GM tests, on all bots and all configurations. | 11 Rebaselines the given GM tests, on all bots and all configurations. |
| 12 Must be run from the gm-expected directory. If run from a git or SVN | 12 Must be run from the gm-expected directory. If run from a git or SVN |
| 13 checkout, the files will be added to the staging area for commit. | 13 checkout, the files will be added to the staging area for commit. |
| 14 ''' | 14 ''' |
| 15 | 15 |
| 16 # System-level imports | 16 # System-level imports |
| 17 import argparse | 17 import argparse |
| 18 import os | 18 import os |
| 19 import re | |
| 19 import subprocess | 20 import subprocess |
| 20 import sys | 21 import sys |
| 21 import urllib2 | 22 import urllib2 |
| 22 | 23 |
| 23 # Imports from within Skia | 24 # Imports from within Skia |
| 24 # | 25 # |
| 25 # Make sure that they are in the PYTHONPATH, but add them at the *end* | 26 # We need to add the 'gm' directory, so that we can import gm_json.py within |
| 26 # so any that are already in the PYTHONPATH will be preferred. | 27 # that directory. That script allows us to parse the actual-results.json file |
| 28 # written out by the GM tool. | |
| 29 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* | |
| 30 # so any dirs that are already in the PYTHONPATH will be preferred. | |
| 31 # | |
| 32 # This assumes that the 'gm' directory has been checked out as a sibling of | |
| 33 # the 'tools' directory containing this script, which will be the case if | |
| 34 # 'trunk' was checked out as a single unit. | |
| 27 GM_DIRECTORY = os.path.realpath( | 35 GM_DIRECTORY = os.path.realpath( |
| 28 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) | 36 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) |
| 29 if GM_DIRECTORY not in sys.path: | 37 if GM_DIRECTORY not in sys.path: |
| 30 sys.path.append(GM_DIRECTORY) | 38 sys.path.append(GM_DIRECTORY) |
| 31 import gm_json | 39 import gm_json |
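
As context for the new import: the sketch below shows, in isolation, how this patch uses gm_json to parse an actual-results.json summary. It assumes only the names that appear in this diff (`LoadFromString`, `JSONKEY_ACTUALRESULTS`, `JSONKEY_ACTUALRESULTS_FAILED`, `JSONKEY_ACTUALRESULTS_NOCOMPARISON`) and is illustrative, not part of the change.

```python
# Illustrative sketch only; assumes the gm_json names used in this diff.
import urllib2

import gm_json  # must be importable, e.g. with the 'gm' dir on sys.path


def summarize_actuals(json_url):
    # Fetch and parse the actual-results.json summary written by the GM tool.
    json_dict = gm_json.LoadFromString(urllib2.urlopen(json_url).read())
    actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
    # Each section maps filenames to results; a section may be None/empty.
    failed = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] or {}
    new = actual_results[gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] or {}
    return sorted(failed.keys()), sorted(new.keys())
```
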
| 32 | 40 |
| 33 | 41 |
| 34 # Mapping of gm-expectations subdir (under | 42 # Mapping of gm-expectations subdir (under |
| 35 # https://skia.googlecode.com/svn/gm-expected/ ) | 43 # https://skia.googlecode.com/svn/gm-expected/ ) |
| 36 # to builder name (see list at http://108.170.217.252:10117/builders ) | 44 # to builder name (see list at http://108.170.217.252:10117/builders ) |
| (...skipping 27 matching lines...) | |
| 64 pass | 72 pass |
| 65 | 73 |
| 66 class Rebaseliner(object): | 74 class Rebaseliner(object): |
| 67 | 75 |
| 68 # params: | 76 # params: |
| 69 # json_base_url: base URL from which to read json_filename | 77 # json_base_url: base URL from which to read json_filename |
| 70 # json_filename: filename (under json_base_url) from which to read a | 78 # json_filename: filename (under json_base_url) from which to read a |
| 71 # summary of results; typically "actual-results.json" | 79 # summary of results; typically "actual-results.json" |
| 72 # subdirs: which platform subdirectories to rebaseline; if not specified, | 80 # subdirs: which platform subdirectories to rebaseline; if not specified, |
| 73 # rebaseline all platform subdirectories | 81 # rebaseline all platform subdirectories |
| 74 # tests: list of tests to rebaseline, or None if we should rebaseline | 82 # tests: list of tests to rebaseline, as a filter applied to |
| 75 # whatever files the JSON results summary file tells us to | 83 # the list from the JSON file |
| 76 # configs: which configs to run for each test; this should only be | 84 # configs: which configs to run for each test, as a filter applied to |
| 77 # specified if the list of tests was also specified (otherwise, | 85 # the list from the JSON file |
| 78 # the JSON file will give us test names and configs) | 86 # add_new: if True, download actual results of tests which we don't |
| 87 # have baselines for yet, in addition to any failing tests | |
| 79 # dry_run: if True, instead of actually downloading files or adding | 88 # dry_run: if True, instead of actually downloading files or adding |
| 80 # files to checkout, display a list of operations that | 89 # files to checkout, display a list of operations that |
| 81 # we would normally perform | 90 # we would normally perform |
| 82 def __init__(self, json_base_url, json_filename, | 91 def __init__(self, json_base_url, json_filename, |
| 83 subdirs=None, tests=None, configs=None, dry_run=False): | 92 subdirs=None, tests=None, configs=None, add_new=False, |
| 84 if configs and not tests: | 93 dry_run=False): |
| 85 raise ValueError('configs should only be specified if tests ' + | |
| 86 'were specified also') | |
| 87 self._tests = tests | 94 self._tests = tests |
| 88 self._configs = configs | 95 self._configs = configs |
| 89 if not subdirs: | 96 if not subdirs: |
| 90 self._subdirs = sorted(SUBDIR_MAPPING.keys()) | 97 self._subdirs = sorted(SUBDIR_MAPPING.keys()) |
| 98 self._missing_json_is_fatal = False | |

> **epoger** (2013/06/07 15:15:00):
> Made one more change in patchset 4:
> If we are run

| 91 else: | 99 else: |
| 92 self._subdirs = subdirs | 100 self._subdirs = subdirs |
| 101 self._missing_json_is_fatal = True | |
| 93 self._json_base_url = json_base_url | 102 self._json_base_url = json_base_url |
| 94 self._json_filename = json_filename | 103 self._json_filename = json_filename |
| 104 self._add_new = add_new | |
| 95 self._dry_run = dry_run | 105 self._dry_run = dry_run |
| 96 self._is_svn_checkout = ( | 106 self._is_svn_checkout = ( |
| 97 os.path.exists('.svn') or | 107 os.path.exists('.svn') or |
| 98 os.path.exists(os.path.join(os.pardir, '.svn'))) | 108 os.path.exists(os.path.join(os.pardir, '.svn'))) |
| 99 self._is_git_checkout = ( | 109 self._is_git_checkout = ( |
| 100 os.path.exists('.git') or | 110 os.path.exists('.git') or |
| 101 os.path.exists(os.path.join(os.pardir, '.git'))) | 111 os.path.exists(os.path.join(os.pardir, '.git'))) |
| 102 | 112 |
| 103 # If dry_run is False, execute subprocess.call(cmd). | 113 # If dry_run is False, execute subprocess.call(cmd). |
| 104 # If dry_run is True, print the command we would have otherwise run. | 114 # If dry_run is True, print the command we would have otherwise run. |
| (...skipping 32 matching lines...) | |
| 137 else: | 147 else: |
| 138 return urllib2.urlopen(url).read() | 148 return urllib2.urlopen(url).read() |
| 139 | 149 |
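
The `_Call` helper described above (its body falls in the elided region) follows the usual dry-run pattern: route every side effect through one function that either acts or just echoes. A minimal sketch of that pattern, under the assumption that `_Call` does nothing more than wrap `subprocess.call`:

```python
# Minimal dry-run wrapper; assumes _Call simply wraps subprocess.call.
import subprocess


def call(cmd, dry_run=False):
    if dry_run:
        print ' '.join(cmd)  # show the command we would have run
        return 0
    return subprocess.call(cmd)
```
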
| 140 # Returns a list of files that require rebaselining. | 150 # Returns a list of files that require rebaselining. |
| 141 # | 151 # |
| 142 # Note that this returns a list of FILES, like this: | 152 # Note that this returns a list of FILES, like this: |
| 143 # ['imageblur_565.png', 'xfermodes_pdf.png'] | 153 # ['imageblur_565.png', 'xfermodes_pdf.png'] |
| 144 # rather than a list of TESTS, like this: | 154 # rather than a list of TESTS, like this: |
| 145 # ['imageblur', 'xfermodes'] | 155 # ['imageblur', 'xfermodes'] |
| 146 # | 156 # |
| 157 # If self._add_new is True, then include tests which we don't have | |
| 158 # baselines for yet, in addition to any failing tests. | |
| 159 # | |
| 160 # If the JSON actual result summary file cannot be loaded, the behavior | |
| 161 # depends on self._missing_json_is_fatal: | |
| 162 # - if true: execution will halt with an exception | |
| 163 # - if false: we will log an error message but return an empty list so we | |
| 164 # go on to the next platform | |
| 165 # | |
| 147 # params: | 166 # params: |
| 148 # json_url: URL pointing to a JSON actual result summary file | 167 # json_url: URL pointing to a JSON actual result summary file |
| 149 # | |
| 150 # TODO(epoger): add a parameter indicating whether "no-comparison" | |
| 151 # results (those for which we don't have any expectations yet) | |
| 152 # should be rebaselined. For now, we only return failed expectations. | |
| 153 def _GetFilesToRebaseline(self, json_url): | 168 def _GetFilesToRebaseline(self, json_url): |
| 154 print ('# Getting files to rebaseline from JSON summary URL %s ...' | 169 if self._dry_run: |
| 155 % json_url) | 170 print '' |
| 156 json_contents = self._GetContentsOfUrl(json_url) | 171 print '#' |
| 172 try: | |
| 173 print ('# Getting files to rebaseline from JSON summary URL %s ...' | |
| 174 % json_url) | |
| 175 json_contents = self._GetContentsOfUrl(json_url) | |
| 176 except urllib2.HTTPError: | |
| 177 message = 'unable to load JSON summary URL %s' % json_url | |
| 178 if self._missing_json_is_fatal: | |
| 179 raise ValueError(message) | |
| 180 else: | |
| 181 print '# %s' % message | |
| 182 return [] | |
| 183 | |
| 157 json_dict = gm_json.LoadFromString(json_contents) | 184 json_dict = gm_json.LoadFromString(json_contents) |
| 158 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] | 185 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] |
| 159 | 186 |
| 160 files_to_rebaseline = [] | 187 files_to_rebaseline = [] |
| 161 failed_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] | 188 failed_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] |
| 162 if failed_results: | 189 if failed_results: |
| 163 files_to_rebaseline.extend(failed_results.keys()) | 190 files_to_rebaseline.extend(failed_results.keys()) |
| 191 if self._add_new: | |
| 192 new_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] | |
| 193 if new_results: | |
| 194 files_to_rebaseline.extend(new_results.keys()) | |
| 164 | 195 |
| 165 print '# ... found files_to_rebaseline %s' % files_to_rebaseline | 196 print '# ... found files_to_rebaseline %s' % files_to_rebaseline |
| 197 if self._dry_run: | |
| 198 print '#' | |
| 166 return files_to_rebaseline | 199 return files_to_rebaseline |
| 167 | 200 |
| 168 # Rebaseline a single file. | 201 # Rebaseline a single file. |
| 169 def _RebaselineOneFile(self, expectations_subdir, builder_name, | 202 def _RebaselineOneFile(self, expectations_subdir, builder_name, |
| 170 infilename, outfilename): | 203 infilename, outfilename): |
| 204 if self._dry_run: | |
| 205 print '' | |
| 171 print '# ' + infilename | 206 print '# ' + infilename |
| 172 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + | 207 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + |
| 173 expectations_subdir + '/' + builder_name + '/' + | 208 expectations_subdir + '/' + builder_name + '/' + |
| 174 expectations_subdir + '/' + infilename) | 209 expectations_subdir + '/' + infilename) |
| 175 | 210 |
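
For orientation, the URL assembled just above has this shape (note the subdir appears twice; builder names come from SUBDIR_MAPPING, which is elided in this diff):

```
http://skia-autogen.googlecode.com/svn/gm-actual/<subdir>/<builder>/<subdir>/<test>_<config>.png
```
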
| 176 # Try to download this file, but if that fails, keep going... | 211 # Try to download this file. |
| 177 # | 212 # |
| 178 # This not treated as a fatal failure because not all | 213 # If the download fails, this will raise an exception and halt the |
| 179 # platforms generate all configs (e.g., Android does not | 214 # rebaseline process. Since the JSON results summary told us that |
| 180 # generate PDF). | 215 # this file needed rebaselining, we ought to be able to download it... |
| 181 # | 216 self._DownloadFile(source_url=url, dest_filename=outfilename) |
| 182 # We could tweak the list of configs within this tool to | |
| 183 # reflect which combinations the bots actually generate, and | |
| 184 # then fail if any of those expected combinations are | |
| 185 # missing... but then this tool would become useless every | |
| 186 # time someone tweaked the configs on the bots without | |
| 187 # updating this script. | |
| 188 try: | |
| 189 self._DownloadFile(source_url=url, dest_filename=outfilename) | |
| 190 except CommandFailedException: | |
| 191 print '# Couldn\'t fetch ' + url | |
| 192 return | |
| 193 | 217 |
| 194 # Add this file to version control (if it isn't already). | 218 # Add this file to version control (if it isn't already). |
| 195 if self._is_svn_checkout: | 219 if self._is_svn_checkout: |
| 196 cmd = [ 'svn', 'add', '--quiet', outfilename ] | 220 cmd = [ 'svn', 'add', '--quiet', outfilename ] |
| 197 self._Call(cmd) | 221 self._Call(cmd) |
| 198 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type', 'image/png', | 222 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type', 'image/png', |
| 199 outfilename ]; | 223 outfilename ]; |
| 200 self._Call(cmd) | 224 self._Call(cmd) |
| 201 elif self._is_git_checkout: | 225 elif self._is_git_checkout: |
| 202 cmd = [ 'git', 'add', outfilename ] | 226 cmd = [ 'git', 'add', outfilename ] |
| 203 self._Call(cmd) | 227 self._Call(cmd) |
| 204 | 228 |
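
Taken together with the `.svn`/`.git` detection in `__init__`, the version-control step above amounts to the following standalone sketch (commands copied from the patch; the dry-run indirection is omitted for brevity):

```python
# Standalone sketch of the version-control step; commands match the patch.
import os
import subprocess


def add_to_checkout(outfilename):
    # The script may be run from the checkout root or one level below it.
    is_svn = (os.path.exists('.svn') or
              os.path.exists(os.path.join(os.pardir, '.svn')))
    is_git = (os.path.exists('.git') or
              os.path.exists(os.path.join(os.pardir, '.git')))
    if is_svn:
        subprocess.call(['svn', 'add', '--quiet', outfilename])
        subprocess.call(['svn', 'propset', '--quiet',
                         'svn:mime-type', 'image/png', outfilename])
    elif is_git:
        subprocess.call(['git', 'add', outfilename])
```
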
| 205 # Rebaseline the given configs for a single test. | |
| 206 # | |
| 207 # params: | |
| 208 # expectations_subdir | |
| 209 # builder_name | |
| 210 # test: a single test to rebaseline | |
| 211 def _RebaselineOneTest(self, expectations_subdir, builder_name, test): | |
| 212 if self._configs: | |
| 213 configs = self._configs | |
| 214 else: | |
| 215 if (expectations_subdir == 'base-shuttle-win7-intel-angle'): | |
| 216 configs = [ 'angle', 'anglemsaa16' ] | |
| 217 else: | |
| 218 configs = [ '565', '8888', 'gpu', 'pdf', 'mesa', 'msaa16', | |
| 219 'msaa4' ] | |
| 220 print '# ' + expectations_subdir + ':' | |
| 221 for config in configs: | |
| 222 infilename = test + '_' + config + '.png' | |
| 223 outfilename = os.path.join(expectations_subdir, infilename); | |
| 224 self._RebaselineOneFile(expectations_subdir=expectations_subdir, | |
| 225 builder_name=builder_name, | |
| 226 infilename=infilename, | |
| 227 outfilename=outfilename) | |
| 228 | |
| 229 # Rebaseline all platforms/tests/types we specified in the constructor. | 229 # Rebaseline all platforms/tests/types we specified in the constructor. |
| 230 def RebaselineAll(self): | 230 def RebaselineAll(self): |
| 231 filename_pattern = re.compile('(\S+)_(\S+).png') | |
| 231 for subdir in self._subdirs: | 232 for subdir in self._subdirs: |
| 232 if not subdir in SUBDIR_MAPPING.keys(): | 233 if not subdir in SUBDIR_MAPPING.keys(): |
| 233 raise Exception(('unrecognized platform subdir "%s"; ' + | 234 raise Exception(('unrecognized platform subdir "%s"; ' + |
| 234 'should be one of %s') % ( | 235 'should be one of %s') % ( |
| 235 subdir, SUBDIR_MAPPING.keys())) | 236 subdir, SUBDIR_MAPPING.keys())) |
| 236 builder_name = SUBDIR_MAPPING[subdir] | 237 builder_name = SUBDIR_MAPPING[subdir] |
| 237 if self._tests: | 238 json_url = '/'.join([self._json_base_url, |
| 238 for test in self._tests: | 239 subdir, builder_name, subdir, |
| 239 self._RebaselineOneTest(expectations_subdir=subdir, | 240 self._json_filename]) |
| 240 builder_name=builder_name, | 241 filenames = self._GetFilesToRebaseline(json_url=json_url) |
| 241 test=test) | 242 for filename in filenames: |
| 242 else: # get the raw list of files that need rebaselining from JSON | 243 # Apply our filters, if we have any. |
| 243 json_url = '/'.join([self._json_base_url, | 244 match = filename_pattern.match(filename) |
| 244 subdir, builder_name, subdir, | 245 test = match.group(1) |
| 245 self._json_filename]) | 246 config = match.group(2) |
| 246 filenames = self._GetFilesToRebaseline(json_url=json_url) | 247 if self._tests and test not in self._tests: |
| 247 for filename in filenames: | 248 continue |
| 248 outfilename = os.path.join(subdir, filename); | 249 if self._configs and config not in self._configs: |
| 249 self._RebaselineOneFile(expectations_subdir=subdir, | 250 continue |
| 250 builder_name=builder_name, | 251 |
| 251 infilename=filename, | 252 outfilename = os.path.join(subdir, filename); |
| 252 outfilename=outfilename) | 253 self._RebaselineOneFile(expectations_subdir=subdir, |
| 254 builder_name=builder_name, | |
| 255 infilename=filename, | |
| 256 outfilename=outfilename) | |
| 253 | 257 |
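
The new filtering logic in `RebaselineAll` hinges on splitting each reported filename into a (test, config) pair. A small demonstration of the pattern used in the patch (the first two filenames come from the example earlier in this file; the underscored test name is hypothetical):

```python
# Demonstration of the (test, config) split used by RebaselineAll().
import re

# Note: '(\S+)' is greedy, so the split happens at the *last* underscore,
# and the '.' before 'png' is unescaped, so it matches any character.
filename_pattern = re.compile('(\S+)_(\S+).png')

for filename in ['imageblur_565.png', 'xfermodes_pdf.png',
                 'blurs_gpu_msaa4.png']:
    match = filename_pattern.match(filename)
    print '%s -> test=%s config=%s' % (
        filename, match.group(1), match.group(2))
# imageblur_565.png -> test=imageblur config=565
# xfermodes_pdf.png -> test=xfermodes config=pdf
# blurs_gpu_msaa4.png -> test=blurs_gpu config=msaa4
```
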
| 254 # main... | 258 # main... |
| 255 | 259 |
| 256 parser = argparse.ArgumentParser() | 260 parser = argparse.ArgumentParser() |
| 261 parser.add_argument('--add-new', action='store_true', | |
| 262 help='in addition to the standard behavior of ' + | |
| 263 'downloading images whose tests are failing, ' + | |
| 264 'also download images for which we haven\'t checked in ' + | |
| 265 'expectations yet') | |
| 257 parser.add_argument('--configs', metavar='CONFIG', nargs='+', | 266 parser.add_argument('--configs', metavar='CONFIG', nargs='+', |
| 258 help='which configurations to rebaseline, e.g. ' + | 267 help='which configurations to rebaseline, e.g. ' + |
| 259 '"--configs 565 8888"; if unspecified, run a default ' + | 268 '"--configs 565 8888", as a filter over the configs ' + |
| 260 'set of configs. This should ONLY be specified if ' + | 269 'which JSON_FILENAME tells us need rebaselining; ' + |
| 261 '--tests has also been specified.') | 270 'if unspecified, then rebaseline all the configs that ' + |
| 262 parser.add_argument('--dry_run', action='store_true', | 271 'JSON_FILENAME tells us need rebaselining.') |
| 272 parser.add_argument('--dry-run', action='store_true', | |
| 263 help='instead of actually downloading files or adding ' + | 273 help='instead of actually downloading files or adding ' + |
| 264 'files to checkout, display a list of operations that ' + | 274 'files to checkout, display a list of operations that ' + |
| 265 'we would normally perform') | 275 'we would normally perform') |
| 266 parser.add_argument('--json_base_url', | 276 parser.add_argument('--json-base-url', |
| 267 help='base URL from which to read JSON_FILENAME ' + | 277 help='base URL from which to read JSON_FILENAME ' + |
| 268 'files; defaults to %(default)s', | 278 'files; defaults to %(default)s', |
| 269 default='http://skia-autogen.googlecode.com/svn/gm-actual') | 279 default='http://skia-autogen.googlecode.com/svn/gm-actual') |
| 270 parser.add_argument('--json_filename', | 280 parser.add_argument('--json-filename', |
| 271 help='filename (under JSON_BASE_URL) to read a summary ' + | 281 help='filename (under JSON_BASE_URL) to read a summary ' + |
| 272 'of results from; defaults to %(default)s', | 282 'of results from; defaults to %(default)s', |
| 273 default='actual-results.json') | 283 default='actual-results.json') |
| 274 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', | 284 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', |
| 275 help='which platform subdirectories to rebaseline; ' + | 285 help='which platform subdirectories to rebaseline; ' + |
| 276 'if unspecified, rebaseline all subdirs, same as ' + | 286 'if unspecified, rebaseline all subdirs, same as ' + |
| 277 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) | 287 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) |
| 278 parser.add_argument('--tests', metavar='TEST', nargs='+', | 288 parser.add_argument('--tests', metavar='TEST', nargs='+', |
| 279 help='which tests to rebaseline, e.g. ' + | 289 help='which tests to rebaseline, e.g. ' + |
| 280 '"--tests aaclip bigmatrix"; if unspecified, then all ' + | 290 '"--tests aaclip bigmatrix", as a filter over the tests ' + |
| 281 'failing tests (according to the actual-results.json ' + | 291 'which JSON_FILENAME tells us need rebaselining; ' + |
| 282 'file) will be rebaselined.') | 292 'if unspecified, then rebaseline all the tests that ' + |
| 293 'JSON_FILENAME tells us need rebaselining.') | |
| 283 args = parser.parse_args() | 294 args = parser.parse_args() |
| 284 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, | 295 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, |
| 285 subdirs=args.subdirs, dry_run=args.dry_run, | 296 subdirs=args.subdirs, add_new=args.add_new, |
| 297 dry_run=args.dry_run, | |
| 286 json_base_url=args.json_base_url, | 298 json_base_url=args.json_base_url, |
| 287 json_filename=args.json_filename) | 299 json_filename=args.json_filename) |
| 288 rebaseliner.RebaselineAll() | 300 rebaseliner.RebaselineAll() |
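
With the renamed flags in place, a couple of hypothetical invocations for orientation. The script path is a placeholder; per the docstring, any real run happens from the gm-expected directory, and the subdir and test names below are taken from examples elsewhere in this patch:

```
# Dry run: list what would be rebaselined, without downloading anything
# or touching the checkout.
python tools/rebaseline.py --dry-run \
    --subdirs base-shuttle-win7-intel-angle --tests imageblur xfermodes

# Rebaseline all failing tests, plus tests with no checked-in
# expectations yet.
python tools/rebaseline.py --add-new
```
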