Chromium Code Reviews

| Index: tools/rebaseline.py |
| =================================================================== |
| --- tools/rebaseline.py (revision 9460) |
| +++ tools/rebaseline.py (working copy) |
| @@ -16,14 +16,22 @@ |
| # System-level imports |
| import argparse |
| import os |
| +import re |
| import subprocess |
| import sys |
| import urllib2 |
| # Imports from within Skia |
| # |
| -# Make sure that they are in the PYTHONPATH, but add them at the *end* |
| -# so any that are already in the PYTHONPATH will be preferred. |
| +# We need to add the 'gm' directory, so that we can import gm_json.py within |
| +# that directory. That script allows us to parse the actual-results.json file |
| +# written out by the GM tool. |
| +# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* |
| +# so any dirs that are already in the PYTHONPATH will be preferred. |
| +# |
| +# This assumes that the 'gm' directory has been checked out as a sibling of |
| +# the 'tools' directory containing this script, which will be the case if |
| +# 'trunk' was checked out as a single unit. |
| GM_DIRECTORY = os.path.realpath( |
| os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) |
| if GM_DIRECTORY not in sys.path: |
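For readers skimming the patch, here is a self-contained sketch of the import-path pattern the new comment describes. The directory layout (a 'gm' directory checked out as a sibling of 'tools' under 'trunk') and the GM_DIRECTORY computation are taken from the patch; the final append and import are an assumption about how the setup is completed, since those lines fall outside the hunk.

    # Sketch only: mirrors the path setup described above.
    import os
    import sys

    GM_DIRECTORY = os.path.realpath(
        os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
    if GM_DIRECTORY not in sys.path:
        # Appended at the *end*, so directories already on the path win any
        # conflict over which gm_json module gets imported.
        sys.path.append(GM_DIRECTORY)
    import gm_json  # assumed to live in the sibling 'gm' directory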
| @@ -71,19 +79,18 @@ |
| # summary of results; typically "actual-results.json" |
| # subdirs: which platform subdirectories to rebaseline; if not specified, |
| # rebaseline all platform subdirectories |
| - # tests: list of tests to rebaseline, or None if we should rebaseline |
| - # whatever files the JSON results summary file tells us to |
| - # configs: which configs to run for each test; this should only be |
| - # specified if the list of tests was also specified (otherwise, |
| - # the JSON file will give us test names and configs) |
| + # tests: list of tests to rebaseline, as a filter applied to |
| + # the list from the JSON file |
| + # configs: which configs to run for each test, as a filter applied to |
| + # the list from the JSON file |
| + # add_new: if True, download actual results of tests which we don't |
| + # have baselines for yet, in addition to any failing tests |
| # dry_run: if True, instead of actually downloading files or adding |
| # files to checkout, display a list of operations that |
| # we would normally perform |
| def __init__(self, json_base_url, json_filename, |
| - subdirs=None, tests=None, configs=None, dry_run=False): |
| - if configs and not tests: |
| - raise ValueError('configs should only be specified if tests ' + |
| - 'were specified also') |
| + subdirs=None, tests=None, configs=None, add_new=False, |
| + dry_run=False): |
| self._tests = tests |
| self._configs = configs |
| if not subdirs: |
| @@ -92,6 +99,7 @@ |
| self._subdirs = subdirs |
| self._json_base_url = json_base_url |
| self._json_filename = json_filename |
| + self._add_new = add_new |
| self._dry_run = dry_run |
| self._is_svn_checkout = ( |
| os.path.exists('.svn') or |
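Since the constructor signature changes in this patch, a hedged usage sketch may help reviewers. The keyword names come from the patch, the argument values below are invented for illustration, and the URL and filename simply repeat the argparse defaults further down.

    # Hypothetical invocation of the updated constructor (values are examples).
    rebaseliner = Rebaseliner(
        json_base_url='http://skia-autogen.googlecode.com/svn/gm-actual',
        json_filename='actual-results.json',
        subdirs=None,             # None: rebaseline every known platform subdir
        tests=['imageblur'],      # filter over the tests the JSON file reports
        configs=['565', '8888'],  # filter over the configs the JSON file reports
        add_new=True,             # also fetch images with no checked-in baseline
        dry_run=True)             # print operations instead of performing them
    rebaseliner.RebaselineAll()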
| @@ -144,13 +152,15 @@ |
| # rather than a list of TESTS, like this: |
| # ['imageblur', 'xfermodes'] |
| # |
| + # If self._add_new is True, then include tests which we don't have |
| + # baselines for yet, in addition to any failing tests. |
| + # |
| # params: |
| # json_url: URL pointing to a JSON actual result summary file |
| - # |
| - # TODO(epoger): add a parameter indicating whether "no-comparison" |
| - # results (those for which we don't have any expectations yet) |
| - # should be rebaselined. For now, we only return failed expectations. |
| def _GetFilesToRebaseline(self, json_url): |
| + if self._dry_run: |
| + print '' |
| + print '#' |
| print ('# Getting files to rebaseline from JSON summary URL %s ...' |
| % json_url) |
| json_contents = self._GetContentsOfUrl(json_url) |
| @@ -161,35 +171,32 @@ |
| failed_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] |
| if failed_results: |
| files_to_rebaseline.extend(failed_results.keys()) |
| + if self._add_new: |

  [epoger, 2013/06/06 20:19:02] Patchset 3 adds implementation of the --add-new option.

| + new_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] |
| + if new_results: |
| + files_to_rebaseline.extend(new_results.keys()) |
| print '# ... found files_to_rebaseline %s' % files_to_rebaseline |
| + if self._dry_run: |
| + print '#' |
| return files_to_rebaseline |
| # Rebaseline a single file. |
| def _RebaselineOneFile(self, expectations_subdir, builder_name, |
| infilename, outfilename): |
| + if self._dry_run: |
| + print '' |
| print '# ' + infilename |
| url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + |
| expectations_subdir + '/' + builder_name + '/' + |
| expectations_subdir + '/' + infilename) |
| - # Try to download this file, but if that fails, keep going... |
| + # Try to download this file. |
| # |
| - # This not treated as a fatal failure because not all |
| - # platforms generate all configs (e.g., Android does not |
| - # generate PDF). |
| - # |
| - # We could tweak the list of configs within this tool to |
| - # reflect which combinations the bots actually generate, and |
| - # then fail if any of those expected combinations are |
| - # missing... but then this tool would become useless every |
| - # time someone tweaked the configs on the bots without |
| - # updating this script. |
| - try: |
| - self._DownloadFile(source_url=url, dest_filename=outfilename) |
| - except CommandFailedException: |
| - print '# Couldn\'t fetch ' + url |
| - return |
| + # If the download fails, this will raise an exception and halt the |
| + # rebaseline process. Since the JSON results summary told us that |
| + # this file needed rebaselining, we ought to be able to download it... |
| + self._DownloadFile(source_url=url, dest_filename=outfilename) |
| # Add this file to version control (if it isn't already). |
| if self._is_svn_checkout: |
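The selection logic in _GetFilesToRebaseline is easy to check against a toy summary. The dictionary below is illustrative only: the real file is parsed via gm_json, and the patch does not show the actual key strings behind JSONKEY_ACTUALRESULTS_FAILED and JSONKEY_ACTUALRESULTS_NOCOMPARISON, so placeholder keys are used here.

    # Toy stand-in for a parsed actual-results.json summary (keys are placeholders).
    actual_results = {
        'failed':        {'imageblur_565.png': None, 'xfermodes_8888.png': None},
        'no-comparison': {'newtest_gpu.png': None},
    }
    add_new = True

    files_to_rebaseline = []
    failed_results = actual_results['failed']           # JSONKEY_ACTUALRESULTS_FAILED
    if failed_results:
        files_to_rebaseline.extend(failed_results.keys())
    if add_new:
        new_results = actual_results['no-comparison']    # JSONKEY_ACTUALRESULTS_NOCOMPARISON
        if new_results:
            files_to_rebaseline.extend(new_results.keys())
    # With add_new set, the list now holds the failing images plus the ones
    # that have no checked-in baseline yet.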
| @@ -202,72 +209,58 @@ |
| cmd = [ 'git', 'add', outfilename ] |
| self._Call(cmd) |
| - # Rebaseline the given configs for a single test. |
| - # |
| - # params: |
| - # expectations_subdir |
| - # builder_name |
| - # test: a single test to rebaseline |
| - def _RebaselineOneTest(self, expectations_subdir, builder_name, test): |
| - if self._configs: |
| - configs = self._configs |
| - else: |
| - if (expectations_subdir == 'base-shuttle-win7-intel-angle'): |
| - configs = [ 'angle', 'anglemsaa16' ] |
| - else: |
| - configs = [ '565', '8888', 'gpu', 'pdf', 'mesa', 'msaa16', |
| - 'msaa4' ] |
| - print '# ' + expectations_subdir + ':' |
| - for config in configs: |
| - infilename = test + '_' + config + '.png' |
| - outfilename = os.path.join(expectations_subdir, infilename); |
| - self._RebaselineOneFile(expectations_subdir=expectations_subdir, |
| - builder_name=builder_name, |
| - infilename=infilename, |
| - outfilename=outfilename) |
| - |
| # Rebaseline all platforms/tests/types we specified in the constructor. |
| def RebaselineAll(self): |
| + filename_pattern = re.compile('(\S+)_(\S+).png') |
| for subdir in self._subdirs: |
| if not subdir in SUBDIR_MAPPING.keys(): |
| raise Exception(('unrecognized platform subdir "%s"; ' + |
| 'should be one of %s') % ( |
| subdir, SUBDIR_MAPPING.keys())) |
| builder_name = SUBDIR_MAPPING[subdir] |
| - if self._tests: |
| - for test in self._tests: |
| - self._RebaselineOneTest(expectations_subdir=subdir, |
| - builder_name=builder_name, |
| - test=test) |
| - else: # get the raw list of files that need rebaselining from JSON |
| - json_url = '/'.join([self._json_base_url, |
| - subdir, builder_name, subdir, |
| - self._json_filename]) |
| - filenames = self._GetFilesToRebaseline(json_url=json_url) |
| - for filename in filenames: |
| - outfilename = os.path.join(subdir, filename); |
| - self._RebaselineOneFile(expectations_subdir=subdir, |
| - builder_name=builder_name, |
| - infilename=filename, |
| - outfilename=outfilename) |
| + json_url = '/'.join([self._json_base_url, |
| + subdir, builder_name, subdir, |
| + self._json_filename]) |
| + filenames = self._GetFilesToRebaseline(json_url=json_url) |
| + for filename in filenames: |
| + # Apply our filters, if we have any. |
| + match = filename_pattern.match(filename) |
| + test = match.group(1) |
| + config = match.group(2) |
| + if self._tests and test not in self._tests: |
| + continue |
| + if self._configs and config not in self._configs: |
| + continue |
| + outfilename = os.path.join(subdir, filename); |
| + self._RebaselineOneFile(expectations_subdir=subdir, |
| + builder_name=builder_name, |
| + infilename=filename, |
| + outfilename=outfilename) |
| + |
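As a sanity check on the new filtering code, the filename pattern from RebaselineAll splits a GM image name into its test and config parts as shown below. The filenames are examples only; the pattern is copied verbatim from the patch, including the unescaped dot before 'png'.

    import re

    # Pattern copied from RebaselineAll above. \S+ is greedy, so a name such as
    # 'foo_bar_565.png' yields test 'foo_bar' and config '565'.
    filename_pattern = re.compile('(\S+)_(\S+).png')

    for filename in ['imageblur_565.png', 'xfermodes_pdf.png']:   # example names
        match = filename_pattern.match(filename)
        test, config = match.group(1), match.group(2)
        print '%s -> test=%s, config=%s' % (filename, test, config)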
| # main... |
| parser = argparse.ArgumentParser() |
| +parser.add_argument('--add-new', action='store_true', |
| + help='in addition to the standard behavior of ' + |
| + 'downloading images whose tests are failing, ' + |
| + 'also download images for which we haven\'t checked in ' + |
| + 'expectations yet') |
| parser.add_argument('--configs', metavar='CONFIG', nargs='+', |
| help='which configurations to rebaseline, e.g. ' + |
| - '"--configs 565 8888"; if unspecified, run a default ' + |
| - 'set of configs. This should ONLY be specified if ' + |
| - '--tests has also been specified.') |
| -parser.add_argument('--dry_run', action='store_true', |
| + '"--configs 565 8888", as a filter over the configs ' + |
| + 'which JSON_FILENAME tells us need rebaselining; ' + |
| + 'if unspecified, then rebaseline all the configs that ' + |
| + 'JSON_FILENAME tells us need rebaselining.') |
| +parser.add_argument('--dry-run', action='store_true', |
| help='instead of actually downloading files or adding ' + |
| 'files to checkout, display a list of operations that ' + |
| 'we would normally perform') |
| -parser.add_argument('--json_base_url', |
| +parser.add_argument('--json-base-url', |
| help='base URL from which to read JSON_FILENAME ' + |
| 'files; defaults to %(default)s', |
| default='http://skia-autogen.googlecode.com/svn/gm-actual') |
| -parser.add_argument('--json_filename', |
| +parser.add_argument('--json-filename', |
| help='filename (under JSON_BASE_URL) to read a summary ' + |
| 'of results from; defaults to %(default)s', |
| default='actual-results.json') |
| @@ -277,12 +270,14 @@ |
| '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) |
| parser.add_argument('--tests', metavar='TEST', nargs='+', |
| help='which tests to rebaseline, e.g. ' + |
| - '"--tests aaclip bigmatrix"; if unspecified, then all ' + |
| - 'failing tests (according to the actual-results.json ' + |
| - 'file) will be rebaselined.') |
| + '"--tests aaclip bigmatrix", as a filter over the tests ' + |
| + 'which JSON_FILENAME tells us need rebaselining; ' + |
| + 'if unspecified, then rebaseline all the tests that ' + |
| + 'JSON_FILENAME tells us need rebaselining.') |
| args = parser.parse_args() |
| rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, |
| - subdirs=args.subdirs, dry_run=args.dry_run, |
| + subdirs=args.subdirs, add_new=args.add_new, |
| + dry_run=args.dry_run, |
| json_base_url=args.json_base_url, |
| json_filename=args.json_filename) |
| rebaseliner.RebaselineAll() |
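Putting the renamed flags together, a typical invocation of the updated script might look like the command below. The platform subdirectory is just an example (it appears in the removed per-platform code above and must be a key of SUBDIR_MAPPING), and dropping --dry-run performs the downloads and svn/git adds for real.

    python tools/rebaseline.py --dry-run --add-new \
        --tests imageblur xfermodes --configs 565 8888 \
        --subdirs base-shuttle-win7-intel-angle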