Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(61)

Side by Side Diff: tools/rebaseline.py

Issue 18348018: rebaseline.py: if expectations dir contains JSON format results, update those instead of image files (Closed) Base URL: http://skia.googlecode.com/svn/trunk/
Patch Set: sync_to_r9909 Created 7 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « gm/gm_json.py ('k') | tools/tests/rebaseline/output/all/output-expected/command_line » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 2
3 ''' 3 '''
4 Copyright 2012 Google Inc. 4 Copyright 2012 Google Inc.
5 5
6 Use of this source code is governed by a BSD-style license that can be 6 Use of this source code is governed by a BSD-style license that can be
7 found in the LICENSE file. 7 found in the LICENSE file.
8 ''' 8 '''
9 9
10 ''' 10 '''
11 Rebaselines the given GM tests, on all bots and all configurations. 11 Rebaselines the given GM tests, on all bots and all configurations.
12 Must be run from the gm-expected directory. If run from a git or SVN 12 Must be run from the gm-expected directory. If run from a git or SVN
13 checkout, the files will be added to the staging area for commit. 13 checkout, the files will be added to the staging area for commit.
14 ''' 14 '''
15 15
16 # System-level imports 16 # System-level imports
17 import argparse 17 import argparse
18 import os 18 import os
19 import re 19 import re
20 import subprocess
21 import sys 20 import sys
22 import urllib2 21 import urllib2
23 22
24 # Imports from local directory 23 # Imports from local directory
25 import rebaseline_imagefiles 24 import rebaseline_imagefiles
26 25
27 # Imports from within Skia 26 # Imports from within Skia
28 # 27 #
29 # We need to add the 'gm' directory, so that we can import gm_json.py within 28 # We need to add the 'gm' directory, so that we can import gm_json.py within
30 # that directory. That script allows us to parse the actual-results.json file 29 # that directory. That script allows us to parse the actual-results.json file
31 # written out by the GM tool. 30 # written out by the GM tool.
32 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* 31 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
33 # so any dirs that are already in the PYTHONPATH will be preferred. 32 # so any dirs that are already in the PYTHONPATH will be preferred.
34 # 33 #
35 # This assumes that the 'gm' directory has been checked out as a sibling of 34 # This assumes that the 'gm' directory has been checked out as a sibling of
36 # the 'tools' directory containing this script, which will be the case if 35 # the 'tools' directory containing this script, which will be the case if
37 # 'trunk' was checked out as a single unit. 36 # 'trunk' was checked out as a single unit.
38 GM_DIRECTORY = os.path.realpath( 37 GM_DIRECTORY = os.path.realpath(
39 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) 38 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
40 if GM_DIRECTORY not in sys.path: 39 if GM_DIRECTORY not in sys.path:
41 sys.path.append(GM_DIRECTORY) 40 sys.path.append(GM_DIRECTORY)
42 import gm_json 41 import gm_json
43 42
44 JSON_EXPECTATIONS_FILENAME='expected-results.json'
45
46 # Mapping of gm-expectations subdir (under 43 # Mapping of gm-expectations subdir (under
47 # https://skia.googlecode.com/svn/gm-expected/ ) 44 # https://skia.googlecode.com/svn/gm-expected/ )
48 # to builder name (see list at http://108.170.217.252:10117/builders ) 45 # to builder name (see list at http://108.170.217.252:10117/builders )
49 SUBDIR_MAPPING = { 46 SUBDIR_MAPPING = {
50 'base-shuttle-win7-intel-float': 47 'base-shuttle-win7-intel-float':
51 'Test-Win7-ShuttleA-HD2000-x86-Release', 48 'Test-Win7-ShuttleA-HD2000-x86-Release',
52 'base-shuttle-win7-intel-angle': 49 'base-shuttle-win7-intel-angle':
53 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', 50 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE',
54 'base-shuttle-win7-intel-directwrite': 51 'base-shuttle-win7-intel-directwrite':
55 'Test-Win7-ShuttleA-HD2000-x86-Release-DirectWrite', 52 'Test-Win7-ShuttleA-HD2000-x86-Release-DirectWrite',
(...skipping 15 matching lines...) Expand all
71 'Test-Android-Nexus10-MaliT604-Arm7-Release', 68 'Test-Android-Nexus10-MaliT604-Arm7-Release',
72 'base-android-nexus-4': 69 'base-android-nexus-4':
73 'Test-Android-Nexus4-Adreno320-Arm7-Release', 70 'Test-Android-Nexus4-Adreno320-Arm7-Release',
74 } 71 }
75 72
76 73
77 class CommandFailedException(Exception): 74 class CommandFailedException(Exception):
78 pass 75 pass
79 76
80 # Object that rebaselines a JSON expectations file (not individual image files). 77 # Object that rebaselines a JSON expectations file (not individual image files).
81 #
82 # TODO(epoger): Most of this is just the code from the old ImageRebaseliner...
83 # some of it will need to be updated in order to properly rebaseline JSON files.
84 # There is a lot of code duplicated between here and ImageRebaseliner, but
85 # that's fine because we will delete ImageRebaseliner soon.
86 class JsonRebaseliner(object): 78 class JsonRebaseliner(object):
87 79
88 # params: 80 # params:
89 # expectations_root: root directory of all expectations 81 # expectations_root: root directory of all expectations JSON files
90 # json_base_url: base URL from which to read json_filename 82 # expectations_filename: filename (under expectations_root) of JSON
91 # json_filename: filename (under json_base_url) from which to read a 83 # expectations file; typically
92 # summary of results; typically "actual-results.json" 84 # "expected-results.json"
85 # actuals_base_url: base URL from which to read actual-result JSON files
86 # actuals_filename: filename (under actuals_base_url) from which to read a
87 # summary of results; typically "actual-results.json"
93 # tests: list of tests to rebaseline, or None if we should rebaseline 88 # tests: list of tests to rebaseline, or None if we should rebaseline
94 # whatever files the JSON results summary file tells us to 89 # whatever files the JSON results summary file tells us to
95 # configs: which configs to run for each test; this should only be 90 # configs: which configs to run for each test; this should only be
96 # specified if the list of tests was also specified (otherwise, 91 # specified if the list of tests was also specified (otherwise,
97 # the JSON file will give us test names and configs) 92 # the JSON file will give us test names and configs)
98 # dry_run: if True, instead of actually downloading files or adding
99 # files to checkout, display a list of operations that
100 # we would normally perform
101 # add_new: if True, add expectations for tests which don't have any yet 93 # add_new: if True, add expectations for tests which don't have any yet
102 # missing_json_is_fatal: whether to halt execution if we cannot read a 94 def __init__(self, expectations_root, expectations_filename,
103 # JSON actual result summary file 95 actuals_base_url, actuals_filename,
104 def __init__(self, expectations_root, json_base_url, json_filename, 96 tests=None, configs=None, add_new=False):
105 tests=None, configs=None, dry_run=False,
106 add_new=False, missing_json_is_fatal=False):
107 raise ValueError('JsonRebaseliner not yet implemented') # TODO(epoger)
108 if configs and not tests: 97 if configs and not tests:
109 raise ValueError('configs should only be specified if tests ' + 98 raise ValueError('configs should only be specified if tests ' +
110 'were specified also') 99 'were specified also')
111 self._expectations_root = expectations_root 100 self._expectations_root = expectations_root
101 self._expectations_filename = expectations_filename
112 self._tests = tests 102 self._tests = tests
113 self._configs = configs 103 self._configs = configs
114 self._json_base_url = json_base_url 104 self._actuals_base_url = actuals_base_url
115 self._json_filename = json_filename 105 self._actuals_filename = actuals_filename
116 self._dry_run = dry_run
117 self._add_new = add_new 106 self._add_new = add_new
118 self._missing_json_is_fatal = missing_json_is_fatal
119 self._googlestorage_gm_actuals_root = (
120 'http://chromium-skia-gm.commondatastorage.googleapis.com/gm')
121 self._testname_pattern = re.compile('(\S+)_(\S+).png') 107 self._testname_pattern = re.compile('(\S+)_(\S+).png')
122 self._is_svn_checkout = (
123 os.path.exists('.svn') or
124 os.path.exists(os.path.join(os.pardir, '.svn')))
125 self._is_git_checkout = (
126 os.path.exists('.git') or
127 os.path.exists(os.path.join(os.pardir, '.git')))
128 108
129 # If dry_run is False, execute subprocess.call(cmd). 109 # Returns the full contents of filepath, as a single string.
130 # If dry_run is True, print the command we would have otherwise run. 110 # If filepath looks like a URL, try to read it that way instead of as
131 # Raises a CommandFailedException if the command fails. 111 # a path on local storage.
132 def _Call(self, cmd): 112 def _GetFileContents(self, filepath):
133 if self._dry_run: 113 if filepath.startswith('http:') or filepath.startswith('https:'):
134 print '%s' % ' '.join(cmd) 114 return urllib2.urlopen(filepath).read()
135 return
136 if subprocess.call(cmd) != 0:
137 raise CommandFailedException('error running command: ' +
138 ' '.join(cmd))
139
140 # Download a single actual result from GoogleStorage, returning True if it
141 # succeeded.
142 def _DownloadFromGoogleStorage(self, infilename, outfilename, all_results):
143 test_name = self._testname_pattern.match(infilename).group(1)
144 if not test_name:
145 print '# unable to find test_name for infilename %s' % infilename
146 return False
147 try:
148 hash_type, hash_value = all_results[infilename]
149 except KeyError:
150 print ('# unable to find filename %s in all_results dict' %
151 infilename)
152 return False
153 except ValueError as e:
154 print '# ValueError reading filename %s from all_results dict: %s'%(
155 infilename, e)
156 return False
157 url = '%s/%s/%s/%s.png' % (self._googlestorage_gm_actuals_root,
158 hash_type, test_name, hash_value)
159 try:
160 self._DownloadFile(source_url=url, dest_filename=outfilename)
161 return True
162 except CommandFailedException:
163 print '# Couldn\'t fetch gs_url %s' % url
164 return False
165
166 # Download a single actual result from skia-autogen, returning True if it
167 # succeeded.
168 def _DownloadFromAutogen(self, infilename, outfilename,
169 expectations_subdir, builder_name):
170 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' +
171 expectations_subdir + '/' + builder_name + '/' +
172 expectations_subdir + '/' + infilename)
173 try:
174 self._DownloadFile(source_url=url, dest_filename=outfilename)
175 return True
176 except CommandFailedException:
177 print '# Couldn\'t fetch autogen_url %s' % url
178 return False
179
180 # Download a single file, raising a CommandFailedException if it fails.
181 def _DownloadFile(self, source_url, dest_filename):
182 # Download into a temporary file and then rename it afterwards,
183 # so that we don't corrupt the existing file if it fails midway thru.
184 temp_filename = os.path.join(os.path.dirname(dest_filename),
185 '.temp-' + os.path.basename(dest_filename))
186
187 # TODO(epoger): Replace calls to "curl"/"mv" (which will only work on
188 # Unix) with a Python HTTP library (which should work cross-platform)
189 self._Call([ 'curl', '--fail', '--silent', source_url,
190 '--output', temp_filename ])
191 self._Call([ 'mv', temp_filename, dest_filename ])
192
193 # Returns the full contents of a URL, as a single string.
194 #
195 # Unlike standard URL handling, we allow relative "file:" URLs;
196 # for example, "file:one/two" resolves to the file ./one/two
197 # (relative to current working dir)
198 def _GetContentsOfUrl(self, url):
199 file_prefix = 'file:'
200 if url.startswith(file_prefix):
201 filename = url[len(file_prefix):]
202 return open(filename, 'r').read()
203 else: 115 else:
204 return urllib2.urlopen(url).read() 116 return open(filepath, 'r').read()
205 117
206 # Returns a dictionary of actual results from actual-results.json file. 118 # Returns a dictionary of actual results from actual-results.json file.
207 # 119 #
208 # The dictionary returned has this format: 120 # The dictionary returned has this format:
209 # { 121 # {
210 # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322], 122 # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322],
211 # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152], 123 # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152],
212 # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716] 124 # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716]
213 # } 125 # }
214 # 126 #
215 # If the JSON actual result summary file cannot be loaded, the behavior 127 # If the JSON actual result summary file cannot be loaded, raise an
216 # depends on self._missing_json_is_fatal: 128 # exception.
217 # - if true: execution will halt with an exception
218 # - if false: we will log an error message but return an empty dictionary
219 # 129 #
220 # params: 130 # params:
221 # json_url: URL pointing to a JSON actual result summary file 131 # json_url: URL pointing to a JSON actual result summary file
222 # sections: a list of section names to include in the results, e.g. 132 # sections: a list of section names to include in the results, e.g.
223 # [gm_json.JSONKEY_ACTUALRESULTS_FAILED, 133 # [gm_json.JSONKEY_ACTUALRESULTS_FAILED,
224 # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ; 134 # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ;
225 # if None, then include ALL sections. 135 # if None, then include ALL sections.
226 def _GetActualResults(self, json_url, sections=None): 136 def _GetActualResults(self, json_url, sections=None):
227 try: 137 json_contents = self._GetFileContents(json_url)
228 json_contents = self._GetContentsOfUrl(json_url)
229 except (urllib2.HTTPError, IOError):
230 message = 'unable to load JSON summary URL %s' % json_url
231 if self._missing_json_is_fatal:
232 raise ValueError(message)
233 else:
234 print '# %s' % message
235 return {}
236
237 json_dict = gm_json.LoadFromString(json_contents) 138 json_dict = gm_json.LoadFromString(json_contents)
238 results_to_return = {} 139 results_to_return = {}
239 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] 140 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
240 if not sections: 141 if not sections:
241 sections = actual_results.keys() 142 sections = actual_results.keys()
242 for section in sections: 143 for section in sections:
243 section_results = actual_results[section] 144 section_results = actual_results[section]
244 if section_results: 145 if section_results:
245 results_to_return.update(section_results) 146 results_to_return.update(section_results)
246 return results_to_return 147 return results_to_return
247 148
248 # Returns a list of files that require rebaselining.
249 #
250 # Note that this returns a list of FILES, like this:
251 # ['imageblur_565.png', 'xfermodes_pdf.png']
252 # rather than a list of TESTS, like this:
253 # ['imageblur', 'xfermodes']
254 #
255 # params:
256 # json_url: URL pointing to a JSON actual result summary file
257 # add_new: if True, then return files listed in any of these sections:
258 # - JSONKEY_ACTUALRESULTS_FAILED
259 # - JSONKEY_ACTUALRESULTS_NOCOMPARISON
260 # if False, then return files listed in these sections:
261 # - JSONKEY_ACTUALRESULTS_FAILED
262 #
263 def _GetFilesToRebaseline(self, json_url, add_new):
264 if self._dry_run:
265 print ''
266 print '#'
267 print ('# Getting files to rebaseline from JSON summary URL %s ...'
268 % json_url)
269 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
270 if add_new:
271 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
272 results_to_rebaseline = self._GetActualResults(json_url=json_url,
273 sections=sections)
274 files_to_rebaseline = results_to_rebaseline.keys()
275 files_to_rebaseline.sort()
276 print '# ... found files_to_rebaseline %s' % files_to_rebaseline
277 if self._dry_run:
278 print '#'
279 return files_to_rebaseline
280
281 # Rebaseline a single file.
282 def _RebaselineOneFile(self, expectations_subdir, builder_name,
283 infilename, outfilename, all_results):
284 if self._dry_run:
285 print ''
286 print '# ' + infilename
287
288 # First try to download this result image from Google Storage.
289 # If that fails, try skia-autogen.
290 # If that fails too, just go on to the next file.
291 #
292 # This not treated as a fatal failure because not all
293 # platforms generate all configs (e.g., Android does not
294 # generate PDF).
295 #
296 # TODO(epoger): Once we are downloading only files that the
297 # actual-results.json file told us to, this should become a
298 # fatal error. (If the actual-results.json file told us that
299 # the test failed with XXX results, we should be able to download
300 # those results every time.)
301 if not self._DownloadFromGoogleStorage(infilename=infilename,
302 outfilename=outfilename,
303 all_results=all_results):
304 if not self._DownloadFromAutogen(infilename=infilename,
305 outfilename=outfilename,
 306                                             expectations_subdir=expectations_subdir,
307 builder_name=builder_name):
308 print '# Couldn\'t fetch infilename ' + infilename
309 return
310
311 # Add this file to version control (if appropriate).
312 if self._add_new:
313 if self._is_svn_checkout:
314 cmd = [ 'svn', 'add', '--quiet', outfilename ]
315 self._Call(cmd)
316 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type',
317 'image/png', outfilename ];
318 self._Call(cmd)
319 elif self._is_git_checkout:
320 cmd = [ 'git', 'add', outfilename ]
321 self._Call(cmd)
322
323 # Rebaseline the given configs for a single test.
324 #
325 # params:
326 # expectations_subdir
327 # builder_name
328 # test: a single test to rebaseline
329 # all_results: a dictionary of all actual results
330 def _RebaselineOneTest(self, expectations_subdir, builder_name, test,
331 all_results):
332 if self._configs:
333 configs = self._configs
334 else:
335 if (expectations_subdir == 'base-shuttle-win7-intel-angle'):
336 configs = [ 'angle', 'anglemsaa16' ]
337 else:
338 configs = [ '565', '8888', 'gpu', 'pdf', 'mesa', 'msaa16',
339 'msaa4' ]
340 if self._dry_run:
341 print ''
342 print '# ' + expectations_subdir + ':'
343 for config in configs:
344 infilename = test + '_' + config + '.png'
345 outfilename = os.path.join(expectations_subdir, infilename);
346 self._RebaselineOneFile(expectations_subdir=expectations_subdir,
347 builder_name=builder_name,
348 infilename=infilename,
349 outfilename=outfilename,
350 all_results=all_results)
351
352 # Rebaseline all tests/types we specified in the constructor, 149 # Rebaseline all tests/types we specified in the constructor,
353 # within this gm-expectations subdir. 150 # within this gm-expectations subdir.
354 # 151 #
355 # params: 152 # params:
356 # subdir : e.g. 'base-shuttle-win7-intel-float' 153 # subdir : e.g. 'base-shuttle-win7-intel-float'
357 # builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release' 154 # builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release'
358 def RebaselineSubdir(self, subdir, builder): 155 def RebaselineSubdir(self, subdir, builder):
359 json_url = '/'.join([self._json_base_url, 156 # Read in the actual result summary, and extract all the tests whose
360 subdir, builder, subdir, 157 # results we need to update.
361 self._json_filename]) 158 actuals_url = '/'.join([self._actuals_base_url,
362 all_results = self._GetActualResults(json_url=json_url) 159 subdir, builder, subdir,
160 self._actuals_filename])
161 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
162 if self._add_new:
163 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
164 results_to_update = self._GetActualResults(json_url=actuals_url,
165 sections=sections)
363 166
364 if self._tests: 167 # Read in current expectations.
365 for test in self._tests: 168 expectations_json_filepath = os.path.join(
366 self._RebaselineOneTest(expectations_subdir=subdir, 169 self._expectations_root, subdir, self._expectations_filename)
367 builder_name=builder, 170 expectations_dict = gm_json.LoadFromFile(expectations_json_filepath)
368 test=test, all_results=all_results) 171
369 else: # get the raw list of files that need rebaselining from JSON 172 # Update the expectations in memory, skipping any tests/configs that
370 filenames = self._GetFilesToRebaseline(json_url=json_url, 173 # the caller asked to exclude.
371 add_new=self._add_new) 174 skipped_images = []
372 for filename in filenames: 175 if results_to_update:
373 outfilename = os.path.join(subdir, filename); 176 for (image_name, image_results) in results_to_update.iteritems():
 374                 self._RebaselineOneFile(expectations_subdir=subdir, 177                 (test, config) = self._testname_pattern.match(image_name).groups()
375 builder_name=builder, 178 if self._tests:
376 infilename=filename, 179 if test not in self._tests:
377 outfilename=outfilename, 180 skipped_images.append(image_name)
378 all_results=all_results) 181 continue
182 if self._configs:
183 if config not in self._configs:
184 skipped_images.append(image_name)
185 continue
186 expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS] \
187 [image_name] \
188 [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS ] = \
189 [image_results]
190
191 # Write out updated expectations.
192 gm_json.WriteToFile(expectations_dict, expectations_json_filepath)
193
194 if skipped_images:
195 print ('Skipped these tests due to test/config filters: %s' %
196 skipped_images)
197
379 198
380 # main... 199 # main...
381 200
382 parser = argparse.ArgumentParser() 201 parser = argparse.ArgumentParser()
202 parser.add_argument('--actuals-base-url',
203 help='base URL from which to read files containing JSON ' +
204 'summaries of actual GM results; defaults to %(default)s',
205 default='http://skia-autogen.googlecode.com/svn/gm-actual')
206 parser.add_argument('--actuals-filename',
207 help='filename (within platform-specific subdirectories ' +
208 'of ACTUALS_BASE_URL) to read a summary of results from; ' +
209 'defaults to %(default)s',
210 default='actual-results.json')
211 # TODO(epoger): Add test that exercises --add-new argument.
383 parser.add_argument('--add-new', action='store_true', 212 parser.add_argument('--add-new', action='store_true',
384 help='in addition to the standard behavior of ' + 213 help='in addition to the standard behavior of ' +
385 'updating expectations for failing tests, add ' + 214 'updating expectations for failing tests, add ' +
386 'expectations for tests which don\'t have expectations ' + 215 'expectations for tests which don\'t have expectations ' +
387 'yet.') 216 'yet.')
217 # TODO(epoger): Add test that exercises --configs argument.
218 # TODO(epoger): Once we are only rebaselining JSON files, update the helpstring
219 # to indicate that this is a *filter* over the config names that
220 # actual-results.json tells us need to be rebaselined.
221 # You don't need to specify tests also, etc.
388 parser.add_argument('--configs', metavar='CONFIG', nargs='+', 222 parser.add_argument('--configs', metavar='CONFIG', nargs='+',
389 help='which configurations to rebaseline, e.g. ' + 223 help='which configurations to rebaseline, e.g. ' +
390 '"--configs 565 8888"; if unspecified, run a default ' + 224 '"--configs 565 8888"; if unspecified, run a default ' +
391 'set of configs. This should ONLY be specified if ' + 225 'set of configs. This should ONLY be specified if ' +
392 '--tests has also been specified.') 226 '--tests has also been specified.')
227 # TODO(epoger): The --dry-run argument will no longer be needed once we
228 # are only rebaselining JSON files.
393 parser.add_argument('--dry-run', action='store_true', 229 parser.add_argument('--dry-run', action='store_true',
394 help='instead of actually downloading files or adding ' + 230 help='instead of actually downloading files or adding ' +
395 'files to checkout, display a list of operations that ' + 231 'files to checkout, display a list of operations that ' +
396 'we would normally perform') 232 'we would normally perform')
233 parser.add_argument('--expectations-filename',
234 help='filename (under EXPECTATIONS_ROOT) to read ' +
235 'current expectations from, and to write new ' +
236 'expectations into; defaults to %(default)s',
237 default='expected-results.json')
397 parser.add_argument('--expectations-root', 238 parser.add_argument('--expectations-root',
398 help='root of expectations directory to update-- should ' + 239 help='root of expectations directory to update-- should ' +
399 'contain one or more base-* subdirectories. Defaults to ' + 240 'contain one or more base-* subdirectories. Defaults to ' +
400 '%(default)s', 241 '%(default)s',
401 default='.') 242 default='.')
402 parser.add_argument('--json-base-url',
403 help='base URL from which to read JSON_FILENAME ' +
404 'files; defaults to %(default)s',
405 default='http://skia-autogen.googlecode.com/svn/gm-actual')
406 parser.add_argument('--json-filename',
407 help='filename (under JSON_BASE_URL) to read a summary ' +
408 'of results from; defaults to %(default)s',
409 default='actual-results.json')
410 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', 243 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+',
411 help='which platform subdirectories to rebaseline; ' + 244 help='which platform subdirectories to rebaseline; ' +
412 'if unspecified, rebaseline all subdirs, same as ' + 245 'if unspecified, rebaseline all subdirs, same as ' +
413 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) 246 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys())))
247 # TODO(epoger): Add test that exercises --tests argument.
248 # TODO(epoger): Once we are only rebaselining JSON files, update the helpstring
249 # to indicate that this is a *filter* over the test names that
250 # actual-results.json tells us need to be rebaselined.
414 parser.add_argument('--tests', metavar='TEST', nargs='+', 251 parser.add_argument('--tests', metavar='TEST', nargs='+',
415 help='which tests to rebaseline, e.g. ' + 252 help='which tests to rebaseline, e.g. ' +
416 '"--tests aaclip bigmatrix"; if unspecified, then all ' + 253 '"--tests aaclip bigmatrix"; if unspecified, then all ' +
417 'failing tests (according to the actual-results.json ' + 254 'failing tests (according to the actual-results.json ' +
418 'file) will be rebaselined.') 255 'file) will be rebaselined.')
419 args = parser.parse_args() 256 args = parser.parse_args()
420 if args.subdirs: 257 if args.subdirs:
421 subdirs = args.subdirs 258 subdirs = args.subdirs
422 missing_json_is_fatal = True 259 missing_json_is_fatal = True
423 else: 260 else:
424 subdirs = sorted(SUBDIR_MAPPING.keys()) 261 subdirs = sorted(SUBDIR_MAPPING.keys())
425 missing_json_is_fatal = False 262 missing_json_is_fatal = False
426 for subdir in subdirs: 263 for subdir in subdirs:
427 if not subdir in SUBDIR_MAPPING.keys(): 264 if not subdir in SUBDIR_MAPPING.keys():
428 raise Exception(('unrecognized platform subdir "%s"; ' + 265 raise Exception(('unrecognized platform subdir "%s"; ' +
429 'should be one of %s') % ( 266 'should be one of %s') % (
430 subdir, SUBDIR_MAPPING.keys())) 267 subdir, SUBDIR_MAPPING.keys()))
431 builder = SUBDIR_MAPPING[subdir] 268 builder = SUBDIR_MAPPING[subdir]
432 269
433 # We instantiate different Rebaseliner objects depending 270 # We instantiate different Rebaseliner objects depending
434 # on whether we are rebaselining an expected-results.json file, or 271 # on whether we are rebaselining an expected-results.json file, or
435 # individual image files. Different gm-expected subdirectories may move 272 # individual image files. Different gm-expected subdirectories may move
436 # from individual image files to JSON-format expectations at different 273 # from individual image files to JSON-format expectations at different
437 # times, so we need to make this determination per subdirectory. 274 # times, so we need to make this determination per subdirectory.
438 # 275 #
439 # See https://goto.google.com/ChecksumTransitionDetail 276 # See https://goto.google.com/ChecksumTransitionDetail
440 expectations_json_file = os.path.join(args.expectations_root, subdir, 277 expectations_json_file = os.path.join(args.expectations_root, subdir,
441 JSON_EXPECTATIONS_FILENAME) 278 args.expectations_filename)
442 if os.path.isfile(expectations_json_file): 279 if os.path.isfile(expectations_json_file):
443 sys.stderr.write('ERROR: JsonRebaseliner is not implemented yet.\n')
444 sys.exit(1)
445 rebaseliner = JsonRebaseliner( 280 rebaseliner = JsonRebaseliner(
446 expectations_root=args.expectations_root, 281 expectations_root=args.expectations_root,
282 expectations_filename=args.expectations_filename,
447 tests=args.tests, configs=args.configs, 283 tests=args.tests, configs=args.configs,
448 dry_run=args.dry_run, 284 actuals_base_url=args.actuals_base_url,
449 json_base_url=args.json_base_url, 285 actuals_filename=args.actuals_filename,
450 json_filename=args.json_filename, 286 add_new=args.add_new)
451 add_new=args.add_new,
452 missing_json_is_fatal=missing_json_is_fatal)
453 else: 287 else:
454 rebaseliner = rebaseline_imagefiles.ImageRebaseliner( 288 rebaseliner = rebaseline_imagefiles.ImageRebaseliner(
455 expectations_root=args.expectations_root, 289 expectations_root=args.expectations_root,
456 tests=args.tests, configs=args.configs, 290 tests=args.tests, configs=args.configs,
457 dry_run=args.dry_run, 291 dry_run=args.dry_run,
458 json_base_url=args.json_base_url, 292 json_base_url=args.actuals_base_url,
459 json_filename=args.json_filename, 293 json_filename=args.actuals_filename,
460 add_new=args.add_new, 294 add_new=args.add_new,
461 missing_json_is_fatal=missing_json_is_fatal) 295 missing_json_is_fatal=missing_json_is_fatal)
462 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder) 296 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder)
OLDNEW
« no previous file with comments | « gm/gm_json.py ('k') | tools/tests/rebaseline/output/all/output-expected/command_line » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698