Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(91)

Side by Side Diff: tools/rebaseline.py

Issue 21901004: Delete image-based rebaselining tool; we have switched to checksums (Closed) Base URL: http://skia.googlecode.com/svn/trunk/
Patch Set: Created 7 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 2
3 ''' 3 '''
4 Copyright 2012 Google Inc. 4 Copyright 2012 Google Inc.
5 5
6 Use of this source code is governed by a BSD-style license that can be 6 Use of this source code is governed by a BSD-style license that can be
7 found in the LICENSE file. 7 found in the LICENSE file.
8 ''' 8 '''
9 9
10 ''' 10 '''
11 Rebaselines the given GM tests, on all bots and all configurations. 11 Rebaselines the given GM tests, on all bots and all configurations.
12
13 TODO(epoger): Fix indentation in this file (2-space indents, not 4-space).
epoger 2013/08/02 20:26:49 Patchset 1 just fixes the indentation in this file
14 ''' 12 '''
15 13
16 # System-level imports 14 # System-level imports
17 import argparse 15 import argparse
18 import os 16 import os
19 import re 17 import re
20 import subprocess 18 import subprocess
21 import sys 19 import sys
22 import urllib2 20 import urllib2
23 21
24 # Imports from local directory 22 # Imports from local directory
25 import rebaseline_imagefiles 23 import rebaseline_imagefiles
26 24
27 # Imports from within Skia 25 # Imports from within Skia
28 # 26 #
29 # We need to add the 'gm' directory, so that we can import gm_json.py within 27 # We need to add the 'gm' directory, so that we can import gm_json.py within
30 # that directory. That script allows us to parse the actual-results.json file 28 # that directory. That script allows us to parse the actual-results.json file
31 # written out by the GM tool. 29 # written out by the GM tool.
32 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* 30 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
33 # so any dirs that are already in the PYTHONPATH will be preferred. 31 # so any dirs that are already in the PYTHONPATH will be preferred.
34 # 32 #
35 # This assumes that the 'gm' directory has been checked out as a sibling of 33 # This assumes that the 'gm' directory has been checked out as a sibling of
36 # the 'tools' directory containing this script, which will be the case if 34 # the 'tools' directory containing this script, which will be the case if
37 # 'trunk' was checked out as a single unit. 35 # 'trunk' was checked out as a single unit.
38 GM_DIRECTORY = os.path.realpath( 36 GM_DIRECTORY = os.path.realpath(
39 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) 37 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
40 if GM_DIRECTORY not in sys.path: 38 if GM_DIRECTORY not in sys.path:
41 sys.path.append(GM_DIRECTORY) 39 sys.path.append(GM_DIRECTORY)
42 import gm_json 40 import gm_json
43 41
44 # Mapping of expectations/gm subdir (under 42 # Mapping of expectations/gm subdir (under
45 # https://skia.googlecode.com/svn/trunk/expectations/gm/ ) 43 # https://skia.googlecode.com/svn/trunk/expectations/gm/ )
46 # to builder name (see list at http://108.170.217.252:10117/builders ) 44 # to builder name (see list at http://108.170.217.252:10117/builders )
47 SUBDIR_MAPPING = { 45 SUBDIR_MAPPING = {
48 'base-shuttle-win7-intel-float': 46 'base-shuttle-win7-intel-float':
49 'Test-Win7-ShuttleA-HD2000-x86-Release', 47 'Test-Win7-ShuttleA-HD2000-x86-Release',
50 'base-shuttle-win7-intel-angle': 48 'base-shuttle-win7-intel-angle':
51 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', 49 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE',
(...skipping 14 matching lines...) Expand all
66 'base-android-xoom': 64 'base-android-xoom':
67 'Test-Android-Xoom-Tegra2-Arm7-Release', 65 'Test-Android-Xoom-Tegra2-Arm7-Release',
68 'base-android-nexus-10': 66 'base-android-nexus-10':
69 'Test-Android-Nexus10-MaliT604-Arm7-Release', 67 'Test-Android-Nexus10-MaliT604-Arm7-Release',
70 'base-android-nexus-4': 68 'base-android-nexus-4':
71 'Test-Android-Nexus4-Adreno320-Arm7-Release', 69 'Test-Android-Nexus4-Adreno320-Arm7-Release',
72 } 70 }
73 71
74 72
75 class _InternalException(Exception): 73 class _InternalException(Exception):
76 pass 74 pass
77 75
class ExceptionHandler(object):
  """Handles exceptions, either raising them immediately or collecting them
  to display later on."""

  def __init__(self, keep_going_on_failure=False):
    """
    params:
      keep_going_on_failure: if False, report failures and quit right away;
                             if True, collect failures until
                             ReportAllFailures() is called
    """
    self._keep_going_on_failure = keep_going_on_failure
    self._failures_encountered = []
    self._exiting = False

  def _Exit(self, status=1):
    """Exit the program with the given status value."""
    # Record that we are on our way out, so that any exception raised while
    # quitting is propagated rather than swallowed.
    self._exiting = True
    sys.exit(status)

  def RaiseExceptionOrContinue(self, e):
    """We have encountered an exception; either collect the info and keep
    going, or exit the program right away."""
    # If we are already quitting the program, propagate any exceptions
    # so that the proper exit status will be communicated to the shell.
    if self._exiting:
      raise e

    if not self._keep_going_on_failure:
      sys.stderr.write('%s\n' % e)
      sys.stderr.write(
          'Halting at first exception; to keep going, re-run '
          'with the --keep-going-on-failure option set.\n')
      self._Exit()
    else:
      sys.stderr.write('WARNING: swallowing exception %s\n' % e)
      self._failures_encountered.append(e)

  def ReportAllFailures(self):
    """If any failures were collected, report how many and exit nonzero;
    otherwise return normally."""
    num_failures = len(self._failures_encountered)
    if num_failures:
      sys.stderr.write('Encountered %d failures (see above).\n' % num_failures)
      self._Exit()
119 117
120 118
class JsonRebaseliner(object):
  """Rebaselines a JSON expectations file (not individual image files)."""

  def __init__(self, expectations_root, expectations_input_filename,
               expectations_output_filename, actuals_base_url,
               actuals_filename, exception_handler,
               tests=None, configs=None, add_new=False):
    """
    params:
      expectations_root: root directory of all expectations JSON files
      expectations_input_filename: filename (under expectations_root) of JSON
                                   expectations file to read; typically
                                   "expected-results.json"
      expectations_output_filename: filename (under expectations_root) to
                                    which updated expectations should be
                                    written; typically the same as
                                    expectations_input_filename, to overwrite
                                    the old content
      actuals_base_url: base URL from which to read actual-result JSON files
      actuals_filename: filename (under actuals_base_url) from which to read a
                        summary of results; typically "actual-results.json"
      exception_handler: reference to rebaseline.ExceptionHandler object
      tests: list of tests to rebaseline, or None if we should rebaseline
             whatever files the JSON results summary file tells us to
      configs: which configs to run for each test, or None if we should
               rebaseline whatever configs the JSON results summary file
               tells us to
      add_new: if True, add expectations for tests which don't have any yet
    """
    self._expectations_root = expectations_root
    self._expectations_input_filename = expectations_input_filename
    self._expectations_output_filename = expectations_output_filename
    self._tests = tests
    self._configs = configs
    self._actuals_base_url = actuals_base_url
    self._actuals_filename = actuals_filename
    self._exception_handler = exception_handler
    self._add_new = add_new
    self._image_filename_re = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
    # If the expectations live in an svn working copy, we will need to set
    # svn properties on any expectations file we rewrite.
    self._using_svn = os.path.isdir(os.path.join(expectations_root, '.svn'))

  def _Call(self, cmd):
    """Execute subprocess.call(cmd); raise _InternalException on failure."""
    exit_status = subprocess.call(cmd)
    if exit_status != 0:
      raise _InternalException('error running command: ' + ' '.join(cmd))

  def _GetFileContents(self, filepath):
    """Return the full contents of filepath, as a single string.

    If filepath looks like a URL, try to read it that way instead of as
    a path on local storage.

    Raises _InternalException if there is a problem.
    """
    if not filepath.startswith(('http:', 'https:')):
      return open(filepath, 'r').read()
    try:
      return urllib2.urlopen(filepath).read()
    except urllib2.HTTPError as e:
      raise _InternalException('unable to read URL %s: %s' % (filepath, e))

  def _GetActualResults(self, json_url, sections=None):
    """Return a dictionary of actual results from an actual-results.json file.

    The dictionary returned has this format:
    {
      u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322],
      u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152],
      u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716]
    }

    If the JSON actual result summary file cannot be loaded, logs a warning
    message and returns None.
    If the JSON actual result summary file can be loaded, but we have
    trouble parsing it, raises an Exception.

    params:
      json_url: URL pointing to a JSON actual result summary file
      sections: a list of section names to include in the results, e.g.
                [gm_json.JSONKEY_ACTUALRESULTS_FAILED,
                 gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ;
                if None, then include ALL sections.
    """
    try:
      json_contents = self._GetFileContents(json_url)
    except _InternalException:
      sys.stderr.write(
          'could not read json_url %s ; skipping this platform.\n' % json_url)
      return None
    json_dict = gm_json.LoadFromString(json_contents)
    actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
    combined_results = {}
    # An empty/None sections argument means "include every section".
    for section in (sections or actual_results.keys()):
      per_section_results = actual_results[section]
      if per_section_results:
        combined_results.update(per_section_results)
    return combined_results

  def RebaselineSubdir(self, subdir, builder):
    """Rebaseline all tests/types we specified in the constructor,
    within this expectations/gm subdir.

    params:
      subdir : e.g. 'base-shuttle-win7-intel-float'
      builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release'
    """
    # Read in the actual result summary, and extract all the tests whose
    # results we need to update.
    actuals_url = '/'.join([self._actuals_base_url,
                            subdir, builder, subdir,
                            self._actuals_filename])
    # In most cases, we won't need to re-record results that are already
    # succeeding, but including the SUCCEEDED results will allow us to
    # re-record expectations if they somehow get out of sync.
    sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED,
                gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED]
    if self._add_new:
      sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
    results_to_update = self._GetActualResults(json_url=actuals_url,
                                               sections=sections)

    # Read in current expectations.
    input_filepath = os.path.join(
        self._expectations_root, subdir, self._expectations_input_filename)
    expectations_dict = gm_json.LoadFromFile(input_filepath)
    expected_results = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS]

    # Update the expectations in memory, skipping any tests/configs that
    # the caller asked to exclude.
    skipped_images = []
    for (image_name, image_results) in (results_to_update or {}).iteritems():
      (test, config) = self._image_filename_re.match(image_name).groups()
      if self._tests and test not in self._tests:
        skipped_images.append(image_name)
        continue
      if self._configs and config not in self._configs:
        skipped_images.append(image_name)
        continue
      if not expected_results.get(image_name):
        expected_results[image_name] = {}
      expected_results[image_name][
          gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = [image_results]

    # Write out updated expectations.
    output_filepath = os.path.join(
        self._expectations_root, subdir, self._expectations_output_filename)
    gm_json.WriteToFile(expectations_dict, output_filepath)

    # Mark the JSON file as plaintext, so text-style diffs can be applied.
    # Fixes https://code.google.com/p/skia/issues/detail?id=1442
    if self._using_svn:
      self._Call(['svn', 'propset', '--quiet', 'svn:mime-type',
                  'text/x-json', output_filepath])
279 275
280 # main... 276 # main...
281 277
282 parser = argparse.ArgumentParser() 278 parser = argparse.ArgumentParser()
283 parser.add_argument('--actuals-base-url', 279 parser.add_argument('--actuals-base-url',
284 help='base URL from which to read files containing JSON ' + 280 help='base URL from which to read files containing JSON ' +
285 'summaries of actual GM results; defaults to %(default)s', 281 'summaries of actual GM results; defaults to %(default)s',
286 default='http://skia-autogen.googlecode.com/svn/gm-actual') 282 default='http://skia-autogen.googlecode.com/svn/gm-actual')
287 parser.add_argument('--actuals-filename', 283 parser.add_argument('--actuals-filename',
288 help='filename (within platform-specific subdirectories ' + 284 help='filename (within platform-specific subdirectories ' +
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
335 # TODO(epoger): Add test that exercises --tests argument. 331 # TODO(epoger): Add test that exercises --tests argument.
336 parser.add_argument('--tests', metavar='TEST', nargs='+', 332 parser.add_argument('--tests', metavar='TEST', nargs='+',
337 help='which tests to rebaseline, e.g. ' + 333 help='which tests to rebaseline, e.g. ' +
338 '"--tests aaclip bigmatrix", as a filter over the full ' + 334 '"--tests aaclip bigmatrix", as a filter over the full ' +
339 'set of results in ACTUALS_FILENAME; if unspecified, ' + 335 'set of results in ACTUALS_FILENAME; if unspecified, ' +
340 'rebaseline *all* tests that are available.') 336 'rebaseline *all* tests that are available.')
341 args = parser.parse_args() 337 args = parser.parse_args()
342 exception_handler = ExceptionHandler( 338 exception_handler = ExceptionHandler(
343 keep_going_on_failure=args.keep_going_on_failure) 339 keep_going_on_failure=args.keep_going_on_failure)
344 if args.subdirs: 340 if args.subdirs:
345 subdirs = args.subdirs 341 subdirs = args.subdirs
346 missing_json_is_fatal = True 342 missing_json_is_fatal = True
347 else: 343 else:
348 subdirs = sorted(SUBDIR_MAPPING.keys()) 344 subdirs = sorted(SUBDIR_MAPPING.keys())
349 missing_json_is_fatal = False 345 missing_json_is_fatal = False
350 for subdir in subdirs: 346 for subdir in subdirs:
351 if not subdir in SUBDIR_MAPPING.keys(): 347 if not subdir in SUBDIR_MAPPING.keys():
352 raise Exception(('unrecognized platform subdir "%s"; ' + 348 raise Exception(('unrecognized platform subdir "%s"; ' +
353 'should be one of %s') % ( 349 'should be one of %s') % (
354 subdir, SUBDIR_MAPPING.keys())) 350 subdir, SUBDIR_MAPPING.keys()))
355 builder = SUBDIR_MAPPING[subdir] 351 builder = SUBDIR_MAPPING[subdir]
356 352
357 # We instantiate different Rebaseliner objects depending 353 # We instantiate different Rebaseliner objects depending
358 # on whether we are rebaselining an expected-results.json file, or 354 # on whether we are rebaselining an expected-results.json file, or
359 # individual image files. Different expectations/gm subdirectories may move 355 # individual image files. Different expectations/gm subdirectories may move
360 # from individual image files to JSON-format expectations at different 356 # from individual image files to JSON-format expectations at different
361 # times, so we need to make this determination per subdirectory. 357 # times, so we need to make this determination per subdirectory.
362 # 358 #
363 # See https://goto.google.com/ChecksumTransitionDetail 359 # See https://goto.google.com/ChecksumTransitionDetail
364 expectations_json_file = os.path.join(args.expectations_root, subdir, 360 expectations_json_file = os.path.join(args.expectations_root, subdir,
365 args.expectations_filename) 361 args.expectations_filename)
366 if os.path.isfile(expectations_json_file): 362 if os.path.isfile(expectations_json_file):
367 rebaseliner = JsonRebaseliner( 363 rebaseliner = JsonRebaseliner(
368 expectations_root=args.expectations_root, 364 expectations_root=args.expectations_root,
369 expectations_input_filename=args.expectations_filename, 365 expectations_input_filename=args.expectations_filename,
370 expectations_output_filename=(args.expectations_filename_output or 366 expectations_output_filename=(args.expectations_filename_output or
371 args.expectations_filename), 367 args.expectations_filename),
372 tests=args.tests, configs=args.configs, 368 tests=args.tests, configs=args.configs,
373 actuals_base_url=args.actuals_base_url, 369 actuals_base_url=args.actuals_base_url,
374 actuals_filename=args.actuals_filename, 370 actuals_filename=args.actuals_filename,
375 exception_handler=exception_handler, 371 exception_handler=exception_handler,
376 add_new=args.add_new) 372 add_new=args.add_new)
377 else: 373 else:
378 # TODO(epoger): When we get rid of the ImageRebaseliner implementation, 374 # TODO(epoger): When we get rid of the ImageRebaseliner implementation,
379 # we should raise an Exception in this case (no JSON expectations file 375 # we should raise an Exception in this case (no JSON expectations file
380 # found to update), to prevent a recurrence of 376 # found to update), to prevent a recurrence of
381 # https://code.google.com/p/skia/issues/detail?id=1403 ('rebaseline.py 377 # https://code.google.com/p/skia/issues/detail?id=1403 ('rebaseline.py
382 # script fails with misleading output when run outside of gm-expected 378 # script fails with misleading output when run outside of gm-expected
383 # dir') 379 # dir')
384 rebaseliner = rebaseline_imagefiles.ImageRebaseliner( 380 rebaseliner = rebaseline_imagefiles.ImageRebaseliner(
385 expectations_root=args.expectations_root, 381 expectations_root=args.expectations_root,
386 tests=args.tests, configs=args.configs, 382 tests=args.tests, configs=args.configs,
387 dry_run=args.dry_run, 383 dry_run=args.dry_run,
388 json_base_url=args.actuals_base_url, 384 json_base_url=args.actuals_base_url,
389 json_filename=args.actuals_filename, 385 json_filename=args.actuals_filename,
390 exception_handler=exception_handler, 386 exception_handler=exception_handler,
391 add_new=args.add_new, 387 add_new=args.add_new,
392 missing_json_is_fatal=missing_json_is_fatal) 388 missing_json_is_fatal=missing_json_is_fatal)
393 389
394 try: 390 try:
395 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder) 391 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder)
396 except BaseException as e: 392 except BaseException as e:
397 exception_handler.RaiseExceptionOrContinue(e) 393 exception_handler.RaiseExceptionOrContinue(e)
398 394
399 exception_handler.ReportAllFailures() 395 exception_handler.ReportAllFailures()
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698