OLD | NEW |
---|---|
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 | 2 |
3 ''' | 3 ''' |
4 Copyright 2012 Google Inc. | 4 Copyright 2012 Google Inc. |
5 | 5 |
6 Use of this source code is governed by a BSD-style license that can be | 6 Use of this source code is governed by a BSD-style license that can be |
7 found in the LICENSE file. | 7 found in the LICENSE file. |
8 ''' | 8 ''' |
9 | 9 |
10 ''' | 10 ''' |
11 Rebaselines the given GM tests, on all bots and all configurations. | 11 Rebaselines the given GM tests, on all bots and all configurations. |
12 | |
13 TODO(epoger): Fix indentation in this file (2-space indents, not 4-space). | |
14 ''' | 12 ''' |
15 | 13 |
16 # System-level imports | 14 # System-level imports |
17 import argparse | 15 import argparse |
18 import os | 16 import os |
19 import re | 17 import re |
20 import subprocess | 18 import subprocess |
21 import sys | 19 import sys |
22 import urllib2 | 20 import urllib2 |
23 | 21 |
24 # Imports from local directory | |
25 import rebaseline_imagefiles | |
26 | |
27 # Imports from within Skia | 22 # Imports from within Skia |
28 # | 23 # |
29 # We need to add the 'gm' directory, so that we can import gm_json.py within | 24 # We need to add the 'gm' directory, so that we can import gm_json.py within |
30 # that directory. That script allows us to parse the actual-results.json file | 25 # that directory. That script allows us to parse the actual-results.json file |
31 # written out by the GM tool. | 26 # written out by the GM tool. |
32 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* | 27 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* |
33 # so any dirs that are already in the PYTHONPATH will be preferred. | 28 # so any dirs that are already in the PYTHONPATH will be preferred. |
34 # | 29 # |
35 # This assumes that the 'gm' directory has been checked out as a sibling of | 30 # This assumes that the 'gm' directory has been checked out as a sibling of |
36 # the 'tools' directory containing this script, which will be the case if | 31 # the 'tools' directory containing this script, which will be the case if |
37 # 'trunk' was checked out as a single unit. | 32 # 'trunk' was checked out as a single unit. |
38 GM_DIRECTORY = os.path.realpath( | 33 GM_DIRECTORY = os.path.realpath( |
39 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) | 34 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) |
40 if GM_DIRECTORY not in sys.path: | 35 if GM_DIRECTORY not in sys.path: |
41 sys.path.append(GM_DIRECTORY) | 36 sys.path.append(GM_DIRECTORY) |
42 import gm_json | 37 import gm_json |
43 | 38 |
44 # Mapping of expectations/gm subdir (under | 39 # Mapping of expectations/gm subdir (under |
45 # https://skia.googlecode.com/svn/trunk/expectations/gm/ ) | 40 # https://skia.googlecode.com/svn/trunk/expectations/gm/ ) |
46 # to builder name (see list at http://108.170.217.252:10117/builders ) | 41 # to builder name (see list at http://108.170.217.252:10117/builders ) |
47 SUBDIR_MAPPING = { | 42 SUBDIR_MAPPING = { |
48 'base-shuttle-win7-intel-float': | 43 'base-shuttle-win7-intel-float': |
49 'Test-Win7-ShuttleA-HD2000-x86-Release', | 44 'Test-Win7-ShuttleA-HD2000-x86-Release', |
50 'base-shuttle-win7-intel-angle': | 45 'base-shuttle-win7-intel-angle': |
51 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', | 46 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', |
(...skipping 14 matching lines...) | |
66 'base-android-xoom': | 61 'base-android-xoom': |
67 'Test-Android-Xoom-Tegra2-Arm7-Release', | 62 'Test-Android-Xoom-Tegra2-Arm7-Release', |
68 'base-android-nexus-10': | 63 'base-android-nexus-10': |
69 'Test-Android-Nexus10-MaliT604-Arm7-Release', | 64 'Test-Android-Nexus10-MaliT604-Arm7-Release', |
70 'base-android-nexus-4': | 65 'base-android-nexus-4': |
71 'Test-Android-Nexus4-Adreno320-Arm7-Release', | 66 'Test-Android-Nexus4-Adreno320-Arm7-Release', |
72 } | 67 } |
73 | 68 |
74 | 69 |
75 class _InternalException(Exception): | 70 class _InternalException(Exception): |
76 pass | 71 pass |
77 | 72 |
78 # Object that handles exceptions, either raising them immediately or collecting | 73 # Object that handles exceptions, either raising them immediately or collecting |
79 # them to display later on. | 74 # them to display later on. |
80 class ExceptionHandler(object): | 75 class ExceptionHandler(object): |
81 | 76 |
82 # params: | 77 # params: |
83 # keep_going_on_failure: if False, report failures and quit right away; | 78 # keep_going_on_failure: if False, report failures and quit right away; |
84 # if True, collect failures until | 79 # if True, collect failures until |
85 # ReportAllFailures() is called | 80 # ReportAllFailures() is called |
86 def __init__(self, keep_going_on_failure=False): | 81 def __init__(self, keep_going_on_failure=False): |
87 self._keep_going_on_failure = keep_going_on_failure | 82 self._keep_going_on_failure = keep_going_on_failure |
88 self._failures_encountered = [] | 83 self._failures_encountered = [] |
89 self._exiting = False | 84 self._exiting = False |
90 | 85 |
91 # Exit the program with the given status value. | 86 # Exit the program with the given status value. |
92 def _Exit(self, status=1): | 87 def _Exit(self, status=1): |
93 self._exiting = True | 88 self._exiting = True |
94 sys.exit(status) | 89 sys.exit(status) |
95 | 90 |
96 # We have encountered an exception; either collect the info and keep going, | 91 # We have encountered an exception; either collect the info and keep going, |
97 # or exit the program right away. | 92 # or exit the program right away. |
98 def RaiseExceptionOrContinue(self, e): | 93 def RaiseExceptionOrContinue(self, e): |
99 # If we are already quitting the program, propagate any exceptions | 94 # If we are already quitting the program, propagate any exceptions |
100 # so that the proper exit status will be communicated to the shell. | 95 # so that the proper exit status will be communicated to the shell. |
101 if self._exiting: | 96 if self._exiting: |
102 raise e | 97 raise e |
103 | 98 |
104 if self._keep_going_on_failure: | 99 if self._keep_going_on_failure: |
105 print >> sys.stderr, 'WARNING: swallowing exception %s' % e | 100 print >> sys.stderr, 'WARNING: swallowing exception %s' % e |
106 self._failures_encountered.append(e) | 101 self._failures_encountered.append(e) |
107 else: | 102 else: |
108 print >> sys.stderr, e | 103 print >> sys.stderr, e |
109 print >> sys.stderr, ( | 104 print >> sys.stderr, ( |
110 'Halting at first exception; to keep going, re-run ' + | 105 'Halting at first exception; to keep going, re-run ' + |
111 'with the --keep-going-on-failure option set.') | 106 'with the --keep-going-on-failure option set.') |
112 self._Exit() | 107 self._Exit() |
113 | 108 |
114 def ReportAllFailures(self): | 109 def ReportAllFailures(self): |
115 if self._failures_encountered: | 110 if self._failures_encountered: |
116 print >> sys.stderr, ('Encountered %d failures (see above).' % | 111 print >> sys.stderr, ('Encountered %d failures (see above).' % |
117 len(self._failures_encountered)) | 112 len(self._failures_encountered)) |
118 self._Exit() | 113 self._Exit() |
119 | 114 |
120 | 115 |
121 # Object that rebaselines a JSON expectations file (not individual image files). | 116 # Object that rebaselines a JSON expectations file (not individual image files). |
122 class JsonRebaseliner(object): | 117 class JsonRebaseliner(object): |
123 | 118 |
124 # params: | 119 # params: |
125 # expectations_root: root directory of all expectations JSON files | 120 # expectations_root: root directory of all expectations JSON files |
126 # expectations_input_filename: filename (under expectations_root) of JSON | 121 # expectations_input_filename: filename (under expectations_root) of JSON |
127 # expectations file to read; typically | 122 # expectations file to read; typically |
128 # "expected-results.json" | 123 # "expected-results.json" |
129 # expectations_output_filename: filename (under expectations_root) to | 124 # expectations_output_filename: filename (under expectations_root) to |
130 # which updated expectations should be | 125 # which updated expectations should be |
131 # written; typically the same as | 126 # written; typically the same as |
132 # expectations_input_filename, to overwrite | 127 # expectations_input_filename, to overwrite |
133 # the old content | 128 # the old content |
134 # actuals_base_url: base URL from which to read actual-result JSON files | 129 # actuals_base_url: base URL from which to read actual-result JSON files |
135 # actuals_filename: filename (under actuals_base_url) from which to read a | 130 # actuals_filename: filename (under actuals_base_url) from which to read a |
136 # summary of results; typically "actual-results.json" | 131 # summary of results; typically "actual-results.json" |
137 # exception_handler: reference to rebaseline.ExceptionHandler object | 132 # exception_handler: reference to rebaseline.ExceptionHandler object |
138 # tests: list of tests to rebaseline, or None if we should rebaseline | 133 # tests: list of tests to rebaseline, or None if we should rebaseline |
139 # whatever files the JSON results summary file tells us to | 134 # whatever files the JSON results summary file tells us to |
140 # configs: which configs to run for each test, or None if we should | 135 # configs: which configs to run for each test, or None if we should |
141 # rebaseline whatever configs the JSON results summary file tells | 136 # rebaseline whatever configs the JSON results summary file tells |
142 # us to | 137 # us to |
143 # add_new: if True, add expectations for tests which don't have any yet | 138 # add_new: if True, add expectations for tests which don't have any yet |
144 def __init__(self, expectations_root, expectations_input_filename, | 139 def __init__(self, expectations_root, expectations_input_filename, |
145 expectations_output_filename, actuals_base_url, | 140 expectations_output_filename, actuals_base_url, |
146 actuals_filename, exception_handler, | 141 actuals_filename, exception_handler, |
147 tests=None, configs=None, add_new=False): | 142 tests=None, configs=None, add_new=False): |
148 self._expectations_root = expectations_root | 143 self._expectations_root = expectations_root |
149 self._expectations_input_filename = expectations_input_filename | 144 self._expectations_input_filename = expectations_input_filename |
150 self._expectations_output_filename = expectations_output_filename | 145 self._expectations_output_filename = expectations_output_filename |
151 self._tests = tests | 146 self._tests = tests |
152 self._configs = configs | 147 self._configs = configs |
153 self._actuals_base_url = actuals_base_url | 148 self._actuals_base_url = actuals_base_url |
154 self._actuals_filename = actuals_filename | 149 self._actuals_filename = actuals_filename |
155 self._exception_handler = exception_handler | 150 self._exception_handler = exception_handler |
156 self._add_new = add_new | 151 self._add_new = add_new |
157 self._image_filename_re = re.compile(gm_json.IMAGE_FILENAME_PATTERN) | 152 self._image_filename_re = re.compile(gm_json.IMAGE_FILENAME_PATTERN) |
158 self._using_svn = os.path.isdir(os.path.join(expectations_root, '.svn')) | 153 self._using_svn = os.path.isdir(os.path.join(expectations_root, '.svn')) |
159 | 154 |
160 # Executes subprocess.call(cmd). | 155 # Executes subprocess.call(cmd). |
161 # Raises an Exception if the command fails. | 156 # Raises an Exception if the command fails. |
162 def _Call(self, cmd): | 157 def _Call(self, cmd): |
163 if subprocess.call(cmd) != 0: | 158 if subprocess.call(cmd) != 0: |
164 raise _InternalException('error running command: ' + ' '.join(cmd)) | 159 raise _InternalException('error running command: ' + ' '.join(cmd)) |
165 | 160 |
166 # Returns the full contents of filepath, as a single string. | 161 # Returns the full contents of filepath, as a single string. |
167 # If filepath looks like a URL, try to read it that way instead of as | 162 # If filepath looks like a URL, try to read it that way instead of as |
168 # a path on local storage. | 163 # a path on local storage. |
169 # | 164 # |
170 # Raises _InternalException if there is a problem. | 165 # Raises _InternalException if there is a problem. |
171 def _GetFileContents(self, filepath): | 166 def _GetFileContents(self, filepath): |
172 if filepath.startswith('http:') or filepath.startswith('https:'): | 167 if filepath.startswith('http:') or filepath.startswith('https:'): |
173 try: | 168 try: |
174 return urllib2.urlopen(filepath).read() | 169 return urllib2.urlopen(filepath).read() |
175 except urllib2.HTTPError as e: | 170 except urllib2.HTTPError as e: |
176 raise _InternalException('unable to read URL %s: %s' % ( | 171 raise _InternalException('unable to read URL %s: %s' % ( |
177 filepath, e)) | 172 filepath, e)) |
178 else: | 173 else: |
179 return open(filepath, 'r').read() | 174 return open(filepath, 'r').read() |
180 | 175 |
181 # Returns a dictionary of actual results from actual-results.json file. | 176 # Returns a dictionary of actual results from actual-results.json file. |
182 # | 177 # |
183 # The dictionary returned has this format: | 178 # The dictionary returned has this format: |
184 # { | 179 # { |
185 # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322], | 180 # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322], |
186 # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152], | 181 # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152], |
187 # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716] | 182 # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716] |
188 # } | 183 # } |
189 # | 184 # |
190 # If the JSON actual result summary file cannot be loaded, logs a warning | 185 # If the JSON actual result summary file cannot be loaded, logs a warning |
191 # message and returns None. | 186 # message and returns None. |
192 # If the JSON actual result summary file can be loaded, but we have | 187 # If the JSON actual result summary file can be loaded, but we have |
193 # trouble parsing it, raises an Exception. | 188 # trouble parsing it, raises an Exception. |
194 # | 189 # |
195 # params: | 190 # params: |
196 # json_url: URL pointing to a JSON actual result summary file | 191 # json_url: URL pointing to a JSON actual result summary file |
197 # sections: a list of section names to include in the results, e.g. | 192 # sections: a list of section names to include in the results, e.g. |
198 # [gm_json.JSONKEY_ACTUALRESULTS_FAILED, | 193 # [gm_json.JSONKEY_ACTUALRESULTS_FAILED, |
199 # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ; | 194 # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ; |
200 # if None, then include ALL sections. | 195 # if None, then include ALL sections. |
201 def _GetActualResults(self, json_url, sections=None): | 196 def _GetActualResults(self, json_url, sections=None): |
202 try: | 197 try: |
203 json_contents = self._GetFileContents(json_url) | 198 json_contents = self._GetFileContents(json_url) |
204 except _InternalException: | 199 except _InternalException: |
205 print >> sys.stderr, ( | 200 print >> sys.stderr, ( |
206 'could not read json_url %s ; skipping this platform.' % | 201 'could not read json_url %s ; skipping this platform.' % |
207 json_url) | 202 json_url) |
208 return None | 203 return None |
209 json_dict = gm_json.LoadFromString(json_contents) | 204 json_dict = gm_json.LoadFromString(json_contents) |
210 results_to_return = {} | 205 results_to_return = {} |
211 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] | 206 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] |
212 if not sections: | 207 if not sections: |
213 sections = actual_results.keys() | 208 sections = actual_results.keys() |
214 for section in sections: | 209 for section in sections: |
215 section_results = actual_results[section] | 210 section_results = actual_results[section] |
216 if section_results: | 211 if section_results: |
217 results_to_return.update(section_results) | 212 results_to_return.update(section_results) |
218 return results_to_return | 213 return results_to_return |
219 | 214 |
220 # Rebaseline all tests/types we specified in the constructor, | 215 # Rebaseline all tests/types we specified in the constructor, |
221 # within this expectations/gm subdir. | 216 # within this expectations/gm subdir. |
222 # | 217 # |
223 # params: | 218 # params: |
224 # subdir : e.g. 'base-shuttle-win7-intel-float' | 219 # subdir : e.g. 'base-shuttle-win7-intel-float' |
225 # builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release' | 220 # builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release' |
226 def RebaselineSubdir(self, subdir, builder): | 221 def RebaselineSubdir(self, subdir, builder): |
227 # Read in the actual result summary, and extract all the tests whose | 222 # Read in the actual result summary, and extract all the tests whose |
228 # results we need to update. | 223 # results we need to update. |
229 actuals_url = '/'.join([self._actuals_base_url, | 224 actuals_url = '/'.join([self._actuals_base_url, |
230 subdir, builder, subdir, | 225 subdir, builder, subdir, |
231 self._actuals_filename]) | 226 self._actuals_filename]) |
232 # In most cases, we won't need to re-record results that are already | 227 # In most cases, we won't need to re-record results that are already |
233 # succeeding, but including the SUCCEEDED results will allow us to | 228 # succeeding, but including the SUCCEEDED results will allow us to |
234 # re-record expectations if they somehow get out of sync. | 229 # re-record expectations if they somehow get out of sync. |
235 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED, | 230 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED, |
236 gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED] | 231 gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED] |
237 if self._add_new: | 232 if self._add_new: |
238 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) | 233 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) |
239 results_to_update = self._GetActualResults(json_url=actuals_url, | 234 results_to_update = self._GetActualResults(json_url=actuals_url, |
240 sections=sections) | 235 sections=sections) |
241 | 236 |
242 # Read in current expectations. | 237 # Read in current expectations. |
243 expectations_input_filepath = os.path.join( | 238 expectations_input_filepath = os.path.join( |
244 self._expectations_root, subdir, self._expectations_input_filename) | 239 self._expectations_root, subdir, self._expectations_input_filename) |
245 expectations_dict = gm_json.LoadFromFile(expectations_input_filepath) | 240 expectations_dict = gm_json.LoadFromFile(expectations_input_filepath) |
246 expected_results = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS] | 241 expected_results = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS] |
247 | 242 |
248 # Update the expectations in memory, skipping any tests/configs that | 243 # Update the expectations in memory, skipping any tests/configs that |
249 # the caller asked to exclude. | 244 # the caller asked to exclude. |
250 skipped_images = [] | 245 skipped_images = [] |
251 if results_to_update: | 246 if results_to_update: |
252 for (image_name, image_results) in results_to_update.iteritems(): | 247 for (image_name, image_results) in results_to_update.iteritems(): |
253 (test, config) = \ | 248 (test, config) = self._image_filename_re.match(image_name).groups() |
254 self._image_filename_re.match(image_name).groups() | 249 if self._tests: |
255 if self._tests: | 250 if test not in self._tests: |
256 if test not in self._tests: | 251 skipped_images.append(image_name) |
257 skipped_images.append(image_name) | 252 continue |
258 continue | 253 if self._configs: |
259 if self._configs: | 254 if config not in self._configs: |
260 if config not in self._configs: | 255 skipped_images.append(image_name) |
261 skipped_images.append(image_name) | 256 continue |
262 continue | 257 if not expected_results.get(image_name): |
263 if not expected_results.get(image_name): | 258 expected_results[image_name] = {} |
264 expected_results[image_name] = {} | 259 expected_results[image_name][gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \ |
265 expected_results[image_name] \ | |
266 [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \ | |
267 [image_results] | 260 [image_results] |
268 | 261 |
269 # Write out updated expectations. | 262 # Write out updated expectations. |
270 expectations_output_filepath = os.path.join( | 263 expectations_output_filepath = os.path.join( |
271 self._expectations_root, subdir, self._expectations_output_filename) | 264 self._expectations_root, subdir, self._expectations_output_filename) |
272 gm_json.WriteToFile(expectations_dict, expectations_output_filepath) | 265 gm_json.WriteToFile(expectations_dict, expectations_output_filepath) |
273 | 266 |
274 # Mark the JSON file as plaintext, so text-style diffs can be applied. | 267 # Mark the JSON file as plaintext, so text-style diffs can be applied. |
275 # Fixes https://code.google.com/p/skia/issues/detail?id=1442 | 268 # Fixes https://code.google.com/p/skia/issues/detail?id=1442 |
276 if self._using_svn: | 269 if self._using_svn: |
277 self._Call(['svn', 'propset', '--quiet', 'svn:mime-type', | 270 self._Call(['svn', 'propset', '--quiet', 'svn:mime-type', |
278 'text/x-json', expectations_output_filepath]) | 271 'text/x-json', expectations_output_filepath]) |
279 | 272 |
280 # main... | 273 # main... |
281 | 274 |
282 parser = argparse.ArgumentParser() | 275 parser = argparse.ArgumentParser() |
283 parser.add_argument('--actuals-base-url', | 276 parser.add_argument('--actuals-base-url', |
284 help='base URL from which to read files containing JSON ' + | 277 help='base URL from which to read files containing JSON ' + |
285 'summaries of actual GM results; defaults to %(default)s', | 278 'summaries of actual GM results; defaults to %(default)s', |
286 default='http://skia-autogen.googlecode.com/svn/gm-actual') | 279 default='http://skia-autogen.googlecode.com/svn/gm-actual') |
287 parser.add_argument('--actuals-filename', | 280 parser.add_argument('--actuals-filename', |
288 help='filename (within platform-specific subdirectories ' + | 281 help='filename (within platform-specific subdirectories ' + |
289 'of ACTUALS_BASE_URL) to read a summary of results from; ' + | 282 'of ACTUALS_BASE_URL) to read a summary of results from; ' + |
290 'defaults to %(default)s', | 283 'defaults to %(default)s', |
291 default='actual-results.json') | 284 default='actual-results.json') |
292 # TODO(epoger): Add test that exercises --add-new argument. | 285 # TODO(epoger): Add test that exercises --add-new argument. |
293 parser.add_argument('--add-new', action='store_true', | 286 parser.add_argument('--add-new', action='store_true', |
294 help='in addition to the standard behavior of ' + | 287 help='in addition to the standard behavior of ' + |
295 'updating expectations for failing tests, add ' + | 288 'updating expectations for failing tests, add ' + |
296 'expectations for tests which don\'t have expectations ' + | 289 'expectations for tests which don\'t have expectations ' + |
297 'yet.') | 290 'yet.') |
298 # TODO(epoger): Add test that exercises --configs argument. | 291 # TODO(epoger): Add test that exercises --configs argument. |
299 parser.add_argument('--configs', metavar='CONFIG', nargs='+', | 292 parser.add_argument('--configs', metavar='CONFIG', nargs='+', |
300 help='which configurations to rebaseline, e.g. ' + | 293 help='which configurations to rebaseline, e.g. ' + |
301 '"--configs 565 8888", as a filter over the full set of ' + | 294 '"--configs 565 8888", as a filter over the full set of ' + |
302 'results in ACTUALS_FILENAME; if unspecified, rebaseline ' + | 295 'results in ACTUALS_FILENAME; if unspecified, rebaseline ' + |
303 '*all* configs that are available.') | 296 '*all* configs that are available.') |
304 # TODO(epoger): The --dry-run argument will no longer be needed once we | |
305 # are only rebaselining JSON files. | |
306 parser.add_argument('--dry-run', action='store_true', | |
307 help='instead of actually downloading files or adding ' + | |
308 'files to checkout, display a list of operations that ' + | |
309 'we would normally perform') | |
310 parser.add_argument('--expectations-filename', | 297 parser.add_argument('--expectations-filename', |
311 help='filename (under EXPECTATIONS_ROOT) to read ' + | 298 help='filename (under EXPECTATIONS_ROOT) to read ' + |
312 'current expectations from, and to write new ' + | 299 'current expectations from, and to write new ' + |
313 'expectations into (unless a separate ' + | 300 'expectations into (unless a separate ' + |
314 'EXPECTATIONS_FILENAME_OUTPUT has been specified); ' + | 301 'EXPECTATIONS_FILENAME_OUTPUT has been specified); ' + |
315 'defaults to %(default)s', | 302 'defaults to %(default)s', |
316 default='expected-results.json') | 303 default='expected-results.json') |
317 parser.add_argument('--expectations-filename-output', | 304 parser.add_argument('--expectations-filename-output', |
318 help='filename (under EXPECTATIONS_ROOT) to write ' + | 305 help='filename (under EXPECTATIONS_ROOT) to write ' + |
319 'updated expectations into; by default, overwrites the ' + | 306 'updated expectations into; by default, overwrites the ' + |
(...skipping 15 matching lines...) | |
335 # TODO(epoger): Add test that exercises --tests argument. | 322 # TODO(epoger): Add test that exercises --tests argument. |
336 parser.add_argument('--tests', metavar='TEST', nargs='+', | 323 parser.add_argument('--tests', metavar='TEST', nargs='+', |
337 help='which tests to rebaseline, e.g. ' + | 324 help='which tests to rebaseline, e.g. ' + |
338 '"--tests aaclip bigmatrix", as a filter over the full ' + | 325 '"--tests aaclip bigmatrix", as a filter over the full ' + |
339 'set of results in ACTUALS_FILENAME; if unspecified, ' + | 326 'set of results in ACTUALS_FILENAME; if unspecified, ' + |
340 'rebaseline *all* tests that are available.') | 327 'rebaseline *all* tests that are available.') |
341 args = parser.parse_args() | 328 args = parser.parse_args() |
342 exception_handler = ExceptionHandler( | 329 exception_handler = ExceptionHandler( |
343 keep_going_on_failure=args.keep_going_on_failure) | 330 keep_going_on_failure=args.keep_going_on_failure) |
344 if args.subdirs: | 331 if args.subdirs: |
345 subdirs = args.subdirs | 332 subdirs = args.subdirs |
346 missing_json_is_fatal = True | 333 missing_json_is_fatal = True |
347 else: | 334 else: |
348 subdirs = sorted(SUBDIR_MAPPING.keys()) | 335 subdirs = sorted(SUBDIR_MAPPING.keys()) |
349 missing_json_is_fatal = False | 336 missing_json_is_fatal = False |
350 for subdir in subdirs: | 337 for subdir in subdirs: |
351 if not subdir in SUBDIR_MAPPING.keys(): | 338 if not subdir in SUBDIR_MAPPING.keys(): |
352 raise Exception(('unrecognized platform subdir "%s"; ' + | 339 raise Exception(('unrecognized platform subdir "%s"; ' + |
353 'should be one of %s') % ( | 340 'should be one of %s') % ( |
354 subdir, SUBDIR_MAPPING.keys())) | 341 subdir, SUBDIR_MAPPING.keys())) |
355 builder = SUBDIR_MAPPING[subdir] | 342 builder = SUBDIR_MAPPING[subdir] |
356 | 343 |
357 # We instantiate different Rebaseliner objects depending | 344 # We instantiate different Rebaseliner objects depending |
358 # on whether we are rebaselining an expected-results.json file, or | 345 # on whether we are rebaselining an expected-results.json file, or |
359 # individual image files. Different expectations/gm subdirectories may move | 346 # individual image files. Different expectations/gm subdirectories may move |
360 # from individual image files to JSON-format expectations at different | 347 # from individual image files to JSON-format expectations at different |
361 # times, so we need to make this determination per subdirectory. | 348 # times, so we need to make this determination per subdirectory. |
362 # | 349 # |
363 # See https://goto.google.com/ChecksumTransitionDetail | 350 # See https://goto.google.com/ChecksumTransitionDetail |
364 expectations_json_file = os.path.join(args.expectations_root, subdir, | 351 expectations_json_file = os.path.join(args.expectations_root, subdir, |
365 args.expectations_filename) | 352 args.expectations_filename) |
366 if os.path.isfile(expectations_json_file): | 353 if os.path.isfile(expectations_json_file): |
367 rebaseliner = JsonRebaseliner( | 354 rebaseliner = JsonRebaseliner( |
368 expectations_root=args.expectations_root, | 355 expectations_root=args.expectations_root, |
369 expectations_input_filename=args.expectations_filename, | 356 expectations_input_filename=args.expectations_filename, |
370 expectations_output_filename=(args.expectations_filename_output or | 357 expectations_output_filename=(args.expectations_filename_output or |
371 args.expectations_filename), | 358 args.expectations_filename), |
372 tests=args.tests, configs=args.configs, | 359 tests=args.tests, configs=args.configs, |
373 actuals_base_url=args.actuals_base_url, | 360 actuals_base_url=args.actuals_base_url, |
374 actuals_filename=args.actuals_filename, | 361 actuals_filename=args.actuals_filename, |
375 exception_handler=exception_handler, | 362 exception_handler=exception_handler, |
376 add_new=args.add_new) | 363 add_new=args.add_new) |
377 else: | |
378 # TODO(epoger): When we get rid of the ImageRebaseliner implementation, | |
379 # we should raise an Exception in this case (no JSON expectations file | |
380 # found to update), to prevent a recurrence of | |
381 # https://code.google.com/p/skia/issues/detail?id=1403 ('rebaseline.py | |
382 # script fails with misleading output when run outside of gm-expected | |
383 # dir') | |
384 rebaseliner = rebaseline_imagefiles.ImageRebaseliner( | |
385 expectations_root=args.expectations_root, | |
386 tests=args.tests, configs=args.configs, | |
387 dry_run=args.dry_run, | |
388 json_base_url=args.actuals_base_url, | |
389 json_filename=args.actuals_filename, | |
390 exception_handler=exception_handler, | |
391 add_new=args.add_new, | |
392 missing_json_is_fatal=missing_json_is_fatal) | |
borenet 2013/08/02 20:49:19: A quick skim suggests that, outside of the above b | |
393 | |
394 try: | 364 try: |
395 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder) | 365 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder) |
396 except BaseException as e: | 366 except BaseException as e: |
397 exception_handler.RaiseExceptionOrContinue(e) | 367 exception_handler.RaiseExceptionOrContinue(e) |
368 else: | |
369 exception_handler.RaiseExceptionOrContinue(_InternalException( | |
370 'expectations_json_file %s not found' % expectations_json_file)) | |
398 | 371 |
399 exception_handler.ReportAllFailures() | 372 exception_handler.ReportAllFailures() |
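
The heart of this change is that RebaselineSubdir (new lines 221-271) now updates expectations entirely in memory and writes them back with gm_json.WriteToFile, with no per-image file handling. Below is a minimal sketch of that transformation; the 'allowed-digests' key and the filename regex are illustrative stand-ins for gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS and gm_json.IMAGE_FILENAME_PATTERN, which the real script gets from gm_json.

```python
# Minimal sketch of the in-memory expectations update done by
# JsonRebaseliner.RebaselineSubdir. The key string and regex below are
# assumptions standing in for the gm_json constants.
import re

IMAGE_FILENAME_RE = re.compile(r'(\S+)_(\S+)\.png')  # assumed form: <test>_<config>.png

def update_expectations(expected_results, results_to_update, tests=None, configs=None):
    """Fold actual results into expected_results, honoring --tests/--configs filters."""
    skipped_images = []
    for image_name, image_results in results_to_update.items():
        test, config = IMAGE_FILENAME_RE.match(image_name).groups()
        if tests and test not in tests:
            skipped_images.append(image_name)
            continue
        if configs and config not in configs:
            skipped_images.append(image_name)
            continue
        entry = expected_results.setdefault(image_name, {})
        entry['allowed-digests'] = [image_results]
    return skipped_images

# Input in the format documented for _GetActualResults() above:
actuals = {'imageblur_565.png': ['bitmap-64bitMD5', 3359963596899141322]}
expectations = {}
update_expectations(expectations, actuals, tests=['imageblur'], configs=['565'])
# expectations['imageblur_565.png']['allowed-digests'] now holds the single actual digest.
```

The filter-and-setdefault logic mirrors the restructured block at new lines 249-260.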
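On the input side, _GetActualResults (new lines 196-213) flattens selected sections of the downloaded actual-results.json into one {image filename: result} dict in the format shown in its docstring. A small sketch, under the assumption that the gm_json.JSONKEY_ACTUALRESULTS_* constants are plain section-name strings such as 'failed' and 'succeeded':

```python
# Sketch of how _GetActualResults() merges sections of an actual-results
# summary into a single flat dict. Section names here are assumed stand-ins
# for the gm_json.JSONKEY_ACTUALRESULTS_* constants.
actual_results = {
    'failed': {
        'imageblur_565.png': ['bitmap-64bitMD5', 3359963596899141322],
    },
    'succeeded': {
        'shadertext3_8888.png': ['bitmap-64bitMD5', 3713708307125704716],
    },
    'no-comparison': None,  # empty sections are simply skipped
}

def merge_sections(actual_results, sections=None):
    """Merge the requested sections (all of them if sections is None)."""
    merged = {}
    if not sections:
        sections = actual_results.keys()
    for section in sections:
        section_results = actual_results[section]
        if section_results:
            merged.update(section_results)
    return merged

# RebaselineSubdir always asks for failed + succeeded results, and only adds
# the no-comparison section when --add-new is passed:
merge_sections(actual_results, sections=['failed', 'succeeded'])
```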