OLD | NEW |
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 | 2 |
3 ''' | 3 ''' |
4 Copyright 2012 Google Inc. | 4 Copyright 2012 Google Inc. |
5 | 5 |
6 Use of this source code is governed by a BSD-style license that can be | 6 Use of this source code is governed by a BSD-style license that can be |
7 found in the LICENSE file. | 7 found in the LICENSE file. |
8 ''' | 8 ''' |
9 | 9 |
10 ''' | 10 ''' |
11 Rebaselines the given GM tests, on all bots and all configurations. | 11 Rebaselines the given GM tests, on all bots and all configurations. |
12 Must be run from the gm-expected directory. If run from a git or SVN | 12 Must be run from the gm-expected directory. If run from a git or SVN |
13 checkout, the files will be added to the staging area for commit. | 13 checkout, the files will be added to the staging area for commit. |
14 ''' | 14 ''' |
15 | 15 |
16 # System-level imports | 16 # System-level imports |
| 17 # TODO(epoger): check if any of these are no longer needed |
17 import argparse | 18 import argparse |
18 import os | 19 import os |
19 import re | 20 import re |
20 import subprocess | 21 import subprocess |
21 import sys | 22 import sys |
22 import urllib2 | 23 import urllib2 |
23 | 24 |
24 # Imports from local directory | 25 # Imports from local directory |
25 import rebaseline_imagefiles | 26 import rebaseline_imagefiles |
26 | 27 |
27 # Imports from within Skia | 28 # Imports from within Skia |
28 # | 29 # |
29 # We need to add the 'gm' directory, so that we can import gm_json.py within | 30 # We need to add the 'gm' directory, so that we can import gm_json.py within |
30 # that directory. That script allows us to parse the actual-results.json file | 31 # that directory. That script allows us to parse the actual-results.json file |
31 # written out by the GM tool. | 32 # written out by the GM tool. |
32 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* | 33 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* |
33 # so any dirs that are already in the PYTHONPATH will be preferred. | 34 # so any dirs that are already in the PYTHONPATH will be preferred. |
34 # | 35 # |
35 # This assumes that the 'gm' directory has been checked out as a sibling of | 36 # This assumes that the 'gm' directory has been checked out as a sibling of |
36 # the 'tools' directory containing this script, which will be the case if | 37 # the 'tools' directory containing this script, which will be the case if |
37 # 'trunk' was checked out as a single unit. | 38 # 'trunk' was checked out as a single unit. |
38 GM_DIRECTORY = os.path.realpath( | 39 GM_DIRECTORY = os.path.realpath( |
39 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) | 40 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) |
40 if GM_DIRECTORY not in sys.path: | 41 if GM_DIRECTORY not in sys.path: |
41 sys.path.append(GM_DIRECTORY) | 42 sys.path.append(GM_DIRECTORY) |
42 import gm_json | 43 import gm_json |
43 | 44 |
| 45 # TODO(epoger): make this a command-line argument |
44 JSON_EXPECTATIONS_FILENAME='expected-results.json' | 46 JSON_EXPECTATIONS_FILENAME='expected-results.json' |
45 | 47 |
46 # Mapping of gm-expectations subdir (under | 48 # Mapping of gm-expectations subdir (under |
47 # https://skia.googlecode.com/svn/gm-expected/ ) | 49 # https://skia.googlecode.com/svn/gm-expected/ ) |
48 # to builder name (see list at http://108.170.217.252:10117/builders ) | 50 # to builder name (see list at http://108.170.217.252:10117/builders ) |
49 SUBDIR_MAPPING = { | 51 SUBDIR_MAPPING = { |
50 'base-shuttle-win7-intel-float': | 52 'base-shuttle-win7-intel-float': |
51 'Test-Win7-ShuttleA-HD2000-x86-Release', | 53 'Test-Win7-ShuttleA-HD2000-x86-Release', |
52 'base-shuttle-win7-intel-angle': | 54 'base-shuttle-win7-intel-angle': |
53 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', | 55 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', |
(...skipping 17 matching lines...) Expand all Loading... |
71 'Test-Android-Nexus10-MaliT604-Arm7-Release', | 73 'Test-Android-Nexus10-MaliT604-Arm7-Release', |
72 'base-android-nexus-4': | 74 'base-android-nexus-4': |
73 'Test-Android-Nexus4-Adreno320-Arm7-Release', | 75 'Test-Android-Nexus4-Adreno320-Arm7-Release', |
74 } | 76 } |
75 | 77 |
76 | 78 |
77 class CommandFailedException(Exception): | 79 class CommandFailedException(Exception): |
78 pass | 80 pass |
79 | 81 |
80 # Object that rebaselines a JSON expectations file (not individual image files). | 82 # Object that rebaselines a JSON expectations file (not individual image files). |
81 # | |
82 # TODO(epoger): Most of this is just the code from the old ImageRebaseliner... | |
83 # some of it will need to be updated in order to properly rebaseline JSON files. | |
84 # There is a lot of code duplicated between here and ImageRebaseliner, but | |
85 # that's fine because we will delete ImageRebaseliner soon. | |
86 class JsonRebaseliner(object): | 83 class JsonRebaseliner(object): |
87 | 84 |
88 # params: | 85 # params: |
89 # expectations_root: root directory of all expectations | 86 # expectations_root: root directory of all expectations JSON files |
90 # json_base_url: base URL from which to read json_filename | 87 # actuals_base_url: base URL from which to read actual-result JSON files |
91 # json_filename: filename (under json_base_url) from which to read a | 88 # actuals_filename: filename (under actuals_base_url) from which to read a |
92 # summary of results; typically "actual-results.json" | 89 # summary of results; typically "actual-results.json" |
93 # tests: list of tests to rebaseline, or None if we should rebaseline | 90 # tests: list of tests to rebaseline, or None if we should rebaseline |
94 # whatever files the JSON results summary file tells us to | 91 # whatever files the JSON results summary file tells us to |
95 # configs: which configs to run for each test; this should only be | 92 # configs: which configs to run for each test; this should only be |
96 # specified if the list of tests was also specified (otherwise, | 93 # specified if the list of tests was also specified (otherwise, |
97 # the JSON file will give us test names and configs) | 94 # the JSON file will give us test names and configs) |
98 # dry_run: if True, instead of actually downloading files or adding | |
99 # files to checkout, display a list of operations that | |
100 # we would normally perform | |
101 # add_new: if True, add expectations for tests which don't have any yet | 95 # add_new: if True, add expectations for tests which don't have any yet |
102 # missing_json_is_fatal: whether to halt execution if we cannot read a | 96 def __init__(self, expectations_root, actuals_base_url, actuals_filename, |
103 # JSON actual result summary file | 97 tests=None, configs=None, add_new=False): |
104 def __init__(self, expectations_root, json_base_url, json_filename, | |
105 tests=None, configs=None, dry_run=False, | |
106 add_new=False, missing_json_is_fatal=False): | |
107 raise ValueError('JsonRebaseliner not yet implemented') # TODO(epoger) | |
108 if configs and not tests: | 98 if configs and not tests: |
109 raise ValueError('configs should only be specified if tests ' + | 99 raise ValueError('configs should only be specified if tests ' + |
110 'were specified also') | 100 'were specified also') |
111 self._expectations_root = expectations_root | 101 self._expectations_root = expectations_root |
112 self._tests = tests | 102 self._tests = tests |
113 self._configs = configs | 103 self._configs = configs |
114 self._json_base_url = json_base_url | 104 self._actuals_base_url = actuals_base_url |
115 self._json_filename = json_filename | 105 self._actuals_filename = actuals_filename |
116 self._dry_run = dry_run | |
117 self._add_new = add_new | 106 self._add_new = add_new |
118 self._missing_json_is_fatal = missing_json_is_fatal | |
119 self._googlestorage_gm_actuals_root = ( | |
120 'http://chromium-skia-gm.commondatastorage.googleapis.com/gm') | |
121 self._testname_pattern = re.compile('(\S+)_(\S+).png') | 107 self._testname_pattern = re.compile('(\S+)_(\S+).png') |
122 self._is_svn_checkout = ( | |
123 os.path.exists('.svn') or | |
124 os.path.exists(os.path.join(os.pardir, '.svn'))) | |
125 self._is_git_checkout = ( | |
126 os.path.exists('.git') or | |
127 os.path.exists(os.path.join(os.pardir, '.git'))) | |
128 | 108 |
129 # If dry_run is False, execute subprocess.call(cmd). | 109 # TODO(epoger): check if this (or any other method) is no longer needed. |
130 # If dry_run is True, print the command we would have otherwise run. | 110 # Execute subprocess.call(cmd). |
131 # Raises a CommandFailedException if the command fails. | 111 # Raises a CommandFailedException if the command fails. |
132 def _Call(self, cmd): | 112 def _Call(self, cmd): |
133 if self._dry_run: | |
134 print '%s' % ' '.join(cmd) | |
135 return | |
136 if subprocess.call(cmd) != 0: | 113 if subprocess.call(cmd) != 0: |
137 raise CommandFailedException('error running command: ' + | 114 raise CommandFailedException('error running command: ' + |
138 ' '.join(cmd)) | 115 ' '.join(cmd)) |
139 | 116 |
140 # Download a single actual result from GoogleStorage, returning True if it | |
141 # succeeded. | |
142 def _DownloadFromGoogleStorage(self, infilename, outfilename, all_results): | |
143 test_name = self._testname_pattern.match(infilename).group(1) | |
144 if not test_name: | |
145 print '# unable to find test_name for infilename %s' % infilename | |
146 return False | |
147 try: | |
148 hash_type, hash_value = all_results[infilename] | |
149 except KeyError: | |
150 print ('# unable to find filename %s in all_results dict' % | |
151 infilename) | |
152 return False | |
153 except ValueError as e: | |
154 print '# ValueError reading filename %s from all_results dict: %s'%( | |
155 infilename, e) | |
156 return False | |
157 url = '%s/%s/%s/%s.png' % (self._googlestorage_gm_actuals_root, | |
158 hash_type, test_name, hash_value) | |
159 try: | |
160 self._DownloadFile(source_url=url, dest_filename=outfilename) | |
161 return True | |
162 except CommandFailedException: | |
163 print '# Couldn\'t fetch gs_url %s' % url | |
164 return False | |
165 | |
166 # Download a single actual result from skia-autogen, returning True if it | 117 # Download a single actual result from skia-autogen, returning True if it |
167 # succeeded. | 118 # succeeded. |
168 def _DownloadFromAutogen(self, infilename, outfilename, | 119 def _DownloadFromAutogen(self, infilename, outfilename, |
169 expectations_subdir, builder_name): | 120 expectations_subdir, builder_name): |
170 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + | 121 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + |
171 expectations_subdir + '/' + builder_name + '/' + | 122 expectations_subdir + '/' + builder_name + '/' + |
172 expectations_subdir + '/' + infilename) | 123 expectations_subdir + '/' + infilename) |
173 try: | 124 try: |
174 self._DownloadFile(source_url=url, dest_filename=outfilename) | 125 self._DownloadFile(source_url=url, dest_filename=outfilename) |
175 return True | 126 return True |
176 except CommandFailedException: | 127 except CommandFailedException: |
177 print '# Couldn\'t fetch autogen_url %s' % url | 128 print '# Couldn\'t fetch autogen_url %s' % url |
178 return False | 129 return False |
179 | 130 |
180 # Download a single file, raising a CommandFailedException if it fails. | 131 # Download a single file, raising a CommandFailedException if it fails. |
181 def _DownloadFile(self, source_url, dest_filename): | 132 def _DownloadFile(self, source_url, dest_filename): |
182 # Download into a temporary file and then rename it afterwards, | 133 # Download into a temporary file and then rename it afterwards, |
183 # so that we don't corrupt the existing file if it fails midway through. | 134 # so that we don't corrupt the existing file if it fails midway through. |
184 temp_filename = os.path.join(os.path.dirname(dest_filename), | 135 temp_filename = os.path.join(os.path.dirname(dest_filename), |
185 '.temp-' + os.path.basename(dest_filename)) | 136 '.temp-' + os.path.basename(dest_filename)) |
186 | 137 |
187 # TODO(epoger): Replace calls to "curl"/"mv" (which will only work on | 138 # TODO(epoger): Replace calls to "curl"/"mv" (which will only work on |
188 # Unix) with a Python HTTP library (which should work cross-platform) | 139 # Unix) with a Python HTTP library (which should work cross-platform) |
189 self._Call([ 'curl', '--fail', '--silent', source_url, | 140 self._Call([ 'curl', '--fail', '--silent', source_url, |
190 '--output', temp_filename ]) | 141 '--output', temp_filename ]) |
191 self._Call([ 'mv', temp_filename, dest_filename ]) | 142 self._Call([ 'mv', temp_filename, dest_filename ]) |
192 | 143 |
193 # Returns the full contents of a URL, as a single string. | 144 # Returns the full contents of filepath, as a single string. |
194 # | 145 # If filepath looks like a URL, try to read it that way instead of as |
195 # Unlike standard URL handling, we allow relative "file:" URLs; | 146 # a path on local storage. |
196 # for example, "file:one/two" resolves to the file ./one/two | 147 def _GetFileContents(self, filepath): |
197 # (relative to current working dir) | 148 if filepath.startswith('http:') or filepath.startswith('https:'): |
198 def _GetContentsOfUrl(self, url): | 149 return urllib2.urlopen(filepath).read() |
199 file_prefix = 'file:' | |
200 if url.startswith(file_prefix): | |
201 filename = url[len(file_prefix):] | |
202 return open(filename, 'r').read() | |
203 else: | 150 else: |
204 return urllib2.urlopen(url).read() | 151 return open(filepath, 'r').read() |
205 | 152 |
206 # Returns a dictionary of actual results from actual-results.json file. | 153 # Returns a dictionary of actual results from actual-results.json file. |
207 # | 154 # |
208 # The dictionary returned has this format: | 155 # The dictionary returned has this format: |
209 # { | 156 # { |
210 # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322], | 157 # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322], |
211 # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152], | 158 # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152], |
212 # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716] | 159 # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716] |
213 # } | 160 # } |
214 # | 161 # |
215 # If the JSON actual result summary file cannot be loaded, the behavior | 162 # If the JSON actual result summary file cannot be loaded, raise an |
216 # depends on self._missing_json_is_fatal: | 163 # exception. |
217 # - if true: execution will halt with an exception | |
218 # - if false: we will log an error message but return an empty dictionary | |
219 # | 164 # |
220 # params: | 165 # params: |
221 # json_url: URL pointing to a JSON actual result summary file | 166 # json_url: URL pointing to a JSON actual result summary file |
222 # sections: a list of section names to include in the results, e.g. | 167 # sections: a list of section names to include in the results, e.g. |
223 # [gm_json.JSONKEY_ACTUALRESULTS_FAILED, | 168 # [gm_json.JSONKEY_ACTUALRESULTS_FAILED, |
224 # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ; | 169 # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ; |
225 # if None, then include ALL sections. | 170 # if None, then include ALL sections. |
226 def _GetActualResults(self, json_url, sections=None): | 171 def _GetActualResults(self, json_url, sections=None): |
227 try: | 172 json_contents = self._GetFileContents(json_url) |
228 json_contents = self._GetContentsOfUrl(json_url) | |
229 except (urllib2.HTTPError, IOError): | |
230 message = 'unable to load JSON summary URL %s' % json_url | |
231 if self._missing_json_is_fatal: | |
232 raise ValueError(message) | |
233 else: | |
234 print '# %s' % message | |
235 return {} | |
236 | |
237 json_dict = gm_json.LoadFromString(json_contents) | 173 json_dict = gm_json.LoadFromString(json_contents) |
238 results_to_return = {} | 174 results_to_return = {} |
239 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] | 175 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] |
240 if not sections: | 176 if not sections: |
241 sections = actual_results.keys() | 177 sections = actual_results.keys() |
242 for section in sections: | 178 for section in sections: |
243 section_results = actual_results[section] | 179 section_results = actual_results[section] |
244 if section_results: | 180 if section_results: |
245 results_to_return.update(section_results) | 181 results_to_return.update(section_results) |
246 return results_to_return | 182 return results_to_return |
247 | 183 |
248 # Returns a list of files that require rebaselining. | 184 # Returns a list of files that require rebaselining. |
249 # | 185 # |
250 # Note that this returns a list of FILES, like this: | 186 # Note that this returns a list of FILES, like this: |
251 # ['imageblur_565.png', 'xfermodes_pdf.png'] | 187 # ['imageblur_565.png', 'xfermodes_pdf.png'] |
252 # rather than a list of TESTS, like this: | 188 # rather than a list of TESTS, like this: |
253 # ['imageblur', 'xfermodes'] | 189 # ['imageblur', 'xfermodes'] |
254 # | 190 # |
255 # params: | 191 # params: |
256 # json_url: URL pointing to a JSON actual result summary file | 192 # json_url: URL pointing to a JSON actual result summary file |
257 # add_new: if True, then return files listed in any of these sections: | 193 # add_new: if True, then return files listed in any of these sections: |
258 # - JSONKEY_ACTUALRESULTS_FAILED | 194 # - JSONKEY_ACTUALRESULTS_FAILED |
259 # - JSONKEY_ACTUALRESULTS_NOCOMPARISON | 195 # - JSONKEY_ACTUALRESULTS_NOCOMPARISON |
260 # if False, then return files listed in these sections: | 196 # if False, then return files listed in these sections: |
261 # - JSONKEY_ACTUALRESULTS_FAILED | 197 # - JSONKEY_ACTUALRESULTS_FAILED |
262 # | 198 # |
263 def _GetFilesToRebaseline(self, json_url, add_new): | 199 def _GetFilesToRebaseline(self, json_url, add_new): |
264 if self._dry_run: | |
265 print '' | |
266 print '#' | |
267 print ('# Getting files to rebaseline from JSON summary URL %s ...' | 200 print ('# Getting files to rebaseline from JSON summary URL %s ...' |
268 % json_url) | 201 % json_url) |
269 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED] | 202 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED] |
270 if add_new: | 203 if add_new: |
271 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) | 204 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) |
272 results_to_rebaseline = self._GetActualResults(json_url=json_url, | 205 results_to_rebaseline = self._GetActualResults(json_url=json_url, |
273 sections=sections) | 206 sections=sections) |
274 files_to_rebaseline = results_to_rebaseline.keys() | 207 files_to_rebaseline = results_to_rebaseline.keys() |
275 files_to_rebaseline.sort() | 208 files_to_rebaseline.sort() |
276 print '# ... found files_to_rebaseline %s' % files_to_rebaseline | 209 print '# ... found files_to_rebaseline %s' % files_to_rebaseline |
277 if self._dry_run: | |
278 print '#' | |
279 return files_to_rebaseline | 210 return files_to_rebaseline |
280 | 211 |
281 # Rebaseline a single file. | 212 # Rebaseline a single file. |
282 def _RebaselineOneFile(self, expectations_subdir, builder_name, | 213 def _RebaselineOneFile(self, expectations_subdir, builder_name, |
283 infilename, outfilename, all_results): | 214 infilename, outfilename, all_results): |
284 if self._dry_run: | |
285 print '' | |
286 print '# ' + infilename | 215 print '# ' + infilename |
287 | 216 |
288 # First try to download this result image from Google Storage. | 217 # First try to download this result image from Google Storage. |
289 # If that fails, try skia-autogen. | 218 # If that fails, try skia-autogen. |
290 # If that fails too, just go on to the next file. | 219 # If that fails too, just go on to the next file. |
291 # | 220 # |
292 # This is not treated as a fatal failure because not all | 221 # This is not treated as a fatal failure because not all |
293 # platforms generate all configs (e.g., Android does not | 222 # platforms generate all configs (e.g., Android does not |
294 # generate PDF). | 223 # generate PDF). |
295 # | 224 # |
296 # TODO(epoger): Once we are downloading only files that the | 225 # TODO(epoger): Once we are downloading only files that the |
297 # actual-results.json file told us to, this should become a | 226 # actual-results.json file told us to, this should become a |
298 # fatal error. (If the actual-results.json file told us that | 227 # fatal error. (If the actual-results.json file told us that |
299 # the test failed with XXX results, we should be able to download | 228 # the test failed with XXX results, we should be able to download |
300 # those results every time.) | 229 # those results every time.) |
301 if not self._DownloadFromGoogleStorage(infilename=infilename, | 230 if not self._DownloadFromGoogleStorage(infilename=infilename, |
302 outfilename=outfilename, | 231 outfilename=outfilename, |
303 all_results=all_results): | 232 all_results=all_results): |
304 if not self._DownloadFromAutogen(infilename=infilename, | 233 if not self._DownloadFromAutogen(infilename=infilename, |
305 outfilename=outfilename, | 234 outfilename=outfilename, |
306 expectations_subdir=expectations_su
bdir, | 235 expectations_subdir=expectations_su
bdir, |
307 builder_name=builder_name): | 236 builder_name=builder_name): |
308 print '# Couldn\'t fetch infilename ' + infilename | 237 print '# Couldn\'t fetch infilename ' + infilename |
309 return | 238 return |
310 | 239 |
311 # Add this file to version control (if appropriate). | |
312 if self._add_new: | |
313 if self._is_svn_checkout: | |
314 cmd = [ 'svn', 'add', '--quiet', outfilename ] | |
315 self._Call(cmd) | |
316 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type', | |
317 'image/png', outfilename ]; | |
318 self._Call(cmd) | |
319 elif self._is_git_checkout: | |
320 cmd = [ 'git', 'add', outfilename ] | |
321 self._Call(cmd) | |
322 | |
323 # Rebaseline the given configs for a single test. | 240 # Rebaseline the given configs for a single test. |
324 # | 241 # |
325 # params: | 242 # params: |
326 # expectations_subdir | 243 # expectations_subdir |
327 # builder_name | 244 # builder_name |
328 # test: a single test to rebaseline | 245 # test: a single test to rebaseline |
329 # all_results: a dictionary of all actual results | 246 # all_results: a dictionary of all actual results |
330 def _RebaselineOneTest(self, expectations_subdir, builder_name, test, | 247 def _RebaselineOneTest(self, expectations_subdir, builder_name, test, |
331 all_results): | 248 all_results): |
332 if self._configs: | 249 if self._configs: |
333 configs = self._configs | 250 configs = self._configs |
334 else: | 251 else: |
335 if (expectations_subdir == 'base-shuttle-win7-intel-angle'): | 252 if (expectations_subdir == 'base-shuttle-win7-intel-angle'): |
336 configs = [ 'angle', 'anglemsaa16' ] | 253 configs = [ 'angle', 'anglemsaa16' ] |
337 else: | 254 else: |
338 configs = [ '565', '8888', 'gpu', 'pdf', 'mesa', 'msaa16', | 255 configs = [ '565', '8888', 'gpu', 'pdf', 'mesa', 'msaa16', |
339 'msaa4' ] | 256 'msaa4' ] |
340 if self._dry_run: | |
341 print '' | |
342 print '# ' + expectations_subdir + ':' | 257 print '# ' + expectations_subdir + ':' |
343 for config in configs: | 258 for config in configs: |
344 infilename = test + '_' + config + '.png' | 259 infilename = test + '_' + config + '.png' |
345 outfilename = os.path.join(expectations_subdir, infilename); | 260 outfilename = os.path.join(expectations_subdir, infilename); |
346 self._RebaselineOneFile(expectations_subdir=expectations_subdir, | 261 self._RebaselineOneFile(expectations_subdir=expectations_subdir, |
347 builder_name=builder_name, | 262 builder_name=builder_name, |
348 infilename=infilename, | 263 infilename=infilename, |
349 outfilename=outfilename, | 264 outfilename=outfilename, |
350 all_results=all_results) | 265 all_results=all_results) |
351 | 266 |
352 # Rebaseline all tests/types we specified in the constructor, | 267 # Rebaseline all tests/types we specified in the constructor, |
353 # within this gm-expectations subdir. | 268 # within this gm-expectations subdir. |
354 # | 269 # |
355 # params: | 270 # params: |
356 # subdir : e.g. 'base-shuttle-win7-intel-float' | 271 # subdir : e.g. 'base-shuttle-win7-intel-float' |
357 # builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release' | 272 # builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release' |
358 def RebaselineSubdir(self, subdir, builder): | 273 def RebaselineSubdir(self, subdir, builder): |
359 json_url = '/'.join([self._json_base_url, | 274 # Read in the actual result summary, and extract all the tests whose |
360 subdir, builder, subdir, | 275 # results we need to update. |
361 self._json_filename]) | 276 actuals_url = '/'.join([self._actuals_base_url, |
362 all_results = self._GetActualResults(json_url=json_url) | 277 subdir, builder, subdir, |
| 278 self._actuals_filename]) |
| 279 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED] |
| 280 if self._add_new: |
| 281 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) |
| 282 results_to_update = self._GetActualResults(json_url=actuals_url, |
| 283 sections=sections) |
| 284 #print 'EPOGER: results_to_update is...\n%s\n\n' % results_to_update |
| 285 |
| 286 # TODO(epoger): implement this section. |
| 287 # If tests or configs were set, throw out any new expectations that |
| 288 # don't match. |
| 289 |
| 290 # Read in current expectations. |
| 291 expectations_json_filepath = os.path.join( |
| 292 self._expectations_root, subdir, JSON_EXPECTATIONS_FILENAME) |
| 293 expectations_dict = gm_json.LoadFromFile(expectations_json_filepath) |
| 294 #print 'EPOGER: expectations_dict is...\n%s\n\n' % expectations_dict |
| 295 |
| 296 # TODO(epoger): implement this section. |
| 297 # Update the expectations in memory. |
| 298 |
| 299 # Write out updated expectations. |
| 300 gm_json.WriteToFile(expectations_dict, expectations_json_filepath) |
| 301 |
| 302 # TODO(epoger): delete this obsolete method once JsonRebaseliner is complete. |
| 303 def OLDRebaselineSubdir(self, subdir, builder): |
| 304 results_to_rebaseline = self._GetActualResults(json_url=json_url, |
| 305 sections=sections) |
363 | 306 |
364 if self._tests: | 307 if self._tests: |
365 for test in self._tests: | 308 for test in self._tests: |
366 self._RebaselineOneTest(expectations_subdir=subdir, | 309 self._RebaselineOneTest(expectations_subdir=subdir, |
367 builder_name=builder, | 310 builder_name=builder, |
368 test=test, all_results=all_results) | 311 test=test, all_results=all_results) |
369 else: # get the raw list of files that need rebaselining from JSON | 312 else: # get the raw list of files that need rebaselining from JSON |
370 filenames = self._GetFilesToRebaseline(json_url=json_url, | 313 filenames = self._GetFilesToRebaseline(json_url=json_url, |
371 add_new=self._add_new) | 314 add_new=self._add_new) |
372 for filename in filenames: | 315 for filename in filenames: |
(...skipping 19 matching lines...) Expand all Loading... |
392 '--tests has also been specified.') | 335 '--tests has also been specified.') |
393 parser.add_argument('--dry-run', action='store_true', | 336 parser.add_argument('--dry-run', action='store_true', |
394 help='instead of actually downloading files or adding ' + | 337 help='instead of actually downloading files or adding ' + |
395 'files to checkout, display a list of operations that ' + | 338 'files to checkout, display a list of operations that ' + |
396 'we would normally perform') | 339 'we would normally perform') |
397 parser.add_argument('--expectations-root', | 340 parser.add_argument('--expectations-root', |
398 help='root of expectations directory to update-- should ' + | 341 help='root of expectations directory to update-- should ' + |
399 'contain one or more base-* subdirectories. Defaults to ' + | 342 'contain one or more base-* subdirectories. Defaults to ' + |
400 '%(default)s', | 343 '%(default)s', |
401 default='.') | 344 default='.') |
402 parser.add_argument('--json-base-url', | 345 parser.add_argument('--actuals-base-url', |
403 help='base URL from which to read JSON_FILENAME ' + | 346 help='base URL from which to read ACTUALS_FILENAME ' + |
404 'files; defaults to %(default)s', | 347 'files; defaults to %(default)s', |
405 default='http://skia-autogen.googlecode.com/svn/gm-actual') | 348 default='http://skia-autogen.googlecode.com/svn/gm-actual') |
406 parser.add_argument('--json-filename', | 349 parser.add_argument('--actuals-filename', |
407 help='filename (under JSON_BASE_URL) to read a summary ' + | 350 help='filename (under ACTUALS_BASE_URL) to read a ' + |
408 'of results from; defaults to %(default)s', | 351 'summary of results from; defaults to %(default)s', |
409 default='actual-results.json') | 352 default='actual-results.json') |
410 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', | 353 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', |
411 help='which platform subdirectories to rebaseline; ' + | 354 help='which platform subdirectories to rebaseline; ' + |
412 'if unspecified, rebaseline all subdirs, same as ' + | 355 'if unspecified, rebaseline all subdirs, same as ' + |
413 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) | 356 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) |
414 parser.add_argument('--tests', metavar='TEST', nargs='+', | 357 parser.add_argument('--tests', metavar='TEST', nargs='+', |
415 help='which tests to rebaseline, e.g. ' + | 358 help='which tests to rebaseline, e.g. ' + |
416 '"--tests aaclip bigmatrix"; if unspecified, then all ' + | 359 '"--tests aaclip bigmatrix"; if unspecified, then all ' + |
417 'failing tests (according to the actual-results.json ' + | 360 'failing tests (according to the actual-results.json ' + |
418 'file) will be rebaselined.') | 361 'file) will be rebaselined.') |
(...skipping 14 matching lines...) Expand all Loading... |
433 # We instantiate different Rebaseliner objects depending | 376 # We instantiate different Rebaseliner objects depending |
434 # on whether we are rebaselining an expected-results.json file, or | 377 # on whether we are rebaselining an expected-results.json file, or |
435 # individual image files. Different gm-expected subdirectories may move | 378 # individual image files. Different gm-expected subdirectories may move |
436 # from individual image files to JSON-format expectations at different | 379 # from individual image files to JSON-format expectations at different |
437 # times, so we need to make this determination per subdirectory. | 380 # times, so we need to make this determination per subdirectory. |
438 # | 381 # |
439 # See https://goto.google.com/ChecksumTransitionDetail | 382 # See https://goto.google.com/ChecksumTransitionDetail |
440 expectations_json_file = os.path.join(args.expectations_root, subdir, | 383 expectations_json_file = os.path.join(args.expectations_root, subdir, |
441 JSON_EXPECTATIONS_FILENAME) | 384 JSON_EXPECTATIONS_FILENAME) |
442 if os.path.isfile(expectations_json_file): | 385 if os.path.isfile(expectations_json_file): |
443 sys.stderr.write('ERROR: JsonRebaseliner is not implemented yet.\n') | |
444 sys.exit(1) | |
445 rebaseliner = JsonRebaseliner( | 386 rebaseliner = JsonRebaseliner( |
446 expectations_root=args.expectations_root, | 387 expectations_root=args.expectations_root, |
447 tests=args.tests, configs=args.configs, | 388 tests=args.tests, configs=args.configs, |
448 dry_run=args.dry_run, | 389 actuals_base_url=args.actuals_base_url, |
449 json_base_url=args.json_base_url, | 390 actuals_filename=args.actuals_filename, |
450 json_filename=args.json_filename, | 391 add_new=args.add_new) |
451 add_new=args.add_new, | |
452 missing_json_is_fatal=missing_json_is_fatal) | |
453 else: | 392 else: |
454 rebaseliner = rebaseline_imagefiles.ImageRebaseliner( | 393 rebaseliner = rebaseline_imagefiles.ImageRebaseliner( |
455 expectations_root=args.expectations_root, | 394 expectations_root=args.expectations_root, |
456 tests=args.tests, configs=args.configs, | 395 tests=args.tests, configs=args.configs, |
457 dry_run=args.dry_run, | 396 dry_run=args.dry_run, |
458 json_base_url=args.json_base_url, | 397 json_base_url=args.actuals_base_url, |
459 json_filename=args.json_filename, | 398 json_filename=args.actuals_filename, |
460 add_new=args.add_new, | 399 add_new=args.add_new, |
461 missing_json_is_fatal=missing_json_is_fatal) | 400 missing_json_is_fatal=missing_json_is_fatal) |
462 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder) | 401 rebaseliner.RebaselineSubdir(subdir=subdir, builder=builder) |
OLD | NEW |