OLD | NEW |
---|---|
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 | 2 |
3 ''' | 3 ''' |
4 Copyright 2012 Google Inc. | 4 Copyright 2012 Google Inc. |
5 | 5 |
6 Use of this source code is governed by a BSD-style license that can be | 6 Use of this source code is governed by a BSD-style license that can be |
7 found in the LICENSE file. | 7 found in the LICENSE file. |
8 ''' | 8 ''' |
9 | 9 |
10 ''' | 10 ''' |
11 Rebaselines the given GM tests, on all bots and all configurations. | 11 Rebaselines the given GM tests, on all bots and all configurations. |
12 Must be run from the gm-expected directory. If run from a git or SVN | 12 Must be run from the gm-expected directory. If run from a git or SVN |
13 checkout, the files will be added to the staging area for commit. | 13 checkout, the files will be added to the staging area for commit. |
14 ''' | 14 ''' |
15 | 15 |
16 # System-level imports | 16 # System-level imports |
17 import argparse | 17 import argparse |
18 import os | 18 import os |
19 import subprocess | 19 import subprocess |
20 import sys | 20 import sys |
21 import urllib2 | 21 import urllib2 |
22 | 22 |
23 # Imports from within Skia | 23 # Imports from within Skia |
24 # | 24 # |
25 # Make sure that they are in the PYTHONPATH, but add them at the *end* | 25 # We need to add the 'gm' directory, so that we can import gm_json.py within |
26 # so any that are already in the PYTHONPATH will be preferred. | 26 # that directory. That script allows us to parse the actual-results.json file |
27 # written out by the GM tool. | |
Stephen White 2013/06/11 17:29:39: Thinking about this some more, maybe gm_json.py sh…
epoger 2013/06/11 18:10:20: I think this is very topical in the context of rec…
28 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* | |
29 # so any dirs that are already in the PYTHONPATH will be preferred. | |
30 # | |
31 # This assumes that the 'gm' directory has been checked out as a sibling of | |
32 # the 'tools' directory containing this script, which will be the case if | |
33 # 'trunk' was checked out as a single unit. | |
27 GM_DIRECTORY = os.path.realpath( | 34 GM_DIRECTORY = os.path.realpath( |
28 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) | 35 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) |
29 if GM_DIRECTORY not in sys.path: | 36 if GM_DIRECTORY not in sys.path: |
30 sys.path.append(GM_DIRECTORY) | 37 sys.path.append(GM_DIRECTORY) |
31 import gm_json | 38 import gm_json |
32 | 39 |
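
For context, this is roughly how gm_json is exercised later in this patch; a minimal sketch (the URL is a placeholder, but LoadFromString and JSONKEY_ACTUALRESULTS both appear in the code below):

    import urllib2
    import gm_json

    # Placeholder URL; the real one is assembled in RebaselineAll() below.
    json_url = 'http://example.com/actual-results.json'
    json_contents = urllib2.urlopen(json_url).read()

    # Parse the summary and pull out the per-test actual results.
    json_dict = gm_json.LoadFromString(json_contents)
    actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
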
33 | 40 |
34 # Mapping of gm-expectations subdir (under | 41 # Mapping of gm-expectations subdir (under |
35 # https://skia.googlecode.com/svn/gm-expected/ ) | 42 # https://skia.googlecode.com/svn/gm-expected/ ) |
36 # to builder name (see list at http://108.170.217.252:10117/builders ) | 43 # to builder name (see list at http://108.170.217.252:10117/builders ) |
(...skipping 35 matching lines...)
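
The mapping entries themselves fall in the skipped region; the structure is just a dict keyed by gm-expectations subdir, as in this hypothetical sketch (entry names invented for illustration):

    SUBDIR_MAPPING = {
        # hypothetical entries; the real ones live in the skipped lines above
        'base-some-platform':      'Test-SomePlatform-Release',
        'base-other-platform-gpu': 'Test-OtherPlatform-GPU-Release',
    }
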
72 # subdirs: which platform subdirectories to rebaseline; if not specified, | 79 # subdirs: which platform subdirectories to rebaseline; if not specified, |
73 # rebaseline all platform subdirectories | 80 # rebaseline all platform subdirectories |
74 # tests: list of tests to rebaseline, or None if we should rebaseline | 81 # tests: list of tests to rebaseline, or None if we should rebaseline |
75 # whatever files the JSON results summary file tells us to | 82 # whatever files the JSON results summary file tells us to |
76 # configs: which configs to run for each test; this should only be | 83 # configs: which configs to run for each test; this should only be |
77 # specified if the list of tests was also specified (otherwise, | 84 # specified if the list of tests was also specified (otherwise, |
78 # the JSON file will give us test names and configs) | 85 # the JSON file will give us test names and configs) |
79 # dry_run: if True, instead of actually downloading files or adding | 86 # dry_run: if True, instead of actually downloading files or adding |
80 # files to checkout, display a list of operations that | 87 # files to checkout, display a list of operations that |
81 # we would normally perform | 88 # we would normally perform |
89 # add_new: if True, add expectations for tests which don't have any yet | |
82 def __init__(self, json_base_url, json_filename, | 90 def __init__(self, json_base_url, json_filename, |
83 subdirs=None, tests=None, configs=None, dry_run=False): | 91 subdirs=None, tests=None, configs=None, dry_run=False, |
92 add_new=False): | |
84 if configs and not tests: | 93 if configs and not tests: |
85 raise ValueError('configs should only be specified if tests ' + | 94 raise ValueError('configs should only be specified if tests ' + |
86 'were specified also') | 95 'were specified also') |
87 self._tests = tests | 96 self._tests = tests |
88 self._configs = configs | 97 self._configs = configs |
89 if not subdirs: | 98 if not subdirs: |
90 self._subdirs = sorted(SUBDIR_MAPPING.keys()) | 99 self._subdirs = sorted(SUBDIR_MAPPING.keys()) |
91 self._missing_json_is_fatal = False | 100 self._missing_json_is_fatal = False |
92 else: | 101 else: |
93 self._subdirs = subdirs | 102 self._subdirs = subdirs |
94 self._missing_json_is_fatal = True | 103 self._missing_json_is_fatal = True |
95 self._json_base_url = json_base_url | 104 self._json_base_url = json_base_url |
96 self._json_filename = json_filename | 105 self._json_filename = json_filename |
97 self._dry_run = dry_run | 106 self._dry_run = dry_run |
107 self._add_new = add_new | |
98 self._is_svn_checkout = ( | 108 self._is_svn_checkout = ( |
99 os.path.exists('.svn') or | 109 os.path.exists('.svn') or |
100 os.path.exists(os.path.join(os.pardir, '.svn'))) | 110 os.path.exists(os.path.join(os.pardir, '.svn'))) |
101 self._is_git_checkout = ( | 111 self._is_git_checkout = ( |
102 os.path.exists('.git') or | 112 os.path.exists('.git') or |
103 os.path.exists(os.path.join(os.pardir, '.git'))) | 113 os.path.exists(os.path.join(os.pardir, '.git'))) |
104 | 114 |
105 # If dry_run is False, execute subprocess.call(cmd). | 115 # If dry_run is False, execute subprocess.call(cmd). |
106 # If dry_run is True, print the command we would have otherwise run. | 116 # If dry_run is True, print the command we would have otherwise run. |
107 # Raises a CommandFailedException if the command fails. | 117 # Raises a CommandFailedException if the command fails. |
(...skipping 39 matching lines...)
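
The body of _Call falls in the skipped region; based on the comment above it, a minimal sketch of the described behavior might be (the exact error message text is an assumption):

    def _Call(self, cmd):
        if self._dry_run:
            # Print the command we would otherwise have run.
            print ' '.join(cmd)
            return
        if subprocess.call(cmd) != 0:
            raise CommandFailedException('error running command: ' +
                                         ' '.join(cmd))
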
147 # ['imageblur', 'xfermodes'] | 157 # ['imageblur', 'xfermodes'] |
148 # | 158 # |
149 # If the JSON actual result summary file cannot be loaded, the behavior | 159 # If the JSON actual result summary file cannot be loaded, the behavior |
150 # depends on self._missing_json_is_fatal: | 160 # depends on self._missing_json_is_fatal: |
151 # - if true: execution will halt with an exception | 161 # - if true: execution will halt with an exception |
152 # - if false: we will log an error message but return an empty list so we | 162 # - if false: we will log an error message but return an empty list so we |
153 # go on to the next platform | 163 # go on to the next platform |
154 # | 164 # |
155 # params: | 165 # params: |
156 # json_url: URL pointing to a JSON actual result summary file | 166 # json_url: URL pointing to a JSON actual result summary file |
167 # add_new: if True, then return files listed in any of these sections: | |
168 # - JSONKEY_ACTUALRESULTS_FAILED | |
169 # - JSONKEY_ACTUALRESULTS_FAILUREIGNORED | |
epoger 2013/06/12 14:24:11: Patchset 4 removes FAILUREIGNORED from the list of …
170 # - JSONKEY_ACTUALRESULTS_NOCOMPARISON | |
171 # if False, then return files listed in these sections: | |
172 # - JSONKEY_ACTUALRESULTS_FAILED | |
173 # - JSONKEY_ACTUALRESULTS_FAILUREIGNORED | |
157 # | 174 # |
158 # TODO(epoger): add a parameter indicating whether "no-comparison" | 175 def _GetFilesToRebaseline(self, json_url, add_new): |
159 # results (those for which we don't have any expectations yet) | |
160 # should be rebaselined. For now, we only return failed expectations. | |
161 def _GetFilesToRebaseline(self, json_url): | |
162 if self._dry_run: | 176 if self._dry_run: |
163 print '' | 177 print '' |
164 print '#' | 178 print '#' |
165 print ('# Getting files to rebaseline from JSON summary URL %s ...' | 179 print ('# Getting files to rebaseline from JSON summary URL %s ...' |
166 % json_url) | 180 % json_url) |
167 try: | 181 try: |
168 json_contents = self._GetContentsOfUrl(json_url) | 182 json_contents = self._GetContentsOfUrl(json_url) |
169 except urllib2.HTTPError: | 183 except urllib2.HTTPError: |
170 message = 'unable to load JSON summary URL %s' % json_url | 184 message = 'unable to load JSON summary URL %s' % json_url |
171 if self._missing_json_is_fatal: | 185 if self._missing_json_is_fatal: |
172 raise ValueError(message) | 186 raise ValueError(message) |
173 else: | 187 else: |
174 print '# %s' % message | 188 print '# %s' % message |
175 return [] | 189 return [] |
176 | 190 |
177 json_dict = gm_json.LoadFromString(json_contents) | 191 json_dict = gm_json.LoadFromString(json_contents) |
178 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] | 192 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] |
193 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED, | |
194 gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED] | |
195 if add_new: | |
196 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) | |
179 | 197 |
180 files_to_rebaseline = [] | 198 files_to_rebaseline = [] |
181 failed_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] | 199 for section in sections: |
182 if failed_results: | 200 section_results = actual_results[section] |
183 files_to_rebaseline.extend(failed_results.keys()) | 201 if section_results: |
202 files_to_rebaseline.extend(section_results.keys()) | |
184 | 203 |
185 print '# ... found files_to_rebaseline %s' % files_to_rebaseline | 204 print '# ... found files_to_rebaseline %s' % files_to_rebaseline |
186 if self._dry_run: | 205 if self._dry_run: |
187 print '#' | 206 print '#' |
188 return files_to_rebaseline | 207 return files_to_rebaseline |
189 | 208 |
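
For reference, the actual-results summary parsed above groups filenames by outcome; a hypothetical sketch of its shape as a Python dict (the literal key strings behind the JSONKEY_* constants are guesses, and the filenames are invented):

    json_dict = {
        'actual-results': {
            'failed':          { 'imageblur_565.png': '<checksum info>' },
            'failure-ignored': { },
            'no-comparison':   { 'newtest_8888.png': '<checksum info>' },
        },
    }
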
190 # Rebaseline a single file. | 209 # Rebaseline a single file. |
191 def _RebaselineOneFile(self, expectations_subdir, builder_name, | 210 def _RebaselineOneFile(self, expectations_subdir, builder_name, |
192 infilename, outfilename): | 211 infilename, outfilename): |
193 if self._dry_run: | 212 if self._dry_run: |
(...skipping 14 matching lines...)
208 # then fail if any of those expected combinations are | 227 # then fail if any of those expected combinations are |
209 # missing... but then this tool would become useless every | 228 # missing... but then this tool would become useless every |
210 # time someone tweaked the configs on the bots without | 229 # time someone tweaked the configs on the bots without |
211 # updating this script. | 230 # updating this script. |
212 try: | 231 try: |
213 self._DownloadFile(source_url=url, dest_filename=outfilename) | 232 self._DownloadFile(source_url=url, dest_filename=outfilename) |
214 except CommandFailedException: | 233 except CommandFailedException: |
215 print '# Couldn\'t fetch ' + url | 234 print '# Couldn\'t fetch ' + url |
216 return | 235 return |
217 | 236 |
218 # Add this file to version control (if it isn't already). | 237 # Add this file to version control (if appropriate). |
219 if self._is_svn_checkout: | 238 if self._add_new: |
Stephen White 2013/06/11 17:29:39: Actually, at least for git, I think we want to do …
epoger 2013/06/11 18:10:20: Brian and/or Rob, do you have any thoughts on this …
robertphillips 2013/06/12 14:13:41: I would greatly prefer if the addition of new base…
220 cmd = [ 'svn', 'add', '--quiet', outfilename ] | 239 if self._is_svn_checkout: |
221 self._Call(cmd) | 240 cmd = [ 'svn', 'add', '--quiet', outfilename ] |
222 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type', 'image/png', | 241 self._Call(cmd) |
223 outfilename ]; | 242 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type', |
224 self._Call(cmd) | 243 'image/png', outfilename ]; |
225 elif self._is_git_checkout: | 244 self._Call(cmd) |
226 cmd = [ 'git', 'add', outfilename ] | 245 elif self._is_git_checkout: |
227 self._Call(cmd) | 246 cmd = [ 'git', 'add', outfilename ] |
247 self._Call(cmd) | |
228 | 248 |
229 # Rebaseline the given configs for a single test. | 249 # Rebaseline the given configs for a single test. |
230 # | 250 # |
231 # params: | 251 # params: |
232 # expectations_subdir | 252 # expectations_subdir |
233 # builder_name | 253 # builder_name |
234 # test: a single test to rebaseline | 254 # test: a single test to rebaseline |
235 def _RebaselineOneTest(self, expectations_subdir, builder_name, test): | 255 def _RebaselineOneTest(self, expectations_subdir, builder_name, test): |
236 if self._configs: | 256 if self._configs: |
237 configs = self._configs | 257 configs = self._configs |
(...skipping 24 matching lines...)
262 builder_name = SUBDIR_MAPPING[subdir] | 282 builder_name = SUBDIR_MAPPING[subdir] |
263 if self._tests: | 283 if self._tests: |
264 for test in self._tests: | 284 for test in self._tests: |
265 self._RebaselineOneTest(expectations_subdir=subdir, | 285 self._RebaselineOneTest(expectations_subdir=subdir, |
266 builder_name=builder_name, | 286 builder_name=builder_name, |
267 test=test) | 287 test=test) |
268 else: # get the raw list of files that need rebaselining from JSON | 288 else: # get the raw list of files that need rebaselining from JSON |
269 json_url = '/'.join([self._json_base_url, | 289 json_url = '/'.join([self._json_base_url, |
270 subdir, builder_name, subdir, | 290 subdir, builder_name, subdir, |
271 self._json_filename]) | 291 self._json_filename]) |
272 filenames = self._GetFilesToRebaseline(json_url=json_url) | 292 filenames = self._GetFilesToRebaseline(json_url=json_url, |
293 add_new=self._add_new) | |
273 for filename in filenames: | 294 for filename in filenames: |
274 outfilename = os.path.join(subdir, filename); | 295 outfilename = os.path.join(subdir, filename); |
275 self._RebaselineOneFile(expectations_subdir=subdir, | 296 self._RebaselineOneFile(expectations_subdir=subdir, |
276 builder_name=builder_name, | 297 builder_name=builder_name, |
277 infilename=filename, | 298 infilename=filename, |
278 outfilename=outfilename) | 299 outfilename=outfilename) |
279 | 300 |
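
To illustrate the json_url assembly above with hypothetical values:

    json_url = '/'.join(['http://example.com/gm-actual',  # --json-base-url
                         'base-some-platform',            # subdir (hypothetical)
                         'Test-SomePlatform-Release',     # builder (hypothetical)
                         'base-some-platform',
                         'actual-results.json'])          # --json-filename
    # -> 'http://example.com/gm-actual/base-some-platform/
    #     Test-SomePlatform-Release/base-some-platform/actual-results.json'
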
280 # main... | 301 # main... |
281 | 302 |
282 parser = argparse.ArgumentParser() | 303 parser = argparse.ArgumentParser() |
304 parser.add_argument('--add-new', action='store_true', | |
305 help='in addition to the standard behavior of ' + | |
306 'updating expectations for failing tests, add ' + | |
307 'expectations for tests which don\'t have expectations ' + | |
308 'yet.') | |
283 parser.add_argument('--configs', metavar='CONFIG', nargs='+', | 309 parser.add_argument('--configs', metavar='CONFIG', nargs='+', |
284 help='which configurations to rebaseline, e.g. ' + | 310 help='which configurations to rebaseline, e.g. ' + |
285 '"--configs 565 8888"; if unspecified, run a default ' + | 311 '"--configs 565 8888"; if unspecified, run a default ' + |
286 'set of configs. This should ONLY be specified if ' + | 312 'set of configs. This should ONLY be specified if ' + |
287 '--tests has also been specified.') | 313 '--tests has also been specified.') |
288 parser.add_argument('--dry-run', action='store_true', | 314 parser.add_argument('--dry-run', action='store_true', |
289 help='instead of actually downloading files or adding ' + | 315 help='instead of actually downloading files or adding ' + |
290 'files to checkout, display a list of operations that ' + | 316 'files to checkout, display a list of operations that ' + |
291 'we would normally perform') | 317 'we would normally perform') |
292 parser.add_argument('--json-base-url', | 318 parser.add_argument('--json-base-url', |
(...skipping 10 matching lines...)
303 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) | 329 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) |
304 parser.add_argument('--tests', metavar='TEST', nargs='+', | 330 parser.add_argument('--tests', metavar='TEST', nargs='+', |
305 help='which tests to rebaseline, e.g. ' + | 331 help='which tests to rebaseline, e.g. ' + |
306 '"--tests aaclip bigmatrix"; if unspecified, then all ' + | 332 '"--tests aaclip bigmatrix"; if unspecified, then all ' + |
307 'failing tests (according to the actual-results.json ' + | 333 'failing tests (according to the actual-results.json ' + |
308 'file) will be rebaselined.') | 334 'file) will be rebaselined.') |
309 args = parser.parse_args() | 335 args = parser.parse_args() |
310 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, | 336 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, |
311 subdirs=args.subdirs, dry_run=args.dry_run, | 337 subdirs=args.subdirs, dry_run=args.dry_run, |
312 json_base_url=args.json_base_url, | 338 json_base_url=args.json_base_url, |
313 json_filename=args.json_filename) | 339 json_filename=args.json_filename, |
340 add_new=args.add_new) | |
314 rebaseliner.RebaselineAll() | 341 rebaseliner.RebaselineAll() |
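
Putting the flags together, a usage sketch (script name assumed; test and config names are the examples used in the help text above):

    # Dry run: show what would happen for two tests in two configs.
    python rebaseline.py --dry-run --tests aaclip bigmatrix --configs 565 8888

    # Rebaseline all failing tests, and also add expectations for tests
    # that don't have any yet.
    python rebaseline.py --add-new
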