Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(61)

Side by Side Diff: tools/rebaseline.py

Issue 16363006: rebaseline.py: add --add-new option, and only add new expectations if it is set (Closed) Base URL: http://skia.googlecode.com/svn/trunk/
Patch Set: remove_FAILURE_IGNORED Created 7 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | tools/tests/rebaseline/output/all/output-expected/stdout » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 2
3 ''' 3 '''
4 Copyright 2012 Google Inc. 4 Copyright 2012 Google Inc.
5 5
6 Use of this source code is governed by a BSD-style license that can be 6 Use of this source code is governed by a BSD-style license that can be
7 found in the LICENSE file. 7 found in the LICENSE file.
8 ''' 8 '''
9 9
10 ''' 10 '''
11 Rebaselines the given GM tests, on all bots and all configurations. 11 Rebaselines the given GM tests, on all bots and all configurations.
12 Must be run from the gm-expected directory. If run from a git or SVN 12 Must be run from the gm-expected directory. If run from a git or SVN
13 checkout, the files will be added to the staging area for commit. 13 checkout, the files will be added to the staging area for commit.
14 ''' 14 '''
15 15
16 # System-level imports 16 # System-level imports
17 import argparse 17 import argparse
18 import os 18 import os
19 import subprocess 19 import subprocess
20 import sys 20 import sys
21 import urllib2 21 import urllib2
22 22
23 # Imports from within Skia 23 # Imports from within Skia
24 # 24 #
25 # Make sure that they are in the PYTHONPATH, but add them at the *end* 25 # We need to add the 'gm' directory, so that we can import gm_json.py within
26 # so any that are already in the PYTHONPATH will be preferred. 26 # that directory. That script allows us to parse the actual-results.json file
27 # written out by the GM tool.
28 # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
29 # so any dirs that are already in the PYTHONPATH will be preferred.
30 #
31 # This assumes that the 'gm' directory has been checked out as a sibling of
32 # the 'tools' directory containing this script, which will be the case if
33 # 'trunk' was checked out as a single unit.
27 GM_DIRECTORY = os.path.realpath( 34 GM_DIRECTORY = os.path.realpath(
28 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) 35 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
29 if GM_DIRECTORY not in sys.path: 36 if GM_DIRECTORY not in sys.path:
30 sys.path.append(GM_DIRECTORY) 37 sys.path.append(GM_DIRECTORY)
31 import gm_json 38 import gm_json
32 39
33 40
34 # Mapping of gm-expectations subdir (under 41 # Mapping of gm-expectations subdir (under
35 # https://skia.googlecode.com/svn/gm-expected/ ) 42 # https://skia.googlecode.com/svn/gm-expected/ )
36 # to builder name (see list at http://108.170.217.252:10117/builders ) 43 # to builder name (see list at http://108.170.217.252:10117/builders )
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
72 # subdirs: which platform subdirectories to rebaseline; if not specified, 79 # subdirs: which platform subdirectories to rebaseline; if not specified,
73 # rebaseline all platform subdirectories 80 # rebaseline all platform subdirectories
74 # tests: list of tests to rebaseline, or None if we should rebaseline 81 # tests: list of tests to rebaseline, or None if we should rebaseline
75 # whatever files the JSON results summary file tells us to 82 # whatever files the JSON results summary file tells us to
76 # configs: which configs to run for each test; this should only be 83 # configs: which configs to run for each test; this should only be
77 # specified if the list of tests was also specified (otherwise, 84 # specified if the list of tests was also specified (otherwise,
78 # the JSON file will give us test names and configs) 85 # the JSON file will give us test names and configs)
79 # dry_run: if True, instead of actually downloading files or adding 86 # dry_run: if True, instead of actually downloading files or adding
80 # files to checkout, display a list of operations that 87 # files to checkout, display a list of operations that
81 # we would normally perform 88 # we would normally perform
89 # add_new: if True, add expectations for tests which don't have any yet
# Construct a Rebaseliner.
#
# params:
#   json_base_url: base URL from which to read actual-result JSON files
#   json_filename: filename (under json_base_url) from which to read a
#                  summary of results
#   subdirs: which platform subdirectories to rebaseline; if not specified,
#            rebaseline all platform subdirectories
#   tests: list of tests to rebaseline, or None if we should rebaseline
#          whatever files the JSON results summary file tells us to
#   configs: which configs to run for each test; this should only be
#            specified if the list of tests was also specified
#   dry_run: if True, display the operations we would perform instead of
#            actually performing them
#   add_new: if True, add expectations for tests which don't have any yet
def __init__(self, json_base_url, json_filename,
             subdirs=None, tests=None, configs=None, dry_run=False,
             add_new=False):
  # configs without tests is meaningless: the JSON summary supplies both.
  if configs and not tests:
    raise ValueError('configs should only be specified if tests '
                     'were specified also')
  self._tests = tests
  self._configs = configs
  # With explicit subdirs, a missing JSON summary is a hard error; when we
  # fall back to every known platform, it is merely logged.
  if subdirs:
    self._subdirs = subdirs
    self._missing_json_is_fatal = True
  else:
    self._subdirs = sorted(SUBDIR_MAPPING.keys())
    self._missing_json_is_fatal = False
  self._json_base_url = json_base_url
  self._json_filename = json_filename
  self._dry_run = dry_run
  self._add_new = add_new
  # Detect whether we are sitting in (or just under) an SVN or git checkout,
  # so downloaded files can be staged for commit.
  self._is_svn_checkout = any(
      os.path.exists(path)
      for path in ('.svn', os.path.join(os.pardir, '.svn')))
  self._is_git_checkout = any(
      os.path.exists(path)
      for path in ('.git', os.path.join(os.pardir, '.git')))
104 114
105 # If dry_run is False, execute subprocess.call(cmd). 115 # If dry_run is False, execute subprocess.call(cmd).
106 # If dry_run is True, print the command we would have otherwise run. 116 # If dry_run is True, print the command we would have otherwise run.
107 # Raises a CommandFailedException if the command fails. 117 # Raises a CommandFailedException if the command fails.
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
147 # ['imageblur', 'xfermodes'] 157 # ['imageblur', 'xfermodes']
148 # 158 #
149 # If the JSON actual result summary file cannot be loaded, the behavior 159 # If the JSON actual result summary file cannot be loaded, the behavior
150 # depends on self._missing_json_is_fatal: 160 # depends on self._missing_json_is_fatal:
151 # - if true: execution will halt with an exception 161 # - if true: execution will halt with an exception
152 # - if false: we will log an error message but return an empty list so we 162 # - if false: we will log an error message but return an empty list so we
153 # go on to the next platform 163 # go on to the next platform
154 # 164 #
155 # params: 165 # params:
156 # json_url: URL pointing to a JSON actual result summary file 166 # json_url: URL pointing to a JSON actual result summary file
167 # add_new: if True, then return files listed in any of these sections:
168 # - JSONKEY_ACTUALRESULTS_FAILED
169 # - JSONKEY_ACTUALRESULTS_NOCOMPARISON
170 # if False, then return files listed in these sections:
171 # - JSONKEY_ACTUALRESULTS_FAILED
157 # 172 #
158 # TODO(epoger): add a parameter indicating whether "no-comparison" 173 def _GetFilesToRebaseline(self, json_url, add_new):
159 # results (those for which we don't have any expectations yet)
160 # should be rebaselined. For now, we only return failed expectations.
161 def _GetFilesToRebaseline(self, json_url):
162 if self._dry_run: 174 if self._dry_run:
163 print '' 175 print ''
164 print '#' 176 print '#'
165 print ('# Getting files to rebaseline from JSON summary URL %s ...' 177 print ('# Getting files to rebaseline from JSON summary URL %s ...'
166 % json_url) 178 % json_url)
167 try: 179 try:
168 json_contents = self._GetContentsOfUrl(json_url) 180 json_contents = self._GetContentsOfUrl(json_url)
169 except urllib2.HTTPError: 181 except urllib2.HTTPError:
170 message = 'unable to load JSON summary URL %s' % json_url 182 message = 'unable to load JSON summary URL %s' % json_url
171 if self._missing_json_is_fatal: 183 if self._missing_json_is_fatal:
172 raise ValueError(message) 184 raise ValueError(message)
173 else: 185 else:
174 print '# %s' % message 186 print '# %s' % message
175 return [] 187 return []
176 188
177 json_dict = gm_json.LoadFromString(json_contents) 189 json_dict = gm_json.LoadFromString(json_contents)
178 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] 190 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
191 sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
192 if add_new:
193 sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
179 194
180 files_to_rebaseline = [] 195 files_to_rebaseline = []
181 failed_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] 196 for section in sections:
182 if failed_results: 197 section_results = actual_results[section]
183 files_to_rebaseline.extend(failed_results.keys()) 198 if section_results:
199 files_to_rebaseline.extend(section_results.keys())
184 200
185 print '# ... found files_to_rebaseline %s' % files_to_rebaseline 201 print '# ... found files_to_rebaseline %s' % files_to_rebaseline
186 if self._dry_run: 202 if self._dry_run:
187 print '#' 203 print '#'
188 return files_to_rebaseline 204 return files_to_rebaseline
189 205
190 # Rebaseline a single file. 206 # Rebaseline a single file.
191 def _RebaselineOneFile(self, expectations_subdir, builder_name, 207 def _RebaselineOneFile(self, expectations_subdir, builder_name,
192 infilename, outfilename): 208 infilename, outfilename):
193 if self._dry_run: 209 if self._dry_run:
(...skipping 14 matching lines...) Expand all
208 # then fail if any of those expected combinations are 224 # then fail if any of those expected combinations are
209 # missing... but then this tool would become useless every 225 # missing... but then this tool would become useless every
210 # time someone tweaked the configs on the bots without 226 # time someone tweaked the configs on the bots without
211 # updating this script. 227 # updating this script.
212 try: 228 try:
213 self._DownloadFile(source_url=url, dest_filename=outfilename) 229 self._DownloadFile(source_url=url, dest_filename=outfilename)
214 except CommandFailedException: 230 except CommandFailedException:
215 print '# Couldn\'t fetch ' + url 231 print '# Couldn\'t fetch ' + url
216 return 232 return
217 233
218 # Add this file to version control (if it isn't already). 234 # Add this file to version control (if appropriate).
219 if self._is_svn_checkout: 235 if self._add_new:
220 cmd = [ 'svn', 'add', '--quiet', outfilename ] 236 if self._is_svn_checkout:
221 self._Call(cmd) 237 cmd = [ 'svn', 'add', '--quiet', outfilename ]
222 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type', 'image/png', 238 self._Call(cmd)
223 outfilename ]; 239 cmd = [ 'svn', 'propset', '--quiet', 'svn:mime-type',
224 self._Call(cmd) 240 'image/png', outfilename ];
225 elif self._is_git_checkout: 241 self._Call(cmd)
226 cmd = [ 'git', 'add', outfilename ] 242 elif self._is_git_checkout:
227 self._Call(cmd) 243 cmd = [ 'git', 'add', outfilename ]
244 self._Call(cmd)
228 245
229 # Rebaseline the given configs for a single test. 246 # Rebaseline the given configs for a single test.
230 # 247 #
231 # params: 248 # params:
232 # expectations_subdir 249 # expectations_subdir
233 # builder_name 250 # builder_name
234 # test: a single test to rebaseline 251 # test: a single test to rebaseline
235 def _RebaselineOneTest(self, expectations_subdir, builder_name, test): 252 def _RebaselineOneTest(self, expectations_subdir, builder_name, test):
236 if self._configs: 253 if self._configs:
237 configs = self._configs 254 configs = self._configs
(...skipping 24 matching lines...) Expand all
262 builder_name = SUBDIR_MAPPING[subdir] 279 builder_name = SUBDIR_MAPPING[subdir]
263 if self._tests: 280 if self._tests:
264 for test in self._tests: 281 for test in self._tests:
265 self._RebaselineOneTest(expectations_subdir=subdir, 282 self._RebaselineOneTest(expectations_subdir=subdir,
266 builder_name=builder_name, 283 builder_name=builder_name,
267 test=test) 284 test=test)
268 else: # get the raw list of files that need rebaselining from JSON 285 else: # get the raw list of files that need rebaselining from JSON
269 json_url = '/'.join([self._json_base_url, 286 json_url = '/'.join([self._json_base_url,
270 subdir, builder_name, subdir, 287 subdir, builder_name, subdir,
271 self._json_filename]) 288 self._json_filename])
272 filenames = self._GetFilesToRebaseline(json_url=json_url) 289 filenames = self._GetFilesToRebaseline(json_url=json_url,
290 add_new=self._add_new)
273 for filename in filenames: 291 for filename in filenames:
274 outfilename = os.path.join(subdir, filename); 292 outfilename = os.path.join(subdir, filename);
275 self._RebaselineOneFile(expectations_subdir=subdir, 293 self._RebaselineOneFile(expectations_subdir=subdir,
276 builder_name=builder_name, 294 builder_name=builder_name,
277 infilename=filename, 295 infilename=filename,
278 outfilename=outfilename) 296 outfilename=outfilename)
279 297
280 # main... 298 # main...
281 299
282 parser = argparse.ArgumentParser() 300 parser = argparse.ArgumentParser()
301 parser.add_argument('--add-new', action='store_true',
302 help='in addition to the standard behavior of ' +
303 'updating expectations for failing tests, add ' +
304 'expectations for tests which don\'t have expectations ' +
305 'yet.')
283 parser.add_argument('--configs', metavar='CONFIG', nargs='+', 306 parser.add_argument('--configs', metavar='CONFIG', nargs='+',
284 help='which configurations to rebaseline, e.g. ' + 307 help='which configurations to rebaseline, e.g. ' +
285 '"--configs 565 8888"; if unspecified, run a default ' + 308 '"--configs 565 8888"; if unspecified, run a default ' +
286 'set of configs. This should ONLY be specified if ' + 309 'set of configs. This should ONLY be specified if ' +
287 '--tests has also been specified.') 310 '--tests has also been specified.')
288 parser.add_argument('--dry-run', action='store_true', 311 parser.add_argument('--dry-run', action='store_true',
289 help='instead of actually downloading files or adding ' + 312 help='instead of actually downloading files or adding ' +
290 'files to checkout, display a list of operations that ' + 313 'files to checkout, display a list of operations that ' +
291 'we would normally perform') 314 'we would normally perform')
292 parser.add_argument('--json-base-url', 315 parser.add_argument('--json-base-url',
(...skipping 10 matching lines...) Expand all
303 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) 326 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys())))
304 parser.add_argument('--tests', metavar='TEST', nargs='+', 327 parser.add_argument('--tests', metavar='TEST', nargs='+',
305 help='which tests to rebaseline, e.g. ' + 328 help='which tests to rebaseline, e.g. ' +
306 '"--tests aaclip bigmatrix"; if unspecified, then all ' + 329 '"--tests aaclip bigmatrix"; if unspecified, then all ' +
307 'failing tests (according to the actual-results.json ' + 330 'failing tests (according to the actual-results.json ' +
308 'file) will be rebaselined.') 331 'file) will be rebaselined.')
309 args = parser.parse_args() 332 args = parser.parse_args()
310 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, 333 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs,
311 subdirs=args.subdirs, dry_run=args.dry_run, 334 subdirs=args.subdirs, dry_run=args.dry_run,
312 json_base_url=args.json_base_url, 335 json_base_url=args.json_base_url,
313 json_filename=args.json_filename) 336 json_filename=args.json_filename,
337 add_new=args.add_new)
314 rebaseliner.RebaselineAll() 338 rebaseliner.RebaselineAll()
OLDNEW
« no previous file with comments | « no previous file | tools/tests/rebaseline/output/all/output-expected/stdout » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698