OLD | NEW |
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 | 2 |
3 ''' | 3 ''' |
4 Copyright 2012 Google Inc. | 4 Copyright 2012 Google Inc. |
5 | 5 |
6 Use of this source code is governed by a BSD-style license that can be | 6 Use of this source code is governed by a BSD-style license that can be |
7 found in the LICENSE file. | 7 found in the LICENSE file. |
8 ''' | 8 ''' |
9 | 9 |
10 ''' | 10 ''' |
11 Rebaselines the given GM tests, on all bots and all configurations. | 11 Rebaselines the given GM tests, on all bots and all configurations. |
12 Must be run from the gm-expected directory. If run from a git or SVN | 12 Must be run from the gm-expected directory. If run from a git or SVN |
13 checkout, the files will be added to the staging area for commit. | 13 checkout, the files will be added to the staging area for commit. |
14 ''' | 14 ''' |
15 | 15 |
| 16 # System-level imports |
16 import argparse | 17 import argparse |
17 import os | 18 import os |
18 import subprocess | 19 import subprocess |
19 import sys | 20 import sys |
| 21 import urllib2 |
| 22 |
| 23 # Imports from within Skia |
| 24 # |
| 25 # Make sure that they are in the PYTHONPATH, but add them at the *end* |
| 26 # so any that are already in the PYTHONPATH will be preferred. |
| 27 GM_DIRECTORY = os.path.realpath( |
| 28 os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) |
| 29 if GM_DIRECTORY not in sys.path: |
| 30 sys.path.append(GM_DIRECTORY) |
| 31 import gm_json |
| 32 |
20 | 33 |
21 # Mapping of gm-expectations subdir (under | 34 # Mapping of gm-expectations subdir (under |
22 # https://skia.googlecode.com/svn/gm-expected/ ) | 35 # https://skia.googlecode.com/svn/gm-expected/ ) |
23 # to builder name (see list at http://108.170.217.252:10117/builders ) | 36 # to builder name (see list at http://108.170.217.252:10117/builders ) |
24 SUBDIR_MAPPING = { | 37 SUBDIR_MAPPING = { |
25 'base-shuttle-win7-intel-float': | 38 'base-shuttle-win7-intel-float': |
26 'Test-Win7-ShuttleA-HD2000-x86-Release', | 39 'Test-Win7-ShuttleA-HD2000-x86-Release', |
27 'base-shuttle-win7-intel-angle': | 40 'base-shuttle-win7-intel-angle': |
28 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', | 41 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', |
29 'base-shuttle-win7-intel-directwrite': | 42 'base-shuttle-win7-intel-directwrite': |
(...skipping 16 matching lines...) |
46 'Test-Android-Nexus10-MaliT604-Arm7-Release', | 59 'Test-Android-Nexus10-MaliT604-Arm7-Release', |
47 } | 60 } |
48 | 61 |
49 | 62 |
50 class CommandFailedException(Exception): | 63 class CommandFailedException(Exception): |
51 pass | 64 pass |
52 | 65 |
53 class Rebaseliner(object): | 66 class Rebaseliner(object): |
54 | 67 |
55 # params: | 68 # params: |
56 # tests: list of tests to rebaseline | 69 # tests: list of tests to rebaseline, or None if we should rebaseline |
| 70 # whatever tests the actual-results.json file tells us to |
| 71 # json_base_url: base URL from which to read json_filename |
| 72 # json_filename: filename (under json_base_url) from which to read a |
| 73 # summary of results |
57 # configs: which configs to run for each test | 74 # configs: which configs to run for each test |
58 # subdirs: which platform subdirectories to rebaseline; if an empty list, | 75 # subdirs: which platform subdirectories to rebaseline; if an empty list, |
59 # rebaseline all platform subdirectories | 76 # rebaseline all platform subdirectories |
60 # dry_run: if True, instead of actually downloading files or adding | 77 # dry_run: if True, instead of actually downloading files or adding |
61 # files to checkout, display a list of operations that | 78 # files to checkout, display a list of operations that |
62 # we would normally perform | 79 # we would normally perform |
63 def __init__(self, tests, configs=[], subdirs=[], dry_run=False): | 80 def __init__(self, tests, json_base_url, json_filename, |
64 if not tests: | 81 configs=[], subdirs=[], dry_run=False): |
65 raise Exception('at least one test must be specified') | |
66 self._tests = tests | 82 self._tests = tests |
67 self._configs = configs | 83 self._configs = configs |
68 if not subdirs: | 84 if not subdirs: |
69 self._subdirs = sorted(SUBDIR_MAPPING.keys()) | 85 self._subdirs = sorted(SUBDIR_MAPPING.keys()) |
70 else: | 86 else: |
71 self._subdirs = subdirs | 87 self._subdirs = subdirs |
| 88 self._json_base_url = json_base_url |
| 89 self._json_filename = json_filename |
72 self._dry_run = dry_run | 90 self._dry_run = dry_run |
73 self._is_svn_checkout = ( | 91 self._is_svn_checkout = ( |
74 os.path.exists('.svn') or | 92 os.path.exists('.svn') or |
75 os.path.exists(os.path.join(os.pardir, '.svn'))) | 93 os.path.exists(os.path.join(os.pardir, '.svn'))) |
76 self._is_git_checkout = ( | 94 self._is_git_checkout = ( |
77 os.path.exists('.git') or | 95 os.path.exists('.git') or |
78 os.path.exists(os.path.join(os.pardir, '.git'))) | 96 os.path.exists(os.path.join(os.pardir, '.git'))) |
79 | 97 |
80 # If dry_run is False, execute subprocess.call(cmd). | 98 # If dry_run is False, execute subprocess.call(cmd). |
81 # If dry_run is True, print the command we would have otherwise run. | 99 # If dry_run is True, print the command we would have otherwise run. |
(...skipping 12 matching lines...) |
94 # so that we don't corrupt the existing file if it fails midway thru. | 112 # so that we don't corrupt the existing file if it fails midway thru. |
95 temp_filename = os.path.join(os.path.dirname(dest_filename), | 113 temp_filename = os.path.join(os.path.dirname(dest_filename), |
96 '.temp-' + os.path.basename(dest_filename)) | 114 '.temp-' + os.path.basename(dest_filename)) |
97 | 115 |
98 # TODO(epoger): Replace calls to "curl"/"mv" (which will only work on | 116 # TODO(epoger): Replace calls to "curl"/"mv" (which will only work on |
99 # Unix) with a Python HTTP library (which should work cross-platform) | 117 # Unix) with a Python HTTP library (which should work cross-platform) |
100 self._Call([ 'curl', '--fail', '--silent', source_url, | 118 self._Call([ 'curl', '--fail', '--silent', source_url, |
101 '--output', temp_filename ]) | 119 '--output', temp_filename ]) |
102 self._Call([ 'mv', temp_filename, dest_filename ]) | 120 self._Call([ 'mv', temp_filename, dest_filename ]) |
103 | 121 |
| 122 # Returns the full contents of a URL, as a single string. |
| 123 # |
| 124 # Unlike standard URL handling, we allow relative "file:" URLs; |
| 125 # for example, "file:one/two" resolves to the file ./one/two |
| 126 # (relative to current working dir) |
| 127 def _GetContentsOfUrl(self, url): |
| 128 file_prefix = 'file:' |
| 129 if url.startswith(file_prefix): |
| 130 filename = url[len(file_prefix):] |
| 131 return open(filename, 'r').read() |
| 132 else: |
| 133 return urllib2.urlopen(url).read() |
| 134 |
| 135 # Returns a list of tests that require rebaselining. |
| 136 # |
| 137 # params: |
| 138 # json_url: URL pointing to a JSON actual result summary file |
| 139 # |
| 140 # TODO(epoger): add a parameter indicating whether "no-comparison" |
| 141 # results (those for which we don't have any expectations yet) |
| 142 # should be rebaselined. For now, we only return failed expectations. |
| 143 def _GetTestsToRebaseline(self, json_url): |
| 144 print ('# Getting tests to rebaseline from JSON summary URL %s ...' |
| 145 % json_url) |
| 146 json_contents = self._GetContentsOfUrl(json_url) |
| 147 json_dict = gm_json.LoadFromString(json_contents) |
| 148 actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] |
| 149 |
| 150 tests_to_rebaseline = [] |
| 151 failed_results = actual_results[gm_json.JSONKEY_ACTUALRESULTS_FAILED] |
| 152 if failed_results: |
| 153 tests_to_rebaseline.extend(failed_results.keys()) |
| 154 |
| 155 print '# ... found tests_to_rebaseline %s' % tests_to_rebaseline |
| 156 return tests_to_rebaseline |
| 157 |
104 # Rebaseline a single file. | 158 # Rebaseline a single file. |
105 def _RebaselineOneFile(self, expectations_subdir, builder_name, | 159 def _RebaselineOneFile(self, expectations_subdir, builder_name, |
106 infilename, outfilename): | 160 infilename, outfilename): |
107 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + | 161 url = ('http://skia-autogen.googlecode.com/svn/gm-actual/' + |
108 expectations_subdir + '/' + builder_name + '/' + | 162 expectations_subdir + '/' + builder_name + '/' + |
109 expectations_subdir + '/' + infilename) | 163 expectations_subdir + '/' + infilename) |
110 | 164 |
111 # Try to download this file, but if that fails, keep going... | 165 # Try to download this file, but if that fails, keep going... |
112 # | 166 # |
113 # This is not treated as a fatal failure because not all | 167 # This is not treated as a fatal failure because not all |
(...skipping 43 matching lines...) |
157 infilename = test + '_' + config + '.png' | 211 infilename = test + '_' + config + '.png' |
158 print '# ' + infilename | 212 print '# ' + infilename |
159 outfilename = os.path.join(expectations_subdir, infilename) | 213 outfilename = os.path.join(expectations_subdir, infilename) |
160 self._RebaselineOneFile(expectations_subdir=expectations_subdir, | 214 self._RebaselineOneFile(expectations_subdir=expectations_subdir, |
161 builder_name=builder_name, | 215 builder_name=builder_name, |
162 infilename=infilename, | 216 infilename=infilename, |
163 outfilename=outfilename) | 217 outfilename=outfilename) |
164 | 218 |
165 # Rebaseline all platforms/tests/types we specified in the constructor. | 219 # Rebaseline all platforms/tests/types we specified in the constructor. |
166 def RebaselineAll(self): | 220 def RebaselineAll(self): |
167 for test in self._tests: | 221 for subdir in self._subdirs: |
168 for subdir in self._subdirs: | 222 if not subdir in SUBDIR_MAPPING.keys(): |
169 if not subdir in SUBDIR_MAPPING.keys(): | 223 raise Exception(('unrecognized platform subdir "%s"; ' + |
170 raise Exception(('unrecognized platform subdir "%s"; ' + | 224 'should be one of %s') % ( |
171 'should be one of %s') % ( | 225 subdir, SUBDIR_MAPPING.keys())) |
172 subdir, SUBDIR_MAPPING.keys())) | 226 builder_name = SUBDIR_MAPPING[subdir] |
173 builder_name = SUBDIR_MAPPING[subdir] | 227 if self._tests: |
| 228 tests = self._tests |
| 229 else: |
| 230 json_url = '/'.join([self._json_base_url, |
| 231 subdir, builder_name, subdir, |
| 232 self._json_filename]) |
| 233 tests = self._GetTestsToRebaseline(json_url=json_url) |
| 234 for test in tests: |
174 self._RebaselineOneTest(expectations_subdir=subdir, | 235 self._RebaselineOneTest(expectations_subdir=subdir, |
175 builder_name=builder_name, | 236 builder_name=builder_name, |
176 test=test) | 237 test=test) |
177 | 238 |
178 | 239 |
179 # main... | 240 # main... |
180 | 241 |
181 parser = argparse.ArgumentParser() | 242 parser = argparse.ArgumentParser() |
182 parser.add_argument('--configs', metavar='CONFIG', nargs='+', | 243 parser.add_argument('--configs', metavar='CONFIG', nargs='+', |
183 help='which configurations to rebaseline, e.g. ' + | 244 help='which configurations to rebaseline, e.g. ' + |
184 '"--configs 565 8888"; if unspecified, run a default ' + | 245 '"--configs 565 8888"; if unspecified, run a default ' + |
185 'set of configs') | 246 'set of configs') |
186 parser.add_argument('--dry_run', action='store_true', | 247 parser.add_argument('--dry_run', action='store_true', |
187 help='instead of actually downloading files or adding ' + | 248 help='instead of actually downloading files or adding ' + |
188 'files to checkout, display a list of operations that ' + | 249 'files to checkout, display a list of operations that ' + |
189 'we would normally perform') | 250 'we would normally perform') |
| 251 parser.add_argument('--json_base_url', |
| 252 help='base URL from which to read JSON_FILENAME ' + |
| 253 'files; defaults to %(default)s', |
| 254 default='http://skia-autogen.googlecode.com/svn/gm-actual') |
| 255 parser.add_argument('--json_filename', |
| 256 help='filename (under JSON_BASE_URL) to read a summary ' + |
| 257 'of results from; defaults to %(default)s', |
| 258 default='actual-results.json') |
190 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', | 259 parser.add_argument('--subdirs', metavar='SUBDIR', nargs='+', |
191 help='which platform subdirectories to rebaseline; ' + | 260 help='which platform subdirectories to rebaseline; ' + |
192 'if unspecified, rebaseline all subdirs, same as ' + | 261 'if unspecified, rebaseline all subdirs, same as ' + |
193 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) | 262 '"--subdirs %s"' % ' '.join(sorted(SUBDIR_MAPPING.keys()))) |
194 parser.add_argument('--tests', metavar='TEST', nargs='+', required=True, | 263 parser.add_argument('--tests', metavar='TEST', nargs='+', |
195 help='which tests to rebaseline, e.g. ' + | 264 help='which tests to rebaseline, e.g. ' + |
196 '"--tests aaclip bigmatrix"') | 265 '"--tests aaclip bigmatrix"; if unspecified, then all ' + |
| 266 'failing tests (according to the actual-results.json ' + |
| 267 'file) will be rebaselined.') |
197 args = parser.parse_args() | 268 args = parser.parse_args() |
198 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, | 269 rebaseliner = Rebaseliner(tests=args.tests, configs=args.configs, |
199 subdirs=args.subdirs, dry_run=args.dry_run) | 270 subdirs=args.subdirs, dry_run=args.dry_run, |
| 271 json_base_url=args.json_base_url, |
| 272 json_filename=args.json_filename) |
200 rebaseliner.RebaselineAll() | 273 rebaseliner.RebaselineAll() |
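
For context, here is a minimal usage sketch of the new code path introduced on the right-hand side, assuming the script is run from the gm-expected directory; with tests=None, RebaselineAll() pulls the list of failing tests from the actual-results.json summary. The subdir value below is just one entry from SUBDIR_MAPPING, chosen for illustration, and the URL/filename are the argparse defaults defined above.

    # Illustrative only: rebaseline one platform subdir, letting the JSON
    # summary decide which tests failed; dry_run=True prints the operations
    # instead of downloading files or touching the checkout.
    rebaseliner = Rebaseliner(
        tests=None,
        json_base_url='http://skia-autogen.googlecode.com/svn/gm-actual',
        json_filename='actual-results.json',
        subdirs=['base-shuttle-win7-intel-float'],
        dry_run=True)
    rebaseliner.RebaselineAll()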