OLD | NEW |
1 #!/usr/bin/python | 1 #!/usr/bin/python |
2 | 2 |
3 """ | 3 """ |
4 Copyright 2014 Google Inc. | 4 Copyright 2014 Google Inc. |
5 | 5 |
6 Use of this source code is governed by a BSD-style license that can be | 6 Use of this source code is governed by a BSD-style license that can be |
7 found in the LICENSE file. | 7 found in the LICENSE file. |
8 | 8 |
9 Compare results of two render_pictures runs. | 9 Compare results of two render_pictures runs. |
| 10 |
| 11 TODO(epoger): Start using this module to compare ALL images (whether they |
| 12 were generated from GMs or SKPs), and rename it accordingly. |
10 """ | 13 """ |
11 | 14 |
12 # System-level imports | 15 # System-level imports |
13 import logging | 16 import logging |
14 import os | 17 import os |
| 18 import shutil |
| 19 import tempfile |
15 import time | 20 import time |
16 | 21 |
17 # Must fix up PYTHONPATH before importing from within Skia | 22 # Must fix up PYTHONPATH before importing from within Skia |
18 import fix_pythonpath # pylint: disable=W0611 | 23 import fix_pythonpath # pylint: disable=W0611 |
19 | 24 |
20 # Imports from within Skia | 25 # Imports from within Skia |
| 26 from py.utils import gs_utils |
21 from py.utils import url_utils | 27 from py.utils import url_utils |
| 28 import buildbot_globals |
| 29 import column |
22 import gm_json | 30 import gm_json |
23 import imagediffdb | 31 import imagediffdb |
24 import imagepair | 32 import imagepair |
25 import imagepairset | 33 import imagepairset |
26 import results | 34 import results |
27 | 35 |
28 # URL under which all render_pictures images can be found in Google Storage. | 36 # URL under which all render_pictures images can be found in Google Storage. |
29 # | 37 # |
30 # pylint: disable=C0301 | 38 # TODO(epoger): In order to allow live-view of GMs and other images, read this |
31 # TODO(epoger): Move this default value into | 39 # from the input summary files, or allow the caller to set it within the |
32 # https://skia.googlesource.com/buildbot/+/master/site_config/global_variables.json | 40 # GET_live_results call. |
33 # pylint: enable=C0301 | 41 DEFAULT_IMAGE_BASE_GS_URL = 'gs://' + buildbot_globals.Get('skp_images_bucket') |
34 DEFAULT_IMAGE_BASE_URL = ( | 42 |
35 'http://chromium-skia-gm.commondatastorage.googleapis.com/' | 43 # Column descriptors, and display preferences for them. |
36 'render_pictures/images') | 44 COLUMN__RESULT_TYPE = results.KEY__EXTRACOLUMNS__RESULT_TYPE |
| 45 COLUMN__SOURCE_SKP = 'sourceSkpFile' |
| 46 COLUMN__TILED_OR_WHOLE = 'tiledOrWhole' |
| 47 COLUMN__TILENUM = 'tilenum' |
| 48 FREEFORM_COLUMN_IDS = [ |
| 49 COLUMN__TILENUM, |
| 50 ] |
| 51 ORDERED_COLUMN_IDS = [ |
| 52 COLUMN__RESULT_TYPE, |
| 53 COLUMN__SOURCE_SKP, |
| 54 COLUMN__TILED_OR_WHOLE, |
| 55 COLUMN__TILENUM, |
| 56 ] |
37 | 57 |
38 | 58 |
39 class RenderedPicturesComparisons(results.BaseComparisons): | 59 class RenderedPicturesComparisons(results.BaseComparisons): |
40 """Loads results from two different render_pictures runs into an ImagePairSet. | 60 """Loads results from multiple render_pictures runs into an ImagePairSet. |
41 """ | 61 """ |
42 | 62 |
43 def __init__(self, subdirs, actuals_root, | 63 def __init__(self, actuals_dirs, expectations_dirs, image_diff_db, |
44 generated_images_root=results.DEFAULT_GENERATED_IMAGES_ROOT, | 64 image_base_gs_url=DEFAULT_IMAGE_BASE_GS_URL, |
45 image_base_url=DEFAULT_IMAGE_BASE_URL, | 65 diff_base_url=None, actuals_label='actuals', |
46 diff_base_url=None): | 66 expectations_label='expectations', gs=None, |
| 67 truncate_results=False): |
47 """ | 68 """ |
48 Args: | 69 Args: |
49 actuals_root: root directory containing all render_pictures-generated | 70 actuals_dirs: list of root directories to copy all JSON summaries from, |
50 JSON files | 71 and to use as actual results |
51 subdirs: (string, string) tuple; pair of subdirectories within | 72 expectations_dirs: list of root directories to copy all JSON summaries |
52 actuals_root to compare | 73 from, and to use as expected results |
53 generated_images_root: directory within which to create all pixel diffs; | 74 image_diff_db: ImageDiffDB instance |
54 if this directory does not yet exist, it will be created | 75 image_base_gs_url: "gs://" URL pointing at the Google Storage bucket/dir |
55 image_base_url: URL under which all render_pictures result images can | 76 under which all render_pictures result images can |
56 be found; this will be used to read images for comparison within | 77 be found; this will be used to read images for comparison within |
57 this code, and included in the ImagePairSet so its consumers know | 78 this code, and included in the ImagePairSet (as an HTTP URL) so its |
58 where to download the images from | 79 consumers know where to download the images from |
59 diff_base_url: base URL within which the client should look for diff | 80 diff_base_url: base URL within which the client should look for diff |
60 images; if not specified, defaults to a "file:///" URL representation | 81 images; if not specified, defaults to a "file:///" URL representation |
61 of generated_images_root | 82 of image_diff_db's storage_root |
| 83 actuals_label: description to use for actual results |
| 84 expectations_label: description to use for expected results |
| 85 gs: instance of GSUtils object we can use to download summary files |
| 86 truncate_results: FOR TESTING ONLY: if True, truncate the set of images |
| 87 we process, to speed up testing. |
62 """ | 88 """ |
63 time_start = int(time.time()) | 89 super(RenderedPicturesComparisons, self).__init__() |
64 self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root) | 90 self._image_diff_db = image_diff_db |
65 self._image_base_url = image_base_url | 91 self._image_base_gs_url = image_base_gs_url |
66 self._diff_base_url = ( | 92 self._diff_base_url = ( |
67 diff_base_url or | 93 diff_base_url or |
68 url_utils.create_filepath_url(generated_images_root)) | 94 url_utils.create_filepath_url(image_diff_db.storage_root)) |
69 self._load_result_pairs(actuals_root, subdirs) | 95 self._actuals_label = actuals_label |
70 self._timestamp = int(time.time()) | 96 self._expectations_label = expectations_label |
71 logging.info('Results complete; took %d seconds.' % | 97 self._gs = gs |
72 (self._timestamp - time_start)) | 98 self.truncate_results = truncate_results |
73 | 99 |
74 def _load_result_pairs(self, actuals_root, subdirs): | 100 tempdir = tempfile.mkdtemp() |
75 """Loads all JSON files found within two subdirs in actuals_root, | 101 try: |
76 compares across those two subdirs, and stores the summary in self._results. | 102 actuals_root = os.path.join(tempdir, 'actuals') |
| 103 expectations_root = os.path.join(tempdir, 'expectations') |
| 104 for source_dir in actuals_dirs: |
| 105 self._copy_dir_contents(source_dir=source_dir, dest_dir=actuals_root) |
| 106 for source_dir in expectations_dirs: |
| 107 self._copy_dir_contents(source_dir=source_dir, |
| 108 dest_dir=expectations_root) |
| 109 |
| 110 time_start = int(time.time()) |
| 111 self._results = self._load_result_pairs(actuals_root, expectations_root) |
| 112 self._timestamp = int(time.time()) |
| 113 logging.info('Number of download file collisions: %s' % |
| 114 imagediffdb.global_file_collisions) |
| 115 logging.info('Results complete; took %d seconds.' % |
| 116 (self._timestamp - time_start)) |
| 117 finally: |
| 118 shutil.rmtree(tempdir) |
| 119 |
| 120 def _load_result_pairs(self, actuals_root, expectations_root): |
| 121 """Loads all JSON image summaries from 2 directory trees and compares them. |
77 | 122 |
78 Args: | 123 Args: |
79 actuals_root: root directory containing all render_pictures-generated | 124 actuals_root: root directory containing JSON summaries of actual results |
80 JSON files | 125 expectations_root: root dir containing JSON summaries of expected results |
81 subdirs: (string, string) tuple; pair of subdirectories within | 126 |
82 actuals_root to compare | 127 Returns the summary of all image diff results. |
83 """ | 128 """ |
84 logging.info( | 129 logging.info('Reading JSON image summaries from dirs %s and %s...' % ( |
85 'Reading actual-results JSON files from %s subdirs within %s...' % ( | 130 actuals_root, expectations_root)) |
86 subdirs, actuals_root)) | 131 actuals_dicts = self._read_dicts_from_root(actuals_root) |
87 subdirA, subdirB = subdirs | 132 expectations_dicts = self._read_dicts_from_root(expectations_root) |
88 subdirA_dicts = self._read_dicts_from_root( | 133 logging.info('Comparing summary dicts...') |
89 os.path.join(actuals_root, subdirA)) | |
90 subdirB_dicts = self._read_dicts_from_root( | |
91 os.path.join(actuals_root, subdirB)) | |
92 logging.info('Comparing subdirs %s and %s...' % (subdirA, subdirB)) | |
93 | 134 |
94 all_image_pairs = imagepairset.ImagePairSet( | 135 all_image_pairs = imagepairset.ImagePairSet( |
95 descriptions=subdirs, | 136 descriptions=(self._actuals_label, self._expectations_label), |
96 diff_base_url=self._diff_base_url) | 137 diff_base_url=self._diff_base_url) |
97 failing_image_pairs = imagepairset.ImagePairSet( | 138 failing_image_pairs = imagepairset.ImagePairSet( |
98 descriptions=subdirs, | 139 descriptions=(self._actuals_label, self._expectations_label), |
99 diff_base_url=self._diff_base_url) | 140 diff_base_url=self._diff_base_url) |
100 | 141 |
| 142 # Override settings for columns that should be filtered using freeform text. |
| 143 for column_id in FREEFORM_COLUMN_IDS: |
| 144 factory = column.ColumnHeaderFactory( |
| 145 header_text=column_id, use_freeform_filter=True) |
| 146 all_image_pairs.set_column_header_factory( |
| 147 column_id=column_id, column_header_factory=factory) |
| 148 failing_image_pairs.set_column_header_factory( |
| 149 column_id=column_id, column_header_factory=factory) |
| 150 |
101 all_image_pairs.ensure_extra_column_values_in_summary( | 151 all_image_pairs.ensure_extra_column_values_in_summary( |
102 column_id=results.KEY__EXTRACOLUMNS__RESULT_TYPE, values=[ | 152 column_id=COLUMN__RESULT_TYPE, values=[ |
103 results.KEY__RESULT_TYPE__FAILED, | 153 results.KEY__RESULT_TYPE__FAILED, |
104 results.KEY__RESULT_TYPE__NOCOMPARISON, | 154 results.KEY__RESULT_TYPE__NOCOMPARISON, |
105 results.KEY__RESULT_TYPE__SUCCEEDED, | 155 results.KEY__RESULT_TYPE__SUCCEEDED, |
106 ]) | 156 ]) |
107 failing_image_pairs.ensure_extra_column_values_in_summary( | 157 failing_image_pairs.ensure_extra_column_values_in_summary( |
108 column_id=results.KEY__EXTRACOLUMNS__RESULT_TYPE, values=[ | 158 column_id=COLUMN__RESULT_TYPE, values=[ |
109 results.KEY__RESULT_TYPE__FAILED, | 159 results.KEY__RESULT_TYPE__FAILED, |
110 results.KEY__RESULT_TYPE__NOCOMPARISON, | 160 results.KEY__RESULT_TYPE__NOCOMPARISON, |
111 ]) | 161 ]) |
112 | 162 |
113 common_dict_paths = sorted(set(subdirA_dicts.keys() + subdirB_dicts.keys())) | 163 union_dict_paths = sorted(set( |
114 num_common_dict_paths = len(common_dict_paths) | 164 actuals_dicts.keys() + expectations_dicts.keys())) |
| 165 num_union_dict_paths = len(union_dict_paths) |
115 dict_num = 0 | 166 dict_num = 0 |
116 for dict_path in common_dict_paths: | 167 for dict_path in union_dict_paths: |
117 dict_num += 1 | 168 dict_num += 1 |
118 logging.info('Generating pixel diffs for dict #%d of %d, "%s"...' % | 169 logging.info('Generating pixel diffs for dict #%d of %d, "%s"...' % |
119 (dict_num, num_common_dict_paths, dict_path)) | 170 (dict_num, num_union_dict_paths, dict_path)) |
120 dictA = subdirA_dicts[dict_path] | 171 |
121 dictB = subdirB_dicts[dict_path] | 172 dictA = actuals_dicts.get(dict_path, None) |
122 self._validate_dict_version(dictA) | 173 if dictA: |
123 self._validate_dict_version(dictB) | 174 self._validate_dict_version(dictA) |
124 dictA_results = dictA[gm_json.JSONKEY_ACTUALRESULTS] | 175 dictA_results = dictA[gm_json.JSONKEY_ACTUALRESULTS] |
125 dictB_results = dictB[gm_json.JSONKEY_ACTUALRESULTS] | 176 dictA_keys = dictA_results.keys() |
126 skp_names = sorted(set(dictA_results.keys() + dictB_results.keys())) | 177 else: |
| 178 dictA_keys = [] |
| 179 |
| 180 dictB = expectations_dicts.get(dict_path, None) |
| 181 if dictB: |
| 182 self._validate_dict_version(dictB) |
| 183 dictB_results = dictB[gm_json.JSONKEY_ACTUALRESULTS] |
| 184 dictB_keys = dictB_results.keys() |
| 185 else: |
| 186 dictB_keys = [] |
| 187 |
| 188 skp_names = sorted(set(dictA_keys + dictB_keys)) |
| 189 if self.truncate_results: |
| 190 skp_names = skp_names[1:3] |
127 for skp_name in skp_names: | 191 for skp_name in skp_names: |
128 imagepairs_for_this_skp = [] | 192 imagepairs_for_this_skp = [] |
129 | 193 |
130 whole_image_A = RenderedPicturesComparisons.get_multilevel( | 194 whole_image_A = RenderedPicturesComparisons.get_multilevel( |
131 dictA_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE) | 195 dictA_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE) |
132 whole_image_B = RenderedPicturesComparisons.get_multilevel( | 196 whole_image_B = RenderedPicturesComparisons.get_multilevel( |
133 dictB_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE) | 197 dictB_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE) |
134 imagepairs_for_this_skp.append(self._create_image_pair( | 198 imagepairs_for_this_skp.append(self._create_image_pair( |
135 test=skp_name, config=gm_json.JSONKEY_SOURCE_WHOLEIMAGE, | 199 image_dict_A=whole_image_A, image_dict_B=whole_image_B, |
136 image_dict_A=whole_image_A, image_dict_B=whole_image_B)) | 200 source_skp_name=skp_name, tilenum=None)) |
137 | 201 |
138 tiled_images_A = RenderedPicturesComparisons.get_multilevel( | 202 tiled_images_A = RenderedPicturesComparisons.get_multilevel( |
139 dictA_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES) | 203 dictA_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES) |
140 tiled_images_B = RenderedPicturesComparisons.get_multilevel( | 204 tiled_images_B = RenderedPicturesComparisons.get_multilevel( |
141 dictB_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES) | 205 dictB_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES) |
142 # TODO(epoger): Report an error if we find tiles for A but not B? | 206 # TODO(epoger): Report an error if we find tiles for A but not B? |
143 if tiled_images_A and tiled_images_B: | 207 if tiled_images_A and tiled_images_B: |
144 # TODO(epoger): Report an error if we find a different number of tiles | 208 # TODO(epoger): Report an error if we find a different number of tiles |
145 # for A and B? | 209 # for A and B? |
146 num_tiles = len(tiled_images_A) | 210 num_tiles = len(tiled_images_A) |
147 for tile_num in range(num_tiles): | 211 for tile_num in range(num_tiles): |
148 imagepairs_for_this_skp.append(self._create_image_pair( | 212 imagepairs_for_this_skp.append(self._create_image_pair( |
149 test=skp_name, | |
150 config='%s-%d' % (gm_json.JSONKEY_SOURCE_TILEDIMAGES, tile_num), | |
151 image_dict_A=tiled_images_A[tile_num], | 213 image_dict_A=tiled_images_A[tile_num], |
152 image_dict_B=tiled_images_B[tile_num])) | 214 image_dict_B=tiled_images_B[tile_num], |
| 215 source_skp_name=skp_name, tilenum=tile_num)) |
153 | 216 |
154 for one_imagepair in imagepairs_for_this_skp: | 217 for one_imagepair in imagepairs_for_this_skp: |
155 if one_imagepair: | 218 if one_imagepair: |
156 all_image_pairs.add_image_pair(one_imagepair) | 219 all_image_pairs.add_image_pair(one_imagepair) |
157 result_type = one_imagepair.extra_columns_dict\ | 220 result_type = one_imagepair.extra_columns_dict\ |
158 [results.KEY__EXTRACOLUMNS__RESULT_TYPE] | 221 [COLUMN__RESULT_TYPE] |
159 if result_type != results.KEY__RESULT_TYPE__SUCCEEDED: | 222 if result_type != results.KEY__RESULT_TYPE__SUCCEEDED: |
160 failing_image_pairs.add_image_pair(one_imagepair) | 223 failing_image_pairs.add_image_pair(one_imagepair) |
161 | 224 |
162 # pylint: disable=W0201 | 225 return { |
163 self._results = { | 226 results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict( |
164 results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict(), | 227 column_ids_in_order=ORDERED_COLUMN_IDS), |
165 results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict(), | 228 results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict( |
| 229 column_ids_in_order=ORDERED_COLUMN_IDS), |
166 } | 230 } |
167 | 231 |
168 def _validate_dict_version(self, result_dict): | 232 def _validate_dict_version(self, result_dict): |
169 """Raises Exception if the dict is not the type/version we know how to read. | 233 """Raises Exception if the dict is not the type/version we know how to read. |
170 | 234 |
171 Args: | 235 Args: |
172 result_dict: dictionary holding output of render_pictures | 236 result_dict: dictionary holding output of render_pictures |
173 """ | 237 """ |
174 expected_header_type = 'ChecksummedImages' | 238 expected_header_type = 'ChecksummedImages' |
175 expected_header_revision = 1 | 239 expected_header_revision = 1 |
176 | 240 |
177 header = result_dict[gm_json.JSONKEY_HEADER] | 241 header = result_dict[gm_json.JSONKEY_HEADER] |
178 header_type = header[gm_json.JSONKEY_HEADER_TYPE] | 242 header_type = header[gm_json.JSONKEY_HEADER_TYPE] |
179 if header_type != expected_header_type: | 243 if header_type != expected_header_type: |
180 raise Exception('expected header_type "%s", but got "%s"' % ( | 244 raise Exception('expected header_type "%s", but got "%s"' % ( |
181 expected_header_type, header_type)) | 245 expected_header_type, header_type)) |
182 header_revision = header[gm_json.JSONKEY_HEADER_REVISION] | 246 header_revision = header[gm_json.JSONKEY_HEADER_REVISION] |
183 if header_revision != expected_header_revision: | 247 if header_revision != expected_header_revision: |
184 raise Exception('expected header_revision %d, but got %d' % ( | 248 raise Exception('expected header_revision %d, but got %d' % ( |
185 expected_header_revision, header_revision)) | 249 expected_header_revision, header_revision)) |
186 | 250 |
187 def _create_image_pair(self, test, config, image_dict_A, image_dict_B): | 251 def _create_image_pair(self, image_dict_A, image_dict_B, source_skp_name, |
| 252 tilenum): |
188 """Creates an ImagePair object for this pair of images. | 253 """Creates an ImagePair object for this pair of images. |
189 | 254 |
190 Args: | 255 Args: |
191 test: string; name of the test | |
192 config: string; name of the config | |
193 image_dict_A: dict with JSONKEY_IMAGE_* keys, or None if no image | 256 image_dict_A: dict with JSONKEY_IMAGE_* keys, or None if no image |
194 image_dict_B: dict with JSONKEY_IMAGE_* keys, or None if no image | 257 image_dict_B: dict with JSONKEY_IMAGE_* keys, or None if no image |
| 258 source_skp_name: string; name of the source SKP file |
| 259 tilenum: which tile, or None if a wholeimage |
195 | 260 |
196 Returns: | 261 Returns: |
197 An ImagePair object, or None if both image_dict_A and image_dict_B are | 262 An ImagePair object, or None if both image_dict_A and image_dict_B are |
198 None. | 263 None. |
199 """ | 264 """ |
200 if (not image_dict_A) and (not image_dict_B): | 265 if (not image_dict_A) and (not image_dict_B): |
201 return None | 266 return None |
202 | 267 |
203 def _checksum_and_relative_url(dic): | 268 def _checksum_and_relative_url(dic): |
204 if dic: | 269 if dic: |
(...skipping 11 matching lines...) |
216 if not imageA_checksum: | 281 if not imageA_checksum: |
217 result_type = results.KEY__RESULT_TYPE__NOCOMPARISON | 282 result_type = results.KEY__RESULT_TYPE__NOCOMPARISON |
218 elif not imageB_checksum: | 283 elif not imageB_checksum: |
219 result_type = results.KEY__RESULT_TYPE__NOCOMPARISON | 284 result_type = results.KEY__RESULT_TYPE__NOCOMPARISON |
220 elif imageA_checksum == imageB_checksum: | 285 elif imageA_checksum == imageB_checksum: |
221 result_type = results.KEY__RESULT_TYPE__SUCCEEDED | 286 result_type = results.KEY__RESULT_TYPE__SUCCEEDED |
222 else: | 287 else: |
223 result_type = results.KEY__RESULT_TYPE__FAILED | 288 result_type = results.KEY__RESULT_TYPE__FAILED |
224 | 289 |
225 extra_columns_dict = { | 290 extra_columns_dict = { |
226 results.KEY__EXTRACOLUMNS__CONFIG: config, | 291 COLUMN__RESULT_TYPE: result_type, |
227 results.KEY__EXTRACOLUMNS__RESULT_TYPE: result_type, | 292 COLUMN__SOURCE_SKP: source_skp_name, |
228 results.KEY__EXTRACOLUMNS__TEST: test, | |
229 # TODO(epoger): Right now, the client UI crashes if it receives | |
230 # results that do not include this column. | |
231 # Until we fix that, keep the client happy. | |
232 results.KEY__EXTRACOLUMNS__BUILDER: 'TODO', | |
233 } | 293 } |
| 294 if tilenum == None: |
| 295 extra_columns_dict[COLUMN__TILED_OR_WHOLE] = 'whole' |
| 296 extra_columns_dict[COLUMN__TILENUM] = 'N/A' |
| 297 else: |
| 298 extra_columns_dict[COLUMN__TILED_OR_WHOLE] = 'tiled' |
| 299 extra_columns_dict[COLUMN__TILENUM] = str(tilenum) |
234 | 300 |
235 try: | 301 try: |
236 return imagepair.ImagePair( | 302 return imagepair.ImagePair( |
237 image_diff_db=self._image_diff_db, | 303 image_diff_db=self._image_diff_db, |
238 base_url=self._image_base_url, | 304 base_url=self._image_base_gs_url, |
239 imageA_relative_url=imageA_relative_url, | 305 imageA_relative_url=imageA_relative_url, |
240 imageB_relative_url=imageB_relative_url, | 306 imageB_relative_url=imageB_relative_url, |
241 extra_columns=extra_columns_dict) | 307 extra_columns=extra_columns_dict) |
242 except (KeyError, TypeError): | 308 except (KeyError, TypeError): |
243 logging.exception( | 309 logging.exception( |
244 'got exception while creating ImagePair for' | 310 'got exception while creating ImagePair for' |
245 ' test="%s", config="%s", urlPair=("%s","%s")' % ( | 311 ' urlPair=("%s","%s"), source_skp_name="%s", tilenum="%s"' % ( |
246 test, config, imageA_relative_url, imageB_relative_url)) | 312 imageA_relative_url, imageB_relative_url, source_skp_name, |
| 313 tilenum)) |
247 return None | 314 return None |
248 | 315 |
| 316 def _copy_dir_contents(self, source_dir, dest_dir): |
| 317 """Copy all contents of source_dir into dest_dir, recursing into subdirs. |
249 | 318 |
250 # TODO(epoger): Add main() so this can be called by vm_run_skia_try.sh | 319 Args: |
| 320 source_dir: path to source dir (GS URL or local filepath) |
| 321 dest_dir: path to destination dir (local filepath) |
| 322 |
| 323 The copy operates as a "merge with overwrite": any files in source_dir will |
| 324 be "overlaid" on top of the existing content in dest_dir. Existing files |
| 325 with the same names will be overwritten. |
| 326 """ |
| 327 if gs_utils.GSUtils.is_gs_url(source_dir): |
| 328 (bucket, path) = gs_utils.GSUtils.split_gs_url(source_dir) |
| 329 self._gs.download_dir_contents(source_bucket=bucket, source_dir=path, |
| 330 dest_dir=dest_dir) |
| 331 else: |
| 332 shutil.copytree(source_dir, dest_dir) |
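
For reference, a minimal sketch of how the reworked class might be driven, assuming the ImageDiffDB(storage_root) and no-argument GSUtils() constructors behave as the calls in this CL suggest; the module name compare_rendered_pictures and the example bucket/paths are placeholders, not part of this CL:

    # Usage sketch only -- not part of this CL.  ImageDiffDB(storage_root) and
    # GSUtils() constructor signatures, the module name, and the bucket/paths
    # below are assumptions for illustration.
    import tempfile

    import fix_pythonpath  # pylint: disable=W0611
    from py.utils import gs_utils
    import compare_rendered_pictures  # assumed filename of the module shown above
    import imagediffdb
    import results

    storage_root = tempfile.mkdtemp()
    comparisons = compare_rendered_pictures.RenderedPicturesComparisons(
        actuals_dirs=['gs://example-bucket/render_pictures/actuals'],  # made up
        expectations_dirs=['/tmp/render_pictures/expectations'],       # made up
        image_diff_db=imagediffdb.ImageDiffDB(storage_root),
        gs=gs_utils.GSUtils(),       # constructor arguments are an assumption
        truncate_results=True)       # FOR TESTING ONLY, per the docstring
    # __init__ stores the comparison summary in self._results, keyed by
    # KEY__HEADER__RESULTS_ALL and KEY__HEADER__RESULTS_FAILURES.
    failures = comparisons._results[results.KEY__HEADER__RESULTS_FAILURES]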
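The new column constants drive how image pairs are grouped and filtered in the UI; roughly, the extra_columns dict that _create_image_pair() attaches to each pair looks like this (the SKP name and result types below are made-up example values):

    # Illustration only: shape of the extra_columns dict built by
    # _create_image_pair().  'mypicture.skp' is a made-up SKP name.
    import fix_pythonpath  # pylint: disable=W0611
    import results
    from compare_rendered_pictures import (  # assumed module name
        COLUMN__RESULT_TYPE, COLUMN__SOURCE_SKP, COLUMN__TILED_OR_WHOLE,
        COLUMN__TILENUM)

    whole_image_columns = {
        COLUMN__RESULT_TYPE: results.KEY__RESULT_TYPE__SUCCEEDED,
        COLUMN__SOURCE_SKP: 'mypicture.skp',
        COLUMN__TILED_OR_WHOLE: 'whole',
        COLUMN__TILENUM: 'N/A',
    }
    tile_columns = {
        COLUMN__RESULT_TYPE: results.KEY__RESULT_TYPE__FAILED,
        COLUMN__SOURCE_SKP: 'mypicture.skp',
        COLUMN__TILED_OR_WHOLE: 'tiled',
        COLUMN__TILENUM: '3',  # stored as str(tilenum); filtered via the
                               # freeform column header set in _load_result_pairs()
    }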