OLD | NEW |
(Empty) | |
| 1 #!/usr/bin/python |
| 2 |
| 3 """ |
| 4 Copyright 2014 Google Inc. |
| 5 |
| 6 Use of this source code is governed by a BSD-style license that can be |
| 7 found in the LICENSE file. |
| 8 |
| 9 Expectations on local disk that we can modify. |
| 10 """ |
| 11 |
| 12 # System-level imports |
| 13 import logging |
| 14 import os |
| 15 import re |
| 16 |
| 17 # Must fix up PYTHONPATH before importing from within Skia |
| 18 import rs_fixpypath # pylint: disable=W0611 |
| 19 |
| 20 # Imports from within Skia |
| 21 from py.utils import git_utils |
| 22 import compare_rendered_pictures |
| 23 import gm_json |
| 24 import imagepair |
| 25 import results |
| 26 |
# Matches image file paths; per its use in _add_image_info_to_expectations the
# pattern yields two capture groups — presumably (checksumAlgorithm,
# checksumValue) from gm_json.IMAGE_FILENAME_PATTERN — TODO confirm against
# gm_json.
FILEPATH_RE = re.compile('.+/' + gm_json.IMAGE_FILENAME_PATTERN)

# Path to the Skia repo's .git directory, two levels above this file's
# directory; used as the clone source for writable checkouts below.
SKIA_REPO = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, '.git'))
| 31 |
| 32 |
class WritableExpectations(git_utils.NewGitCheckout):
  """Expectations on local disk that we can modify."""

  def __init__(self, set_descriptions):
    """Creates a sandbox on local disk containing writable expectations.

    You must use the 'with' statement to create this object in such a way that
    it cleans up after itself:

    with WritableExpectations(*args) as writable_expectations:
      # make modifications
      # use the modified results
    # the sandbox on local disk is automatically cleaned up here

    Args:
      set_descriptions: SET_DESCRIPTIONS dict describing the set we want to
          update expectations within; this tells us the subdirectory within the
          Skia repo where we keep these expectations, and the commithash at
          which the user evaluated new baselines.
    """
    file_section = set_descriptions[results.KEY__SET_DESCRIPTIONS__SECTION]
    # Only expected-results sections are editable here.
    assert file_section == gm_json.JSONKEY_EXPECTEDRESULTS

    source_dir = _unicode_to_ascii(
        set_descriptions[results.KEY__SET_DESCRIPTIONS__DIR])
    assert source_dir.startswith(compare_rendered_pictures.REPO_URL_PREFIX)
    # Strip the repo-URL prefix to get the subdirectory within the repo.
    repo_subdir = source_dir[len(compare_rendered_pictures.REPO_URL_PREFIX):]
    repo_revision = _unicode_to_ascii(
        set_descriptions[results.KEY__SET_DESCRIPTIONS__REPO_REVISION])

    # Lazy %-style logging args: formatting is skipped if INFO is filtered out.
    logging.info('Creating a writable Skia checkout at revision "%s"...',
                 repo_revision)
    super(WritableExpectations, self).__init__(
        repository=SKIA_REPO, commit=repo_revision, subdir=repo_subdir)

  def modify(self, modifications):
    """Modify the contents of the checkout, using modifications from the UI.

    Args:
      modifications: data[KEY__LIVE_EDITS__MODIFICATIONS] coming back from the
          rebaseline_server UI frontend
    """
    logging.info('Reading in dicts from writable Skia checkout in %s ...',
                 self.root)
    dicts = results.BaseComparisons.read_dicts_from_root(self.root)

    # Make sure we have expected-results sections in all our output dicts.
    # items() (not Python-2-only iteritems()) works on Python 2 and 3 alike;
    # only existing keys are reassigned below, so iteration stays safe.
    for pathname, adict in dicts.items():
      if not adict:
        adict = {}
      if not adict.get(gm_json.JSONKEY_EXPECTEDRESULTS, None):
        adict[gm_json.JSONKEY_EXPECTEDRESULTS] = {}
      dicts[pathname] = adict

    for modification in modifications:
      expectations = modification[imagepair.KEY__IMAGEPAIRS__EXPECTATIONS]
      _add_image_info_to_expectations(
          expectations=expectations,
          filepath=modification[imagepair.KEY__IMAGEPAIRS__IMAGE_B_URL])
      extra_columns = modification[imagepair.KEY__IMAGEPAIRS__EXTRACOLUMNS]
      dictname = modification[imagepair.KEY__IMAGEPAIRS__SOURCE_JSON_FILE]
      dict_to_modify = dicts[dictname][gm_json.JSONKEY_EXPECTEDRESULTS]
      test_name = extra_columns[compare_rendered_pictures.COLUMN__SOURCE_SKP]
      test_record = dict_to_modify.get(test_name, {})
      if (extra_columns[compare_rendered_pictures.COLUMN__TILED_OR_WHOLE] ==
          compare_rendered_pictures.COLUMN__TILED_OR_WHOLE__TILED):
        # Tiled rendering: replace just this tile's expectations, padding the
        # tile list with None entries if it is too short (see
        # _replace_list_item).
        test_tiles_list = test_record.get(
            gm_json.JSONKEY_SOURCE_TILEDIMAGES, [])
        tilenum = int(extra_columns[compare_rendered_pictures.COLUMN__TILENUM])
        _replace_list_item(test_tiles_list, tilenum, expectations)
        test_record[gm_json.JSONKEY_SOURCE_TILEDIMAGES] = test_tiles_list
      else:
        test_record[gm_json.JSONKEY_SOURCE_WHOLEIMAGE] = expectations
      dict_to_modify[test_name] = test_record

    # Write the modified files back to disk.
    self._write_dicts_to_root(meta_dict=dicts, root=self.root)

  def get_diffs(self):
    """Return patchfile describing any modifications to this checkout."""
    return self._run_in_git_root(args=[git_utils.GIT, 'diff'])

  @staticmethod
  def _write_dicts_to_root(meta_dict, root):
    """Write out multiple dictionaries in JSON format.

    Args:
      meta_dict: a builder-keyed meta-dictionary containing all the JSON
          dictionaries we want to write out
      root: path to root of directory tree within which to write files

    Raises:
      IOError: if root is not an existing directory
    """
    if not os.path.isdir(root):
      raise IOError('no directory found at path %s' % root)

    # Iterate key/value pairs directly instead of re-looking-up each value.
    for rel_path, adict in meta_dict.items():
      full_path = os.path.join(root, rel_path)
      gm_json.WriteToFile(adict, full_path)
| 130 |
| 131 |
| 132 def _unicode_to_ascii(unicode_string): |
| 133 """Returns the plain ASCII form of a unicode string. |
| 134 |
| 135 TODO(stephana): We created this because we get unicode strings out of the |
| 136 JSON file, while the git filenames and revision tags are plain ASCII. |
| 137 There may be a better way to handle this... maybe set the JSON util to just |
| 138 return ASCII strings? |
| 139 """ |
| 140 return unicode_string.encode('ascii', 'ignore') |
| 141 |
| 142 |
| 143 def _replace_list_item(a_list, index, value): |
| 144 """Replaces value at index "index" within a_list. |
| 145 |
| 146 Args: |
| 147 a_list: a list |
| 148 index: index indicating which item in a_list to replace |
| 149 value: value to set a_list[index] to |
| 150 |
| 151 If a_list does not contain this index, it will be extended with None entries |
| 152 to that length. |
| 153 """ |
| 154 length = len(a_list) |
| 155 while index >= length: |
| 156 a_list.append(None) |
| 157 length += 1 |
| 158 a_list[index] = value |
| 159 |
| 160 |
def _add_image_info_to_expectations(expectations, filepath):
  """Fill in the JSONKEY_IMAGE_* entries of an expectations dict.

  The checksum algorithm and value are parsed out of filepath via FILEPATH_RE.

  TODO(stephana): This assumes that the checksumAlgorithm and checksumValue
  can be derived from the filepath, which is currently true but may not always
  be true.

  Args:
    expectations: the expectations dict to augment
    filepath: relative path to the image file
  """
  match = FILEPATH_RE.match(filepath)
  checksum_algorithm, checksum_value = match.groups()
  expectations[gm_json.JSONKEY_IMAGE_CHECKSUMALGORITHM] = checksum_algorithm
  expectations[gm_json.JSONKEY_IMAGE_CHECKSUMVALUE] = checksum_value
  expectations[gm_json.JSONKEY_IMAGE_FILEPATH] = filepath
OLD | NEW |