Chromium Code Reviews

| Index: gm/rebaseline_server/compare_rendered_pictures.py |
| diff --git a/gm/rebaseline_server/compare_rendered_pictures.py b/gm/rebaseline_server/compare_rendered_pictures.py |
| index a48d1c57637d58b93a2a227840b6ccbc45677ac6..ea9aad68a021b755e9ff6d884c535a7894352df0 100755 |
| --- a/gm/rebaseline_server/compare_rendered_pictures.py |
| +++ b/gm/rebaseline_server/compare_rendered_pictures.py |
| @@ -7,18 +7,26 @@ Use of this source code is governed by a BSD-style license that can be |
| found in the LICENSE file. |
| Compare results of two render_pictures runs. |
| + |
| +TODO(epoger): Start using this module to compare ALL images (whether they |
| +were generated from GMs or SKPs), and rename it accordingly. |
| """ |
| # System-level imports |
| import logging |
| import os |
| +import shutil |
| +import tempfile |
| import time |
| # Must fix up PYTHONPATH before importing from within Skia |
| import fix_pythonpath # pylint: disable=W0611 |
| # Imports from within Skia |
| +from py.utils import gs_utils |
| from py.utils import url_utils |
| +import buildbot_globals |
| +import column |
| import gm_json |
| import imagediffdb |
| import imagepair |
| @@ -27,103 +35,159 @@ import results |
| # URL under which all render_pictures images can be found in Google Storage. |
| # |
| -# pylint: disable=C0301 |
| -# TODO(epoger): Move this default value into |
| -# https://skia.googlesource.com/buildbot/+/master/site_config/global_variables.json |
| -# pylint: enable=C0301 |
| -DEFAULT_IMAGE_BASE_URL = ( |
| - 'http://chromium-skia-gm.commondatastorage.googleapis.com/' |
| - 'render_pictures/images') |
| +# TODO(epoger): In order to allow live-view of GMs and other images, read this |
| +# from the input summary files, or allow the caller to set it within the |
| +# GET_live_results call. |
| +DEFAULT_IMAGE_BASE_GS_URL = 'gs://' + buildbot_globals.Get('skp_images_bucket') |
| + |
| +# Column descriptors, and display preferences for them. |
| +COLUMN__RESULT_TYPE = results.KEY__EXTRACOLUMNS__RESULT_TYPE |
| +COLUMN__SOURCE_SKP = 'sourceSkpFile' |
| +COLUMN__TILED_OR_WHOLE = 'tiledOrWhole' |
| +COLUMN__TILENUM = 'tilenum' |
| +FREEFORM_COLUMN_IDS = [ |
| + COLUMN__TILENUM, |
| +] |
| +ORDERED_COLUMN_IDS = [ |
| + COLUMN__RESULT_TYPE, |
| + COLUMN__SOURCE_SKP, |
| + COLUMN__TILED_OR_WHOLE, |
| + COLUMN__TILENUM, |
| +] |
| class RenderedPicturesComparisons(results.BaseComparisons): |
| - """Loads results from two different render_pictures runs into an ImagePairSet. |
| + """Loads results from multiple render_pictures runs into an ImagePairSet. |
| """ |
| - def __init__(self, subdirs, actuals_root, |
| - generated_images_root=results.DEFAULT_GENERATED_IMAGES_ROOT, |
| - image_base_url=DEFAULT_IMAGE_BASE_URL, |
| - diff_base_url=None): |
| + def __init__(self, actuals_dirs, expectations_dirs, image_diff_db, |
| + image_base_gs_url=DEFAULT_IMAGE_BASE_GS_URL, |
| + diff_base_url=None, actuals_label='actuals', |
| + expectations_label='expectations', gs=None, |
| + truncate_results=False): |
| """ |
| Args: |
| - actuals_root: root directory containing all render_pictures-generated |
| - JSON files |
| - subdirs: (string, string) tuple; pair of subdirectories within |
| - actuals_root to compare |
| - generated_images_root: directory within which to create all pixel diffs; |
| - if this directory does not yet exist, it will be created |
| - image_base_url: URL under which all render_pictures result images can |
| + actuals_dirs: list of root directories to copy all JSON summaries from, |
| + and to use as actual results |
| + expectations_dirs: list of root directories to copy all JSON summaries |
| + from, and to use as expected results |
| + image_diff_db: ImageDiffDB instance |
| + image_base_gs_url: "gs://" URL pointing at the Google Storage bucket/dir |
| + under which all render_pictures result images can |
| be found; this will be used to read images for comparison within |
| - this code, and included in the ImagePairSet so its consumers know |
| - where to download the images from |
| + this code, and included in the ImagePairSet (as an HTTP URL) so its |
| + consumers know where to download the images from |
| diff_base_url: base URL within which the client should look for diff |
| images; if not specified, defaults to a "file:///" URL representation |
| - of generated_images_root |
| + of image_diff_db's storage_root |
| + actuals_label: description to use for actual results |
| + expectations_label: description to use for expected results |
| + gs: instance of GSUtils object we can use to download summary files |
| + truncate_results: FOR TESTING ONLY: if True, truncate the set of images |

rmistry 2014/08/04 20:52:49: Another way to do this instead of making it part of ...
epoger 2014/08/05 03:34:09: Yeah, I meant manual testing. Modified the comment ...
rmistry 2014/08/05 13:46:54: Acknowledged.

| + we process, to speed up testing. |
| """ |
| - time_start = int(time.time()) |
| - self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root) |
| - self._image_base_url = image_base_url |
| + super(RenderedPicturesComparisons, self).__init__() |
| + self._image_diff_db = image_diff_db |
| + self._image_base_gs_url = image_base_gs_url |
| self._diff_base_url = ( |
| diff_base_url or |
| - url_utils.create_filepath_url(generated_images_root)) |
| - self._load_result_pairs(actuals_root, subdirs) |
| - self._timestamp = int(time.time()) |
| - logging.info('Results complete; took %d seconds.' % |
| - (self._timestamp - time_start)) |
| + url_utils.create_filepath_url(image_diff_db.storage_root)) |
| + self._actuals_label = actuals_label |
| + self._expectations_label = expectations_label |
| + self._gs = gs |
| + self.truncate_results = truncate_results |
| - def _load_result_pairs(self, actuals_root, subdirs): |
| - """Loads all JSON files found within two subdirs in actuals_root, |
| - compares across those two subdirs, and stores the summary in self._results. |
| + tempdir = tempfile.mkdtemp() |
| + try: |
| + actuals_root = os.path.join(tempdir, 'actuals') |
| + expectations_root = os.path.join(tempdir, 'expectations') |
| + for source_dir in actuals_dirs: |
| + self._copy_dir_contents(source_dir=source_dir, dest_dir=actuals_root) |
| + for source_dir in expectations_dirs: |
| + self._copy_dir_contents(source_dir=source_dir, |
| + dest_dir=expectations_root) |
| + |
| + time_start = int(time.time()) |
| + self._results = self._load_result_pairs(actuals_root, expectations_root) |
| + self._timestamp = int(time.time()) |
| + logging.info('Number of download file collisions: %s' % |
| + imagediffdb.global_file_collisions) |
| + logging.info('Results complete; took %d seconds.' % |
| + (self._timestamp - time_start)) |
| + finally: |
| + shutil.rmtree(tempdir) |
| + |
| + def _load_result_pairs(self, actuals_root, expectations_root): |
| + """Loads all JSON image summaries from 2 directory trees and compares them. |
| Args: |
| - actuals_root: root directory containing all render_pictures-generated |
| - JSON files |
| - subdirs: (string, string) tuple; pair of subdirectories within |
| - actuals_root to compare |
| + actuals_root: root directory containing JSON summaries of actual results |
| + expectations_root: root dir containing JSON summaries of expected results |
| + |
| + Returns the summary of all image diff results. |
| """ |
| - logging.info( |
| - 'Reading actual-results JSON files from %s subdirs within %s...' % ( |
| - subdirs, actuals_root)) |
| - subdirA, subdirB = subdirs |
| - subdirA_dicts = self._read_dicts_from_root( |
| - os.path.join(actuals_root, subdirA)) |
| - subdirB_dicts = self._read_dicts_from_root( |
| - os.path.join(actuals_root, subdirB)) |
| - logging.info('Comparing subdirs %s and %s...' % (subdirA, subdirB)) |
| + logging.info('Reading JSON image summaries from dirs %s and %s...' % ( |
| + actuals_root, expectations_root)) |
| + actuals_dicts = self._read_dicts_from_root(actuals_root) |
| + expectations_dicts = self._read_dicts_from_root(expectations_root) |
| + logging.info('Comparing summary dicts...') |
| all_image_pairs = imagepairset.ImagePairSet( |
| - descriptions=subdirs, |
| + descriptions=(self._actuals_label, self._expectations_label), |
| diff_base_url=self._diff_base_url) |
| failing_image_pairs = imagepairset.ImagePairSet( |
| - descriptions=subdirs, |
| + descriptions=(self._actuals_label, self._expectations_label), |
| diff_base_url=self._diff_base_url) |
| + # Override settings for columns that should be filtered using freeform text. |
| + for column_id in FREEFORM_COLUMN_IDS: |
| + factory = column.ColumnHeaderFactory( |
| + header_text=column_id, use_freeform_filter=True) |
| + all_image_pairs.set_column_header_factory( |
| + column_id=column_id, column_header_factory=factory) |
| + failing_image_pairs.set_column_header_factory( |
| + column_id=column_id, column_header_factory=factory) |
| + |
| all_image_pairs.ensure_extra_column_values_in_summary( |
| - column_id=results.KEY__EXTRACOLUMNS__RESULT_TYPE, values=[ |
| + column_id=COLUMN__RESULT_TYPE, values=[ |
| results.KEY__RESULT_TYPE__FAILED, |
| results.KEY__RESULT_TYPE__NOCOMPARISON, |
| results.KEY__RESULT_TYPE__SUCCEEDED, |
| ]) |
| failing_image_pairs.ensure_extra_column_values_in_summary( |
| - column_id=results.KEY__EXTRACOLUMNS__RESULT_TYPE, values=[ |
| + column_id=COLUMN__RESULT_TYPE, values=[ |
| results.KEY__RESULT_TYPE__FAILED, |
| results.KEY__RESULT_TYPE__NOCOMPARISON, |
| ]) |
| - common_dict_paths = sorted(set(subdirA_dicts.keys() + subdirB_dicts.keys())) |
| - num_common_dict_paths = len(common_dict_paths) |
| + union_dict_paths = sorted(set( |
| + actuals_dicts.keys() + expectations_dicts.keys())) |
| + num_union_dict_paths = len(union_dict_paths) |
| dict_num = 0 |
| - for dict_path in common_dict_paths: |
| + for dict_path in union_dict_paths: |
| dict_num += 1 |
| logging.info('Generating pixel diffs for dict #%d of %d, "%s"...' % |
| - (dict_num, num_common_dict_paths, dict_path)) |
| - dictA = subdirA_dicts[dict_path] |
| - dictB = subdirB_dicts[dict_path] |
| - self._validate_dict_version(dictA) |
| - self._validate_dict_version(dictB) |
| - dictA_results = dictA[gm_json.JSONKEY_ACTUALRESULTS] |
| - dictB_results = dictB[gm_json.JSONKEY_ACTUALRESULTS] |
| - skp_names = sorted(set(dictA_results.keys() + dictB_results.keys())) |
| + (dict_num, num_union_dict_paths, dict_path)) |
| + |
| + dictA = actuals_dicts.get(dict_path, None) |
| + if dictA: |
| + self._validate_dict_version(dictA) |

rmistry 2014/08/04 20:52:50: We should be able to extract out 174-178 to 180-18...
epoger 2014/08/05 03:34:09: That's a little tricky, because that extracted function ...
rmistry 2014/08/05 13:46:54: Yes it does. Thanks.
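
To make the refactor under discussion concrete (purely hypothetical; not part of this patch set), the extracted helper might look roughly like this -- note it has to hand back both the results sub-dict and its key list, which may be the wrinkle epoger alludes to:

  def _get_validated_results(self, summary_dict):
    """Hypothetical helper: returns (results_dict, keys), or (None, []) if absent."""
    if not summary_dict:
      return None, []
    self._validate_dict_version(summary_dict)
    results_dict = summary_dict[gm_json.JSONKEY_ACTUALRESULTS]
    return results_dict, results_dict.keys()
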
| + dictA_results = dictA[gm_json.JSONKEY_ACTUALRESULTS] |
| + dictA_keys = dictA_results.keys() |
| + else: |
| + dictA_keys = [] |
| + |
| + dictB = expectations_dicts.get(dict_path, None) |
| + if dictB: |
| + self._validate_dict_version(dictB) |
| + dictB_results = dictB[gm_json.JSONKEY_ACTUALRESULTS] |

rmistry 2014/08/04 20:52:50: Why is this not JSONKEY_EXPECTEDRESULTS?
epoger 2014/08/05 03:34:09: Because, for now, I set this up to compare two sets ...
rmistry 2014/08/05 13:46:54: SetA and SetB make things a lot more intuitive for ...

| + dictB_keys = dictB_results.keys() |
| + else: |
| + dictB_keys = [] |
| + |
| + skp_names = sorted(set(dictA_keys + dictB_keys)) |
| + if self.truncate_results: |
| + skp_names = skp_names[1:3] |

rmistry 2014/08/04 20:52:50: Could you add a comment explaining the 1:3?
epoger 2014/08/05 03:34:09: Done.
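
Illustration only (per the "Done." reply, the explanatory comment itself lands in a later patch set not shown here): Python's [1:3] slice keeps the elements at indexes 1 and 2, so a truncated run compares at most two SKPs.

skp_names = ['aaa.skp', 'bbb.skp', 'ccc.skp', 'ddd.skp']
print skp_names[1:3]   # prints ['bbb.skp', 'ccc.skp'] -- indexes 1 and 2 only
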
| for skp_name in skp_names: |
| imagepairs_for_this_skp = [] |
| @@ -132,8 +196,8 @@ class RenderedPicturesComparisons(results.BaseComparisons): |
| whole_image_B = RenderedPicturesComparisons.get_multilevel( |
| dictB_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE) |
| imagepairs_for_this_skp.append(self._create_image_pair( |
| - test=skp_name, config=gm_json.JSONKEY_SOURCE_WHOLEIMAGE, |
| - image_dict_A=whole_image_A, image_dict_B=whole_image_B)) |
| + image_dict_A=whole_image_A, image_dict_B=whole_image_B, |
| + source_skp_name=skp_name, tilenum=None)) |
| tiled_images_A = RenderedPicturesComparisons.get_multilevel( |
| dictA_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES) |
| @@ -146,23 +210,23 @@ class RenderedPicturesComparisons(results.BaseComparisons): |
| num_tiles = len(tiled_images_A) |
| for tile_num in range(num_tiles): |
| imagepairs_for_this_skp.append(self._create_image_pair( |
| - test=skp_name, |
| - config='%s-%d' % (gm_json.JSONKEY_SOURCE_TILEDIMAGES, tile_num), |
| image_dict_A=tiled_images_A[tile_num], |
| - image_dict_B=tiled_images_B[tile_num])) |
| + image_dict_B=tiled_images_B[tile_num], |
| + source_skp_name=skp_name, tilenum=tile_num)) |
| for one_imagepair in imagepairs_for_this_skp: |
| if one_imagepair: |
| all_image_pairs.add_image_pair(one_imagepair) |
| result_type = one_imagepair.extra_columns_dict\ |
| - [results.KEY__EXTRACOLUMNS__RESULT_TYPE] |
| + [COLUMN__RESULT_TYPE] |
| if result_type != results.KEY__RESULT_TYPE__SUCCEEDED: |
| failing_image_pairs.add_image_pair(one_imagepair) |
| - # pylint: disable=W0201 |
| - self._results = { |
| - results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict(), |
| - results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict(), |
| + return { |
| + results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict( |
| + column_ids_in_order=ORDERED_COLUMN_IDS), |
| + results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict( |
| + column_ids_in_order=ORDERED_COLUMN_IDS), |
| } |
| def _validate_dict_version(self, result_dict): |
| @@ -184,14 +248,15 @@ class RenderedPicturesComparisons(results.BaseComparisons): |
| raise Exception('expected header_revision %d, but got %d' % ( |
| expected_header_revision, header_revision)) |
| - def _create_image_pair(self, test, config, image_dict_A, image_dict_B): |
| + def _create_image_pair(self, image_dict_A, image_dict_B, source_skp_name, |
| + tilenum): |
| """Creates an ImagePair object for this pair of images. |
| Args: |
| - test: string; name of the test |
| - config: string; name of the config |
| image_dict_A: dict with JSONKEY_IMAGE_* keys, or None if no image |
| image_dict_B: dict with JSONKEY_IMAGE_* keys, or None if no image |
| + source_skp_name: string; name of the source SKP file |
| + tilenum: which tile, or None if a wholeimage |
| Returns: |
| An ImagePair object, or None if both image_dict_A and image_dict_B are |
| @@ -223,28 +288,45 @@ class RenderedPicturesComparisons(results.BaseComparisons): |
| result_type = results.KEY__RESULT_TYPE__FAILED |
| extra_columns_dict = { |
| - results.KEY__EXTRACOLUMNS__CONFIG: config, |
| - results.KEY__EXTRACOLUMNS__RESULT_TYPE: result_type, |
| - results.KEY__EXTRACOLUMNS__TEST: test, |
| - # TODO(epoger): Right now, the client UI crashes if it receives |
| - # results that do not include this column. |
| - # Until we fix that, keep the client happy. |
| - results.KEY__EXTRACOLUMNS__BUILDER: 'TODO', |
| + COLUMN__RESULT_TYPE: result_type, |
| + COLUMN__SOURCE_SKP: source_skp_name, |
| } |
| + if tilenum == None: |
| + extra_columns_dict[COLUMN__TILED_OR_WHOLE] = 'whole' |

rmistry 2014/08/04 20:52:50: Change 'whole' and 'tiled' into a module level constant ...
epoger 2014/08/05 03:34:09: For these column VALUES (as opposed to KEYS), I'd ...
rmistry 2014/08/05 13:46:54: sgtm
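
A sketch of what rmistry's suggestion could look like (hypothetical constant names; per the thread, this patch keeps the bare string literals for the column values):

# Hypothetical module-level constants, next to the COLUMN__* descriptors above;
# the assignments below would then use them instead of 'whole' / 'tiled'.
VALUE__TILED_OR_WHOLE__WHOLE = 'whole'
VALUE__TILED_OR_WHOLE__TILED = 'tiled'
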
| + extra_columns_dict[COLUMN__TILENUM] = 'N/A' |
| + else: |
| + extra_columns_dict[COLUMN__TILED_OR_WHOLE] = 'tiled' |
| + extra_columns_dict[COLUMN__TILENUM] = str(tilenum) |
| try: |
| return imagepair.ImagePair( |
| image_diff_db=self._image_diff_db, |
| - base_url=self._image_base_url, |
| + base_url=self._image_base_gs_url, |
| imageA_relative_url=imageA_relative_url, |
| imageB_relative_url=imageB_relative_url, |
| extra_columns=extra_columns_dict) |
| except (KeyError, TypeError): |
| logging.exception( |
| 'got exception while creating ImagePair for' |
| - ' test="%s", config="%s", urlPair=("%s","%s")' % ( |
| - test, config, imageA_relative_url, imageB_relative_url)) |
| + ' urlPair=("%s","%s"), source_skp_name="%s", tilenum="%s"' % ( |
| + imageA_relative_url, imageB_relative_url, source_skp_name, |
| + tilenum)) |
| return None |
| + def _copy_dir_contents(self, source_dir, dest_dir): |
| + """Copy all contents of source_dir into dest_dir, recursing into subdirs. |
| -# TODO(epoger): Add main() so this can be called by vm_run_skia_try.sh |
| + Args: |
| + source_dir: path to source dir (GS URL or local filepath) |
| + dest_dir: path to destination dir (local filepath) |
| + |
| + The copy operates as a "merge with overwrite": any files in source_dir will |
| + be "overlaid" on top of the existing content in dest_dir. Existing files |
| + with the same names will be overwritten. |
| + """ |
| + if gs_utils.GSUtils.is_gs_url(source_dir): |
| + (bucket, path) = gs_utils.GSUtils.split_gs_url(source_dir) |
| + self._gs.download_dir_contents(source_bucket=bucket, source_dir=path, |
| + dest_dir=dest_dir) |
| + else: |
| + shutil.copytree(source_dir, dest_dir) |
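
A closing caveat, sketched under stated assumptions rather than asserted about this CL: shutil.copytree() raises OSError when dest_dir already exists, so the local branch above only yields the "merge with overwrite" behavior described in the docstring when each destination directory is populated exactly once. A literal local merge could look something like this (hypothetical helper, not in the patch):

import errno  # os and shutil are already imported at the top of this module

def _merge_copy_local_dir(source_dir, dest_dir):
  """Hypothetical merge-with-overwrite copy for local directories."""
  for dirpath, _, filenames in os.walk(source_dir):
    relative_subdir = os.path.relpath(dirpath, source_dir)
    target_subdir = os.path.normpath(os.path.join(dest_dir, relative_subdir))
    try:
      os.makedirs(target_subdir)
    except OSError as exception:
      if exception.errno != errno.EEXIST:
        raise
    for filename in filenames:
      shutil.copyfile(os.path.join(dirpath, filename),
                      os.path.join(target_subdir, filename))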