Index: cc/tiles/gpu_image_decode_controller.h |
diff --git a/cc/tiles/gpu_image_decode_controller.h b/cc/tiles/gpu_image_decode_controller.h |
index 390ef8ed972cbdc45ccc59cc5efe79c194edbc9f..2f21f8e039f728dc251f48186fa087ef8b699702 100644 |
--- a/cc/tiles/gpu_image_decode_controller.h |
+++ b/cc/tiles/gpu_image_decode_controller.h |
@@ -5,18 +5,56 @@ |
#ifndef CC_TILES_GPU_IMAGE_DECODE_CONTROLLER_H_ |
#define CC_TILES_GPU_IMAGE_DECODE_CONTROLLER_H_ |
+#include <list> |
vmpstr (2016/03/28 23:55:53): I don't see list usage... Or am I missing something?
ericrk (2016/03/29 23:11:30): removed.
#include <unordered_map> |
#include <unordered_set> |
+#include <vector> |
+#include "base/containers/mru_cache.h" |
+#include "base/memory/discardable_memory.h" |
+#include "base/memory/scoped_ptr.h" |
#include "base/synchronization/lock.h" |
+#include "base/trace_event/memory_dump_provider.h" |
#include "cc/base/cc_export.h" |
+#include "cc/resources/resource_format.h" |
#include "cc/tiles/image_decode_controller.h" |
+#include "skia/ext/refptr.h" |
+ |
+class SkImageTextureData; |
namespace cc { |
+class ContextProvider; |
+ |
+// GpuImageDecodeController handles the decode and upload of images that will |
+// be used by Skia's GPU raster path. It also maintains a cache of these |
+// decoded/ uploaded images for later re-use. |
vmpstr (2016/03/28 23:55:53): either "decoded / uploaded" or "decoded/uploaded"
ericrk (2016/03/29 23:11:30): Done.
+// |
+// Generally, when an image is required for raster, GpuImageDecodeController |
+// creates two tasks, one to decode the image, and one to upload the image to |
+// the GPU. These tasks are completed before the raster task which depends on |
+// the image. We need to separate decode and upload tasks, as decode can occur |
+// simultaneously on multiple threads, while upload requires the GL context |
+// lockmust happen on our non-concurrent raster thread. |
vmpstr (2016/03/28 23:55:53): s/lockmust/lock and must/
ericrk (2016/03/29 23:11:30): Done.
+// |
+// Decoded and Uploaded image data share a single cache entry. Depending on how |
+// far we've progressed, this cache entry may contain CPU-side decoded data, |
+// GPU-side uploaded data, or both. Because CPU-side decoded data is stored in |
+// discardable memory, and is only locked for short periods of time (until the |
+// upload completes), this memory is not counted against our sized cache |
+// limits. Uploaded GPU memory, being non- discardable, always counts against |
vmpstr (2016/03/28 23:55:53): s/non- discardable/non-discardable/ :P
ericrk (2016/03/29 23:11:30): Done.
+// our limits. |
+// |
+// In cases where the number of images needed exceeds our cache limits, we |
+// operate in an "at-raster" mode. In this mode, there are no decode/upload |
+// tasks, and images are decoded/uploaded as needed, immediately before being |
+// used in raster. Cache entries for at-raster tasks are marked as such, which |
+// prevents future tasks from taking a dependency on them and extending their |
+// lifetime longer than is necessary. |
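To make the decode/upload split described above concrete, here is a minimal standalone sketch, not the Chromium code: decode work may run concurrently on worker threads because it touches no GL state, while the upload step is serialized behind a lock standing in for the GL context lock. All names here (CpuPixels, GpuImage, g_context_lock, Decode, Upload) are hypothetical stand-ins for illustration only.

    #include <cstdint>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct CpuPixels { std::vector<uint8_t> rgba; };  // result of a decode task
    struct GpuImage { unsigned texture_id = 0; };     // result of an upload task

    std::mutex g_context_lock;  // stands in for the GL context lock

    // Decode touches no GL state, so it may run on any worker thread.
    CpuPixels Decode(size_t width, size_t height) {
      return CpuPixels{std::vector<uint8_t>(width * height * 4, 0)};
    }

    // Upload must hold the context lock, so uploads are effectively serialized.
    GpuImage Upload(const CpuPixels& pixels) {
      std::lock_guard<std::mutex> hold(g_context_lock);
      GpuImage image;
      image.texture_id = static_cast<unsigned>(pixels.rgba.size());  // placeholder
      return image;
    }

    int main() {
      CpuPixels a, b;
      std::thread decode_a([&] { a = Decode(64, 64); });
      std::thread decode_b([&] { b = Decode(128, 128); });
      decode_a.join();
      decode_b.join();
      GpuImage upload_a = Upload(a);  // raster would only run after this completes
      GpuImage upload_b = Upload(b);
      return upload_a.texture_id == 0 && upload_b.texture_id == 0;
    }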
class CC_EXPORT GpuImageDecodeController : public ImageDecodeController { |
public: |
- GpuImageDecodeController(); |
+ explicit GpuImageDecodeController(ContextProvider* context, |
+ ResourceFormat decode_format); |
~GpuImageDecodeController() override; |
// ImageDecodeController overrides. |
@@ -29,16 +67,113 @@ class CC_EXPORT GpuImageDecodeController : public ImageDecodeController { |
const DecodedDrawImage& decoded_image) override; |
void ReduceCacheUsage() override; |
+ // Called by Decode / Upload tasks. |
void DecodeImage(const DrawImage& image); |
+ void UploadImage(const DrawImage& image); |
+ void DecodeTaskCompleted(const DrawImage& image); |
+ void UploadTaskCompleted(const DrawImage& image); |
- void RemovePendingTaskForImage(const DrawImage& image); |
+ // For testing only. |
+ void SetCachedItemLimitForTesting(size_t limit) { |
+ cached_items_limit_ = limit; |
+ } |
+ void SetCachedBytesLimitForTesting(size_t limit) { |
+ cached_bytes_limit_ = limit; |
+ } |
+ size_t GetBytesUsedForTesting() const { return bytes_used_; } |
private: |
+ class ScopedRefImageLocked; |
+ |
+ enum class DecodedDataMode { GPU, CPU }; |
+ |
+ // Stores the CPU-side decoded bits of an image and supporting fields. |
+ struct DecodedImageData { |
+ DecodedImageData(); |
+ ~DecodedImageData(); |
+ |
+ // May be null if image not yet decoded. |
+ scoped_ptr<base::DiscardableMemory> data; |
+ uint32_t ref_count; |
+ bool is_locked; |
+ }; |
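The discardable-memory behavior that DecodedImageData relies on can be modeled in isolation: the CPU-side pixels are valid only while locked, and once unlocked (after the upload completes) the system is free to purge them, so a later Lock() may fail. FakeDiscardableMemory below is a hypothetical stand-in, not base::DiscardableMemory.

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Hypothetical stand-in for discardable memory: contents are valid only
    // while locked, and may be purged by the system once unlocked.
    class FakeDiscardableMemory {
     public:
      explicit FakeDiscardableMemory(size_t size) : bytes_(size), locked_(true) {}

      // Returns false if the contents were purged while unlocked.
      bool Lock() {
        if (purged_)
          return false;
        locked_ = true;
        return true;
      }
      void Unlock() { locked_ = false; }
      void PurgeForTesting() {
        if (!locked_) {
          purged_ = true;
          bytes_.clear();
        }
      }
      uint8_t* data() { return locked_ ? bytes_.data() : nullptr; }

     private:
      std::vector<uint8_t> bytes_;
      bool locked_;
      bool purged_ = false;
    };

    int main() {
      // Lifecycle described for DecodedImageData: keep the CPU copy locked only
      // until the upload finishes, then unlock so the OS may reclaim it.
      auto decoded = std::make_unique<FakeDiscardableMemory>(64 * 64 * 4);
      // ... decode into decoded->data(), upload the pixels to the GPU ...
      decoded->Unlock();
      bool still_resident = decoded->Lock();  // may legitimately fail after purge
      return still_resident ? 0 : 1;
    }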
+ |
+ // Stores the GPU-side image and supporting fields. |
+ struct UploadedImageData { |
+ UploadedImageData(); |
+ ~UploadedImageData(); |
+ |
+ // May be null if image not yet uploaded / prepared. |
+ skia::RefPtr<SkImage> image; |
+ // True if the image is pending upload, in which case it counts against our |
+ // memory budget even though the uploaded_image does not yet exist. |
+ bool pending; |
+ uint32_t ref_count; |
+ }; |
+ |
+ struct ImageData { |
+ ImageData(DecodedDataMode mode, size_t size); |
+ ~ImageData(); |
+ |
+ const DecodedDataMode mode; |
+ const size_t size; |
+ bool is_at_raster; |
+ |
+ DecodedImageData decode; |
+ UploadedImageData upload; |
+ }; |
+ |
+ // Similar to GetTaskForImageAndRef, but gets the dependent decode task |
+ // rather than the upload task. |
vmpstr (2016/03/28 23:55:53): Can you add a comment to the GetTaskForImageAndRef
ericrk (2016/03/29 23:11:30): Done.
+ scoped_refptr<ImageDecodeTask> GetImageDecodeTaskAndRefWithClassLock( |
+ const DrawImage& image, |
+ uint64_t prepare_tiles_id); |
+ |
+ void RefImageDecodeWithClassLock(const DrawImage& draw_image); |
vmpstr (2016/03/28 23:55:53): Are all private functions WithClassLock? :) I don'
ericrk (2016/03/29 23:11:30): Done.
+ void UnrefImageDecodeWithClassLock(const DrawImage& draw_image); |
+ void RefImageWithClassLock(const DrawImage& draw_image); |
+ void UnrefImageWithClassLock(const DrawImage& draw_image); |
+ |
+ // Ensures that the cache can hold an element of |required_size|, freeing |
+ // unreferenced cache entries if necessary to make room. |
+ bool EnsureCapacityWithClassLock(size_t required_size); |
+ bool CanFitSizeWithClassLock(size_t size) const; |
+ bool CanFitCountWithClassLock(size_t num_elements) const; |
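The EnsureCapacity/CanFit declarations imply an eviction walk over the MRU cache: drop least-recently-used entries that have no outstanding refs until the new allocation fits the byte and item limits. The following is a minimal standalone sketch of that idea under assumed semantics; SimpleImageCache, Entry, and the limits are hypothetical and this is not the Chromium implementation.

    #include <cstddef>
    #include <cstdint>
    #include <list>
    #include <unordered_map>

    struct Entry {
      size_t size;
      uint32_t ref_count;
    };

    class SimpleImageCache {
     public:
      SimpleImageCache(size_t byte_limit, size_t item_limit)
          : byte_limit_(byte_limit), item_limit_(item_limit) {}

      // Inserts |entry| under |id|, evicting old entries if needed. Returns
      // false (an "at-raster" style fallback) when the entry cannot fit.
      bool Add(uint32_t id, const Entry& entry) {
        if (!EnsureCapacity(entry.size))
          return false;
        entries_[id] = entry;
        lru_order_.push_back(id);  // newest at the back
        bytes_used_ += entry.size;
        return true;
      }

     private:
      bool CanFit(size_t size) const {
        return bytes_used_ + size <= byte_limit_ &&
               entries_.size() + 1 <= item_limit_;
      }

      bool EnsureCapacity(size_t required_size) {
        // Walk from least to most recently used; only unreferenced entries
        // are eligible for eviction.
        for (auto it = lru_order_.begin();
             it != lru_order_.end() && !CanFit(required_size);) {
          Entry& candidate = entries_[*it];
          if (candidate.ref_count == 0) {
            bytes_used_ -= candidate.size;
            entries_.erase(*it);
            it = lru_order_.erase(it);
          } else {
            ++it;
          }
        }
        return CanFit(required_size);
      }

      std::unordered_map<uint32_t, Entry> entries_;
      std::list<uint32_t> lru_order_;  // least recently used at the front
      size_t byte_limit_;
      size_t item_limit_;
      size_t bytes_used_ = 0;
    };

    int main() {
      SimpleImageCache cache(/*byte_limit=*/1024, /*item_limit=*/2);
      cache.Add(1, {512, 0});
      cache.Add(2, {512, 0});
      // Adding a third image forces the oldest unreferenced entry out first.
      return cache.Add(3, {256, 0}) ? 0 : 1;
    }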
+ |
+ void DecodeImageIfNecessaryWithClassLock(const DrawImage& draw_image, |
+ ImageData* image_data); |
+ void UploadImageIfNecessaryWithClassAndContextLock( |
+ const DrawImage& draw_image, |
+ ImageData* image_data); |
+ void DeletePendingImagesWithClassAndContextLock(); |
+ |
+ scoped_ptr<GpuImageDecodeController::ImageData> CreateImageData( |
+ const DrawImage& image); |
+ SkImageInfo CreateImageInfoForDrawImage(const DrawImage& draw_image) const; |
+ |
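As a rough sketch of what CreateImageInfoForDrawImage might reduce to, using the public Skia SkImageInfo::Make factory: the fixed kN32_SkColorType / kPremul_SkAlphaType choice below is an assumption for illustration only (the real code presumably derives the color type from |format_| and the dimensions from the DrawImage's scaled size).

    #include "third_party/skia/include/core/SkImageInfo.h"

    // Hypothetical helper: build the SkImageInfo describing the decode target.
    SkImageInfo MakeDecodeTargetInfo(int width, int height) {
      return SkImageInfo::Make(width, height, kN32_SkColorType,
                               kPremul_SkAlphaType);
    }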
+ const ResourceFormat format_; |
+ ContextProvider* context_; |
+ GrContextThreadSafeProxy* context_threadsafe_proxy_; |
+ |
+ // All members below this point must only be accessed while holding |lock_|. |
base::Lock lock_; |
- std::unordered_set<uint32_t> prerolled_images_; |
std::unordered_map<uint32_t, scoped_refptr<ImageDecodeTask>> |
- pending_image_tasks_; |
+ pending_image_upload_tasks_; |
+ std::unordered_map<uint32_t, scoped_refptr<ImageDecodeTask>> |
+ pending_image_decode_tasks_; |
+ |
+ using ImageDataMRUCache = base::MRUCache<uint32_t, scoped_ptr<ImageData>>; |
+ ImageDataMRUCache image_data_; |
+ |
+ size_t cached_items_limit_; |
+ size_t cached_bytes_limit_; |
+ size_t bytes_used_; |
+ |
+ // We can't release GPU backed SkImages without holding the Skia context lock, |
+ // so we add them to this list and defer deletion until the next time the lock |
+ // is held. |
+ std::vector<skia::RefPtr<SkImage>> images_pending_deletion_; |
}; |
} // namespace cc |
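The |images_pending_deletion_| member is a small deferred-deletion pattern: GPU-backed images released at a point where the Skia context lock is not held are parked in a vector and destroyed on the next operation that does hold the lock. A minimal standalone model of the idea follows; FakeGpuImage, g_context_lock, and the free functions are hypothetical stand-ins, and the real controller additionally guards the vector with |lock_|.

    #include <memory>
    #include <mutex>
    #include <vector>

    struct FakeGpuImage {
      ~FakeGpuImage() { /* real code would free the GPU texture here */ }
    };

    std::mutex g_context_lock;  // stands in for the Skia/GL context lock
    std::vector<std::shared_ptr<FakeGpuImage>> g_images_pending_deletion;

    // Called from paths that are not holding the context lock: park the image
    // instead of destroying it immediately.
    void ReleaseImage(std::shared_ptr<FakeGpuImage> image) {
      g_images_pending_deletion.push_back(std::move(image));
    }

    // Called from paths that already hold the context lock (e.g. upload): the
    // destructors run here, while the lock is held.
    void DeletePendingImagesWithContextLockHeld() {
      g_images_pending_deletion.clear();
    }

    int main() {
      ReleaseImage(std::make_shared<FakeGpuImage>());
      {
        std::lock_guard<std::mutex> hold(g_context_lock);
        DeletePendingImagesWithContextLockHeld();
      }
      return 0;
    }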