Chromium Code Reviews| Index: cc/tiles/gpu_image_decode_controller.cc |
| diff --git a/cc/tiles/gpu_image_decode_controller.cc b/cc/tiles/gpu_image_decode_controller.cc |
| index da9bd207010071c937c8c6490c1d6511db1b7ab8..c2684c9bf40fd2e6bc6c7241b13004692656859b 100644 |
| --- a/cc/tiles/gpu_image_decode_controller.cc |
| +++ b/cc/tiles/gpu_image_decode_controller.cc |
| @@ -4,35 +4,81 @@ |
| #include "cc/tiles/gpu_image_decode_controller.h" |
| +#include "base/memory/discardable_memory_allocator.h" |
| +#include "base/memory/scoped_ptr.h" |
| +#include "base/strings/stringprintf.h" |
| +#include "base/thread_task_runner_handle.h" |
| #include "cc/debug/devtools_instrumentation.h" |
| +#include "cc/output/context_provider.h" |
| #include "cc/raster/tile_task_runner.h" |
| +#include "gpu/command_buffer/client/context_support.h" |
| +#include "gpu/command_buffer/client/gles2_interface.h" |
| +#include "gpu_image_decode_controller.h" |
| #include "skia/ext/refptr.h" |
| +#include "skia/ext/texture_handle.h" |
| +#include "third_party/skia/include/core/SkCanvas.h" |
| +#include "third_party/skia/include/core/SkSurface.h" |
| +#include "third_party/skia/include/gpu/GrContext.h" |
| +#include "third_party/skia/include/gpu/GrTexture.h" |
| +#include "ui/gfx/skia_util.h" |
| +#include "ui/gl/trace_util.h" |
| namespace cc { |
| +namespace { |
| +static const int kMaxGpuImageBytes = 1024 * 1024 * 96; |
| +static const int kMaxDiscardableItems = 1000; |
| + |
| +// Returns true if an image would not be drawn and should therefore be |
| +// skipped rather than decoded. |
| +bool SkipImage(const DrawImage& draw_image) { |
| + if (!SkIRect::Intersects(draw_image.src_rect(), draw_image.image()->bounds())) |
| + return true; |
| + if (draw_image.matrix_is_decomposable() && |
|
vmpstr
2016/03/28 23:55:52
If matrix is not decomposable, then the scale is s
ericrk
2016/03/29 23:11:29
ah, good point.
|
| + (std::abs(draw_image.scale().width()) < |
| + std::numeric_limits<float>::epsilon() || |
| + std::abs(draw_image.scale().height()) < |
| + std::numeric_limits<float>::epsilon())) |
|
vmpstr
2016/03/28 23:55:53
Can you add braces for this if-statement please?
ericrk
2016/03/29 23:11:30
Done.
|
| + return true; |
| + return false; |
| +} |
| + |
| +SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage( |
| + const DrawImage& draw_image) { |
| + SkImage::DeferredTextureImageUsageParams params; |
| + params.fMatrix = draw_image.matrix(); |
| + params.fQuality = draw_image.filter_quality(); |
| + |
| + return params; |
| +} |
| + |
| +} // namespace |
| + |
| +// Task which decodes an image and stores the result in discardable memory. |
| +// This task does not use GPU resources and can be run on any thread. |
| class ImageDecodeTaskImpl : public ImageDecodeTask { |
| public: |
| ImageDecodeTaskImpl(GpuImageDecodeController* controller, |
| - const DrawImage& image, |
| + const DrawImage& draw_image, |
| uint64_t source_prepare_tiles_id) |
| : controller_(controller), |
| - image_(image), |
| - image_ref_(skia::SharePtr(image.image())), |
| - source_prepare_tiles_id_(source_prepare_tiles_id) {} |
| + image_(draw_image), |
| + image_ref_(skia::SharePtr(draw_image.image())), |
| + source_prepare_tiles_id_(source_prepare_tiles_id) { |
| + DCHECK(!SkipImage(draw_image)); |
| + } |
| // Overridden from Task: |
| void RunOnWorkerThread() override { |
| TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu", |
| "source_prepare_tiles_id", source_prepare_tiles_id_); |
| - devtools_instrumentation::ScopedImageDecodeTask image_decode_task( |
| - image_ref_.get()); |
| controller_->DecodeImage(image_); |
| } |
| // Overridden from TileTask: |
| void ScheduleOnOriginThread(TileTaskClient* client) override {} |
| void CompleteOnOriginThread(TileTaskClient* client) override { |
| - controller_->RemovePendingTaskForImage(image_); |
| + controller_->DecodeTaskCompleted(image_); |
| } |
| protected: |
| @@ -42,61 +88,590 @@ class ImageDecodeTaskImpl : public ImageDecodeTask { |
| GpuImageDecodeController* controller_; |
| DrawImage image_; |
| skia::RefPtr<const SkImage> image_ref_; |
| - uint64_t source_prepare_tiles_id_; |
| + const uint64_t source_prepare_tiles_id_; |
| DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); |
| }; |
| -GpuImageDecodeController::GpuImageDecodeController() {} |
| +// Task which creates an image from decoded data. Typically this involves |
| +// uploading data to the GPU, which requires this task be run on the non- |
| +// concurrent thread. |
| +class ImageUploadTaskImpl : public ImageDecodeTask { |
| + public: |
| + ImageUploadTaskImpl(GpuImageDecodeController* controller, |
| + const DrawImage& draw_image, |
| + scoped_refptr<ImageDecodeTask> decode_dependency, |
| + uint64_t source_prepare_tiles_id) |
| + : ImageDecodeTask(std::move(decode_dependency)), |
| + controller_(controller), |
| + image_(draw_image), |
| + image_ref_(skia::SharePtr(draw_image.image())), |
| + source_prepare_tiles_id_(source_prepare_tiles_id) { |
| + DCHECK(!SkipImage(draw_image)); |
| + } |
| + |
| + // Override from Task: |
| + void RunOnWorkerThread() override { |
| + TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu", |
| + "source_prepare_tiles_id", source_prepare_tiles_id_); |
| + controller_->UploadImage(image_); |
| + } |
| + |
| + void ScheduleOnOriginThread(TileTaskClient* client) override {} |
| + void CompleteOnOriginThread(TileTaskClient* client) override { |
| + controller_->UploadTaskCompleted(image_); |
| + } |
| + |
| + // Override from ImageDecodeTask: |
| + bool SupportsConcurrentExecution() const override { return false; } |
| + |
| + protected: |
| + ~ImageUploadTaskImpl() override {} |
| + |
| + private: |
| + GpuImageDecodeController* controller_; |
| + DrawImage image_; |
| + skia::RefPtr<const SkImage> image_ref_; |
| + uint64_t source_prepare_tiles_id_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl); |
| +}; |
| + |
| +GpuImageDecodeController::DecodedImageData::DecodedImageData() |
| + : ref_count(0), is_locked(false) {} |
| + |
| +GpuImageDecodeController::DecodedImageData::~DecodedImageData() = default; |
| + |
| +GpuImageDecodeController::UploadedImageData::UploadedImageData() |
| + : pending(false), ref_count(0) {} |
| + |
| +GpuImageDecodeController::UploadedImageData::~UploadedImageData() = default; |
| + |
| +GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode, |
| + size_t size) |
| + : mode(mode), size(size), is_at_raster(false) {} |
| + |
| +GpuImageDecodeController::ImageData::~ImageData() = default; |
| + |
| +GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, |
| + ResourceFormat decode_format) |
| + : format_(decode_format), |
| + context_(context), |
| + context_threadsafe_proxy_(context->GrContext()->threadSafeProxy()), |
| + image_data_(ImageDataMRUCache::NO_AUTO_EVICT), |
| + cached_items_limit_(kMaxDiscardableItems), |
| + cached_bytes_limit_(kMaxGpuImageBytes), |
| + bytes_used_(0) {} |
| GpuImageDecodeController::~GpuImageDecodeController() {} |
| bool GpuImageDecodeController::GetTaskForImageAndRef( |
| - const DrawImage& image, |
| + const DrawImage& draw_image, |
| uint64_t prepare_tiles_id, |
| scoped_refptr<ImageDecodeTask>* task) { |
| - auto image_id = image.image()->uniqueID(); |
| + if (SkipImage(draw_image)) { |
| + *task = nullptr; |
| + return false; |
| + } |
| + |
| base::AutoLock lock(lock_); |
| - if (prerolled_images_.count(image_id) != 0) { |
| + const auto image_id = draw_image.image()->uniqueID(); |
| + |
| + auto found = image_data_.Get(image_id); |
| + if (found != image_data_.end() && found->second->upload.image) { |
| + // The image is already uploaded. If it is not at-raster, or if it will now |
| + // fit in the cache (can be converted to not at-raster), ref and return. |
| + if (!found->second->is_at_raster || |
| + EnsureCapacityWithClassLock(found->second->size)) { |
| + found->second->is_at_raster = false; |
| + RefImageWithClassLock(draw_image); |
| + *task = nullptr; |
| + return true; |
| + } |
| + |
| + // Image is at-raster and doesn't fit in the cache. Keep using the image as |
| + // at-raster. |
| *task = nullptr; |
| return false; |
| } |
| + // We didn't have a pre-uploaded image. Try to find an existing upload task. |
| scoped_refptr<ImageDecodeTask>& existing_task = |
| - pending_image_tasks_[image_id]; |
| - if (!existing_task) { |
| - existing_task = make_scoped_refptr( |
| - new ImageDecodeTaskImpl(this, image, prepare_tiles_id)); |
| + pending_image_upload_tasks_[image_id]; |
| + if (existing_task) { |
| + // We had an existing upload task, ref to the image and return the task. |
|
vmpstr
2016/03/28 23:55:53
either "add ref to the image" or "ref the image"
ericrk
2016/03/29 23:11:30
Done.
|
| + RefImageWithClassLock(draw_image); |
| + *task = existing_task; |
| + return true; |
| + } |
| + |
| + // We will be creating a new upload task. If necessary, create a placeholder |
| + // ImageData to hold the result. |
| + scoped_ptr<ImageData> new_data; |
| + ImageData* data; |
| + if (found == image_data_.end()) { |
| + new_data = CreateImageData(draw_image); |
| + data = new_data.get(); |
| + } else { |
| + data = found->second.get(); |
| } |
| + |
| + // Ensure that the image we're about to decode/upload will fit in memory. |
| + // Otherwise do an at-raster decode. |
| + if (!EnsureCapacityWithClassLock(data->size)) { |
| + *task = nullptr; |
| + return false; |
| + } |
| + |
| + // If we had to create new image data, add it to our map now that we know it |
| + // will fit. |
| + if (new_data) |
| + found = image_data_.Put(image_id, std::move(new_data)); |
| + |
| + // Increment |bytes_used_| to reserve room for the pending image. As there is |
| + // no |image| created yet, mark the data as |pending|. This lets us account |
| + // for the image's size correctly if we cancel this task before uploading. |
| + DCHECK(!data->upload.pending); |
| + bytes_used_ += data->size; |
|
vmpstr
2016/03/28 23:55:53
There's some extremely rare but possible overflow
ericrk
2016/03/29 23:11:30
Updated CanFitSize to use checked math. Now that a
|
| + data->upload.pending = true; |
| + |
| + // Ref image and create an upload task. We will release this ref in |
|
vmpstr
2016/03/28 23:55:52
nit: create both upload and decode tasks.
ericrk
2016/03/29 23:11:30
Done.
|
| + // UploadTaskCompleted. |
| + RefImageWithClassLock(draw_image); |
| + existing_task = make_scoped_refptr(new ImageUploadTaskImpl( |
| + this, draw_image, |
| + GetImageDecodeTaskAndRefWithClassLock(draw_image, prepare_tiles_id), |
| + prepare_tiles_id)); |
| + |
| + RefImageWithClassLock(draw_image); |
|
vmpstr
2016/03/28 23:55:52
There's two Refs here (lines 240 and 246). Is that
ericrk
2016/03/29 23:11:30
This was intentional - one ref is kept alive by th
|
| *task = existing_task; |
| - return false; |
| + return true; |
| } |
| -void GpuImageDecodeController::UnrefImage(const DrawImage& image) { |
| - NOTREACHED(); |
| +void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { |
| + base::AutoLock lock(lock_); |
| + UnrefImageWithClassLock(draw_image); |
| } |
| DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( |
| const DrawImage& draw_image) { |
| - return DecodedDrawImage(draw_image.image(), draw_image.filter_quality()); |
| + TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); |
| + if (SkipImage(draw_image)) |
| + return DecodedDrawImage(nullptr, draw_image.filter_quality()); |
| + |
| + // We are being called during raster. The context lock must already be |
| + // acquired by the caller. |
| + context_->GetLock()->AssertAcquired(); |
|
vmpstr
2016/03/28 23:55:52
Since we have public functions that need context l
ericrk
2016/03/29 23:11:29
I agree.
|
| + base::AutoLock lock(lock_); |
| + |
| + const uint32_t unique_id = draw_image.image()->uniqueID(); |
| + auto found = image_data_.Peek(unique_id); |
| + if (found == image_data_.end()) { |
| + // We didn't find the image, we are doing at-raster decode. |
|
vmpstr
2016/03/28 23:55:53
Is it possible that the raster task started runnin
ericrk
2016/03/29 23:11:29
Going to just always assume at-raster here.
|
| + auto data = CreateImageData(draw_image); |
| + data->is_at_raster = true; |
| + DCHECK(!EnsureCapacityWithClassLock(data->size)); |
|
vmpstr
2016/03/28 23:55:52
I'm not sure if I asked you to add this, but since
ericrk
2016/03/29 23:11:30
Removed this, it works fine either way. We can inv
|
| + |
| + // Increment |bytes_used_| to reserve room for the pending image. As there |
| + // is no |image| created yet, mark the data as |pending|. |
| + bytes_used_ += data->size; |
|
vmpstr
2016/03/28 23:55:53
This is a little bit questionable for me. If this
ericrk
2016/03/29 23:11:30
Ah, fair point... I think a lot of stuff gets easi
|
| + data->upload.pending = true; |
| + |
| + found = image_data_.Put(unique_id, std::move(data)); |
| + } |
| + |
| + // Ref the image and decode so that they stay alive while we are |
| + // decoding/uploading. |
| + RefImageWithClassLock(draw_image); |
| + RefImageDecodeWithClassLock(draw_image); |
| + |
| + // We may or may not need to decode and upload the image we've found; the |
| + // following functions early-out if we have already decoded it. |
| + DecodeImageIfNecessaryWithClassLock(draw_image, found->second.get()); |
| + UploadImageIfNecessaryWithClassAndContextLock(draw_image, |
| + found->second.get()); |
| + // Unref the image decode, but not the image. The image ref will be released |
| + // in DrawWithImageFinished. |
| + UnrefImageDecodeWithClassLock(draw_image); |
| + |
| + SkImage* image = found->second->upload.image.get(); |
| + DCHECK(image); |
| + |
| + DecodedDrawImage decoded_draw_image(image, draw_image.filter_quality()); |
| + decoded_draw_image.set_at_raster_decode(found->second->is_at_raster); |
| + return decoded_draw_image; |
| } |
| void GpuImageDecodeController::DrawWithImageFinished( |
| - const DrawImage& image, |
| - const DecodedDrawImage& decoded_image) {} |
| + const DrawImage& draw_image, |
| + const DecodedDrawImage& decoded_draw_image) { |
| + if (SkipImage(draw_image)) |
| + return; |
| -void GpuImageDecodeController::ReduceCacheUsage() {} |
| + base::AutoLock lock(lock_); |
| + UnrefImageWithClassLock(draw_image); |
| + |
| + // We are mid-draw and holding the context lock, ensure we clean up any |
| + // textures (especially at-raster), which may have just been marked for |
| + // deletion by UnrefImage. |
| + DeletePendingImagesWithClassAndContextLock(); |
| +} |
| -void GpuImageDecodeController::DecodeImage(const DrawImage& image) { |
| - image.image()->preroll(); |
| +void GpuImageDecodeController::ReduceCacheUsage() { |
| base::AutoLock lock(lock_); |
| - prerolled_images_.insert(image.image()->uniqueID()); |
| + EnsureCapacityWithClassLock(0); |
| } |
| -void GpuImageDecodeController::RemovePendingTaskForImage( |
| - const DrawImage& image) { |
| +void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { |
| base::AutoLock lock(lock_); |
| - pending_image_tasks_.erase(image.image()->uniqueID()); |
| + auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| + DCHECK(found != image_data_.end()); |
| + DCHECK(!found->second->is_at_raster); |
| + DecodeImageIfNecessaryWithClassLock(draw_image, found->second.get()); |
| +} |
| + |
| +void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { |
| + ContextProvider::ScopedContextLock context_lock(context_); |
| + base::AutoLock lock(lock_); |
| + auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| + DCHECK(found != image_data_.end()); |
| + DCHECK(!found->second->is_at_raster); |
| + |
| + UploadImageIfNecessaryWithClassAndContextLock(draw_image, |
| + found->second.get()); |
| +} |
| + |
| +void GpuImageDecodeController::DecodeTaskCompleted( |
| + const DrawImage& draw_image) { |
| + base::AutoLock lock(lock_); |
| + // Decode task is complete, remove it from our list of pending tasks. |
| + pending_image_decode_tasks_.erase(draw_image.image()->uniqueID()); |
| + |
| + // While the decode task is active, we keep a ref on the decoded data. |
| + // Release that ref now that the decode is completed. |
| + UnrefImageDecodeWithClassLock(draw_image); |
| +} |
| + |
| +void GpuImageDecodeController::UploadTaskCompleted( |
| + const DrawImage& draw_image) { |
| + base::AutoLock lock(lock_); |
| + const uint32_t image_id = draw_image.image()->uniqueID(); |
| + // Upload task is complete, remove it from our list of pending tasks. |
| + pending_image_upload_tasks_.erase(image_id); |
| + |
| + // If the image was never uploaded (|pending| is true), we need to subtract |
| + // the pending bytes. |
|
vmpstr
2016/03/28 23:55:52
Otherwise, I guess the bytes are still accounted f
ericrk
2016/03/29 23:11:30
Moved all memory tracking to RefCountChanged/Ensur
|
| + auto found = image_data_.Peek(image_id); |
| + DCHECK(found != image_data_.end()); |
| + if (found->second->upload.pending) { |
| + bytes_used_ -= found->second->size; |
| + found->second->upload.pending = false; |
| + } |
| + |
| + // While the upload task is active, we keep a ref on both the image it will be |
| + // populating, as well as the decode it needs to populate it. Release these |
| + // refs now. |
| + UnrefImageDecodeWithClassLock(draw_image); |
| + UnrefImageWithClassLock(draw_image); |
| +} |
| + |
| +// Checks if an existing image decode exists. If not, returns a task to produce |
|
vmpstr
2016/03/28 23:55:53
I guess normally it's not possible that we're tryi
ericrk
2016/03/29 23:11:30
We create this when we create the upload task - so
|
| +// the requested decode. |
| +scoped_refptr<ImageDecodeTask> |
| +GpuImageDecodeController::GetImageDecodeTaskAndRefWithClassLock( |
| + const DrawImage& draw_image, |
| + uint64_t prepare_tiles_id) { |
| + lock_.AssertAcquired(); |
| + uint32_t image_id = draw_image.image()->uniqueID(); |
| + // This ref is kept alive while an upload task may need this decode. We |
| + // release this ref in UploadTaskCompleted. |
| + RefImageDecodeWithClassLock(draw_image); |
| + |
| + auto found = image_data_.Peek(image_id); |
| + if (found != image_data_.end() && found->second->decode.is_locked) { |
| + // We should never be creating a decode task for an at raster image. |
| + DCHECK(!found->second->is_at_raster); |
| + // We should never be creating a decode for an already-uploaded image. |
| + DCHECK(!found->second->upload.image); |
| + return nullptr; |
| + } |
| + |
| + scoped_refptr<ImageDecodeTask>& existing_task = |
| + pending_image_decode_tasks_[image_id]; |
| + if (!existing_task) { |
| + // Ref image and create a decode task. This ref will be released in |
| + // DecodeTaskCompleted. |
| + RefImageDecodeWithClassLock(draw_image); |
| + existing_task = make_scoped_refptr( |
| + new ImageDecodeTaskImpl(this, draw_image, prepare_tiles_id)); |
| + } |
| + return existing_task; |
| +} |
| + |
| +void GpuImageDecodeController::RefImageDecodeWithClassLock( |
| + const DrawImage& draw_image) { |
| + lock_.AssertAcquired(); |
| + auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| + DCHECK(found != image_data_.end()); |
| + // Sanity-check our ref-count. |
| + DCHECK_GE(found->second->decode.ref_count, 0u); |
| + ++found->second->decode.ref_count; |
| +} |
| + |
| +void GpuImageDecodeController::UnrefImageDecodeWithClassLock( |
| + const DrawImage& draw_image) { |
| + lock_.AssertAcquired(); |
| + auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| + DCHECK(found != image_data_.end()); |
| + DCHECK_GT(found->second->decode.ref_count, 0u); |
| + --found->second->decode.ref_count; |
| + if (found->second->decode.ref_count == 0) { |
| + if (found->second->decode.is_locked) { |
| + found->second->decode.is_locked = false; |
| + found->second->decode.data->Unlock(); |
| + } |
| + EnsureCapacityWithClassLock(0); |
| + } |
| +} |
| + |
| +void GpuImageDecodeController::RefImageWithClassLock( |
| + const DrawImage& draw_image) { |
| + lock_.AssertAcquired(); |
| + auto image_id = draw_image.image()->uniqueID(); |
| + auto found = image_data_.Peek(image_id); |
| + DCHECK(found != image_data_.end()); |
| + ++found->second->upload.ref_count; |
| +} |
| + |
| +void GpuImageDecodeController::UnrefImageWithClassLock( |
| + const DrawImage& draw_image) { |
| + lock_.AssertAcquired(); |
| + auto image_id = draw_image.image()->uniqueID(); |
| + auto found = image_data_.Peek(image_id); |
| + DCHECK(found != image_data_.end()); |
| + DCHECK_GT(found->second->upload.ref_count, 0u); |
| + --found->second->upload.ref_count; |
| + if (found->second->upload.ref_count == 0) { |
| + // If the ref count has reached 0, the image should no longer be pending. |
| + DCHECK(!found->second->upload.pending); |
| + |
| + // If this image was created as at-raster, it may now fit into the cache. |
| + // If so, we'd like to convert it to non-at-raster. Otherwise we'd like to |
| + // delete the image. |
| + // |
| + // We can handle both cases by converting the image into non-at-raster and |
| + // then calling EnsureCapacityWithClassLock. If the image now fits, |
| + // EnsureCapacityWithClassLock will leave it in the cache, otherwise it |
| + // will be removed (independent of the at-raster flag). |
| + found->second->is_at_raster = false; |
| + EnsureCapacityWithClassLock(0); |
| + } |
| +} |
| + |
| +// Ensures that we can fit a new image of size |required_size| in our cache. In |
| +// doing so, this function will free unreferenced image data as necessary to |
| +// create room. |
| +bool GpuImageDecodeController::EnsureCapacityWithClassLock( |
| + size_t required_size) { |
| + lock_.AssertAcquired(); |
| + const size_t new_elements = required_size ? 1 : 0; |
| + if (CanFitSizeWithClassLock(required_size) && |
| + CanFitCountWithClassLock(new_elements)) |
| + return true; |
| + |
| + // While we are over memory or item capacity, we iterate through our set of |
| + // cached image data in LRU order. For each image, we can do two things: |
| + // 1) We can free the uploaded image, reducing the memory usage of the cache |
| + // and 2) we can remove the entry entirely, reducing the count of elements in |
| + // the cache. |
| + for (auto it = image_data_.rbegin(); it != image_data_.rend();) { |
| + if (it->second->decode.ref_count != 0 || |
| + it->second->upload.ref_count != 0) { |
| + ++it; |
| + continue; |
| + } |
| + |
| + // Current entry has no refs. Ensure it is not locked or pending. |
| + DCHECK(!it->second->decode.is_locked); |
| + DCHECK(!it->second->upload.pending); |
| + |
| + // Free the uploaded image if possible. |
| + if (it->second->upload.image) { |
| + DCHECK_GE(bytes_used_, it->second->size); |
| + bytes_used_ -= it->second->size; |
| + images_pending_deletion_.push_back(std::move(it->second->upload.image)); |
| + it->second->upload.image = nullptr; |
| + } |
| + |
| + // Free the entire entry if necessary. |
| + if (!CanFitCountWithClassLock(new_elements)) { |
| + it = image_data_.Erase(it); |
| + } else { |
| + ++it; |
| + } |
| + |
| + if (CanFitSizeWithClassLock(required_size) && |
| + CanFitCountWithClassLock(new_elements)) |
| + return true; |
| + } |
| + |
| + // We couldn't reach a state where we are under our memory / count limits. |
| + return false; |
| +} |
| + |
| +bool GpuImageDecodeController::CanFitSizeWithClassLock(size_t size) const { |
| + return bytes_used_ + size <= cached_bytes_limit_; |
|
vmpstr
2016/03/28 23:55:52
There's a possible overflow here which would erron
ericrk
2016/03/29 23:11:30
Added safe math. This function is used to gate all
|
| +} |
| + |
| +bool GpuImageDecodeController::CanFitCountWithClassLock( |
| + size_t num_elements) const { |
| + return image_data_.size() + num_elements <= cached_items_limit_; |
| +} |
| + |
| +void GpuImageDecodeController::DecodeImageIfNecessaryWithClassLock( |
| + const DrawImage& draw_image, |
| + ImageData* image_data) { |
| + lock_.AssertAcquired(); |
| + |
| + if (image_data->upload.image) { |
| + // We already have an uploaded image, no reason to decode. |
| + return; |
| + } |
| + |
| + if (image_data->decode.data && |
| + (image_data->decode.is_locked || image_data->decode.data->Lock())) { |
| + // We already decoded this, or we just needed to lock, early out. |
| + image_data->decode.is_locked = true; |
| + return; |
| + } |
| + |
| + TRACE_EVENT0("cc", "GpuImageDecodeController::DecodeImage"); |
| + DCHECK_GT(image_data->decode.ref_count, 0u); |
| + |
| + image_data->decode.data = nullptr; |
| + scoped_ptr<base::DiscardableMemory> backing_memory; |
| + { |
| + base::AutoUnlock unlock(lock_); |
| + switch (image_data->mode) { |
| + case DecodedDataMode::CPU: { |
| + backing_memory = |
| + base::DiscardableMemoryAllocator::GetInstance() |
| + ->AllocateLockedDiscardableMemory(image_data->size); |
| + SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); |
| + if (!draw_image.image()->readPixels(image_info, backing_memory->data(), |
| + image_info.minRowBytes(), 0, 0, |
| + SkImage::kDisallow_CachingHint)) { |
| + backing_memory->Unlock(); |
| + return; |
| + } |
| + break; |
| + } |
| + case DecodedDataMode::GPU: { |
| + backing_memory = |
| + base::DiscardableMemoryAllocator::GetInstance() |
| + ->AllocateLockedDiscardableMemory(image_data->size); |
| + auto params = ParamsFromDrawImage(draw_image); |
| + if (!draw_image.image()->getDeferredTextureImageData( |
| + *context_threadsafe_proxy_, ¶ms, 1, |
| + backing_memory->data())) { |
| + backing_memory->Unlock(); |
| + return; |
| + } |
| + break; |
| + } |
| + } |
| + } |
| + |
| + if (!image_data->decode.data) { |
|
vmiura
2016/03/29 01:33:17
Can this be unconditional, since image_data->decod
ericrk
2016/03/29 23:11:30
Not in this case, as we unlock during the actual d
|
| + image_data->decode.data = std::move(backing_memory); |
| + DCHECK(!image_data->decode.is_locked); |
| + image_data->decode.is_locked = true; |
| + } |
| +} |
| + |
| +void GpuImageDecodeController::UploadImageIfNecessaryWithClassAndContextLock( |
| + const DrawImage& draw_image, |
| + ImageData* image_data) { |
| + context_->GetLock()->AssertAcquired(); |
| + lock_.AssertAcquired(); |
| + |
| + if (image_data->upload.image) { |
| + // Someone has uploaded this image before us (at raster). |
| + return; |
| + } |
| + |
| + TRACE_EVENT0("cc", "GpuImageDecodeController::UploadImage"); |
| + DCHECK(image_data->decode.is_locked); |
| + DCHECK_GT(image_data->decode.ref_count, 0u); |
| + DCHECK_GT(image_data->upload.ref_count, 0u); |
| + |
| + // We are about to upload a new image and are holding the context lock. |
| + // Ensure that any images which have been marked for deletion are actually |
| + // cleaned up so we don't exceed our memory limit during this upload. |
| + DeletePendingImagesWithClassAndContextLock(); |
| + |
| + skia::RefPtr<SkImage> uploaded_image; |
| + { |
| + base::AutoUnlock unlock(lock_); |
| + switch (image_data->mode) { |
| + case DecodedDataMode::CPU: { |
| + SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); |
| + uploaded_image = skia::AdoptRef(SkImage::NewFromRaster( |
| + image_info, image_data->decode.data->data(), |
| + image_info.minRowBytes(), [](const void*, void*) {}, nullptr)); |
| + break; |
| + } |
| + case DecodedDataMode::GPU: { |
| + uploaded_image = |
| + skia::AdoptRef(SkImage::NewFromDeferredTextureImageData( |
| + context_->GrContext(), image_data->decode.data->data(), |
| + SkBudgeted::kNo)); |
| + break; |
| + } |
| + } |
| + } |
| + DCHECK(uploaded_image); |
| + |
| + // An at-raster task may have uploaded this image while we were unlocked. If |
| + // so, ignore our result. |
| + if (!image_data->upload.image) { |
| + image_data->upload.image = std::move(uploaded_image); |
| + DCHECK(image_data->upload.pending); |
| + image_data->upload.pending = false; |
| + } |
| +} |
| + |
| +scoped_ptr<GpuImageDecodeController::ImageData> |
| +GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { |
| + DecodedDataMode mode; |
| + SkImageInfo info = CreateImageInfoForDrawImage(draw_image); |
| + SkImage::DeferredTextureImageUsageParams params = |
| + ParamsFromDrawImage(draw_image); |
| + size_t data_size = draw_image.image()->getDeferredTextureImageData( |
| + *context_threadsafe_proxy_, ¶ms, 1, nullptr); |
| + |
| + if (data_size == 0) { |
| + // Can't upload image, too large or other failure. Try to use SW fallback. |
| + data_size = info.getSafeSize(info.minRowBytes()); |
| + mode = DecodedDataMode::CPU; |
| + } else { |
| + mode = DecodedDataMode::GPU; |
| + } |
| + |
| + return make_scoped_ptr(new ImageData(mode, data_size)); |
| +} |
| + |
| +void GpuImageDecodeController::DeletePendingImagesWithClassAndContextLock() { |
| + context_->GetLock()->AssertAcquired(); |
| + lock_.AssertAcquired(); |
| + images_pending_deletion_.clear(); |
| +} |
| + |
| +SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( |
| + const DrawImage& draw_image) const { |
| + return SkImageInfo::Make( |
| + draw_image.image()->width(), draw_image.image()->height(), |
| + ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType); |
| } |
| } // namespace cc |