Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/tiles/gpu_image_decode_controller.h" | 5 #include "cc/tiles/gpu_image_decode_controller.h" |
| 6 | 6 |
| 7 #include "base/memory/discardable_memory_allocator.h" | |
| 8 #include "base/memory/scoped_ptr.h" | |
| 9 #include "base/numerics/safe_math.h" | |
| 10 #include "base/strings/stringprintf.h" | |
| 11 #include "base/thread_task_runner_handle.h" | |
| 7 #include "cc/debug/devtools_instrumentation.h" | 12 #include "cc/debug/devtools_instrumentation.h" |
| 13 #include "cc/output/context_provider.h" | |
| 8 #include "cc/raster/tile_task_runner.h" | 14 #include "cc/raster/tile_task_runner.h" |
| 15 #include "gpu/command_buffer/client/context_support.h" | |
| 16 #include "gpu/command_buffer/client/gles2_interface.h" | |
| 17 #include "gpu_image_decode_controller.h" | |
| 9 #include "skia/ext/refptr.h" | 18 #include "skia/ext/refptr.h" |
| 19 #include "skia/ext/texture_handle.h" | |
| 20 #include "third_party/skia/include/core/SkCanvas.h" | |
| 21 #include "third_party/skia/include/core/SkSurface.h" | |
| 22 #include "third_party/skia/include/gpu/GrContext.h" | |
| 23 #include "third_party/skia/include/gpu/GrTexture.h" | |
| 24 #include "ui/gfx/skia_util.h" | |
| 25 #include "ui/gl/trace_util.h" | |
| 10 | 26 |
| 11 namespace cc { | 27 namespace cc { |
| 28 namespace { | |
| 12 | 29 |
| 30 static const int kMaxGpuImageBytes = 1024 * 1024 * 96; | |
| 31 static const int kMaxDiscardableItems = 2000; | |
| 32 | |
| 33 // Returns true if an image would not be drawn and should therefore be | |
| 34 // skipped rather than decoded. | |
| 35 bool SkipImage(const DrawImage& draw_image) { | |
| 36 if (!SkIRect::Intersects(draw_image.src_rect(), draw_image.image()->bounds())) | |
| 37 return true; | |
| 38 if (std::abs(draw_image.scale().width()) < | |
| 39 std::numeric_limits<float>::epsilon() || | |
| 40 std::abs(draw_image.scale().height()) < | |
| 41 std::numeric_limits<float>::epsilon()) { | |
| 42 return true; | |
| 43 } | |
| 44 return false; | |
| 45 } | |
| 46 | |
| 47 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage( | |
| 48 const DrawImage& draw_image) { | |
| 49 SkImage::DeferredTextureImageUsageParams params; | |
| 50 params.fMatrix = draw_image.matrix(); | |
| 51 params.fQuality = draw_image.filter_quality(); | |
| 52 | |
| 53 return params; | |
| 54 } | |
| 55 | |
| 56 } // namespace | |
| 57 | |
| 58 // Task which decodes an image and stores the result in discardable memory. | |
| 59 // This task does not use GPU resources and can be run on any thread. | |
| 13 class ImageDecodeTaskImpl : public ImageDecodeTask { | 60 class ImageDecodeTaskImpl : public ImageDecodeTask { |
| 14 public: | 61 public: |
| 15 ImageDecodeTaskImpl(GpuImageDecodeController* controller, | 62 ImageDecodeTaskImpl(GpuImageDecodeController* controller, |
| 16 const DrawImage& image, | 63 const DrawImage& draw_image, |
| 17 uint64_t source_prepare_tiles_id) | 64 uint64_t source_prepare_tiles_id) |
| 18 : controller_(controller), | 65 : controller_(controller), |
| 19 image_(image), | 66 image_(draw_image), |
| 20 image_ref_(skia::SharePtr(image.image())), | 67 image_ref_(skia::SharePtr(draw_image.image())), |
| 21 source_prepare_tiles_id_(source_prepare_tiles_id) {} | 68 source_prepare_tiles_id_(source_prepare_tiles_id) { |
| 69 DCHECK(!SkipImage(draw_image)); | |
| 70 } | |
| 22 | 71 |
| 23 // Overridden from Task: | 72 // Overridden from Task: |
| 24 void RunOnWorkerThread() override { | 73 void RunOnWorkerThread() override { |
| 25 TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu", | 74 TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu", |
| 26 "source_prepare_tiles_id", source_prepare_tiles_id_); | 75 "source_prepare_tiles_id", source_prepare_tiles_id_); |
| 27 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( | |
| 28 image_ref_.get()); | |
| 29 controller_->DecodeImage(image_); | 76 controller_->DecodeImage(image_); |
| 30 } | 77 } |
| 31 | 78 |
| 32 // Overridden from TileTask: | 79 // Overridden from TileTask: |
| 33 void ScheduleOnOriginThread(TileTaskClient* client) override {} | 80 void ScheduleOnOriginThread(TileTaskClient* client) override {} |
| 34 void CompleteOnOriginThread(TileTaskClient* client) override { | 81 void CompleteOnOriginThread(TileTaskClient* client) override { |
| 35 controller_->RemovePendingTaskForImage(image_); | 82 controller_->DecodeTaskCompleted(image_); |
| 36 } | 83 } |
| 37 | 84 |
| 38 protected: | 85 protected: |
| 39 ~ImageDecodeTaskImpl() override {} | 86 ~ImageDecodeTaskImpl() override {} |
| 40 | 87 |
| 41 private: | 88 private: |
| 42 GpuImageDecodeController* controller_; | 89 GpuImageDecodeController* controller_; |
| 43 DrawImage image_; | 90 DrawImage image_; |
| 44 skia::RefPtr<const SkImage> image_ref_; | 91 skia::RefPtr<const SkImage> image_ref_; |
| 92 const uint64_t source_prepare_tiles_id_; | |
| 93 | |
| 94 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); | |
| 95 }; | |
| 96 | |
| 97 // Task which creates an image from decoded data. Typically this involves | |
| 98 // uploading data to the GPU, which requires this task be run on the non- | |
| 99 // concurrent thread. | |
| 100 class ImageUploadTaskImpl : public ImageDecodeTask { | |
| 101 public: | |
| 102 ImageUploadTaskImpl(GpuImageDecodeController* controller, | |
| 103 const DrawImage& draw_image, | |
| 104 scoped_refptr<ImageDecodeTask> decode_dependency, | |
| 105 uint64_t source_prepare_tiles_id) | |
| 106 : ImageDecodeTask(std::move(decode_dependency)), | |
| 107 controller_(controller), | |
| 108 image_(draw_image), | |
| 109 image_ref_(skia::SharePtr(draw_image.image())), | |
| 110 source_prepare_tiles_id_(source_prepare_tiles_id) { | |
| 111 DCHECK(!SkipImage(draw_image)); | |
| 112 } | |
| 113 | |
| 114 // Override from Task: | |
| 115 void RunOnWorkerThread() override { | |
| 116 TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu", | |
| 117 "source_prepare_tiles_id", source_prepare_tiles_id_); | |
| 118 controller_->UploadImage(image_); | |
| 119 } | |
| 120 | |
| 121 void ScheduleOnOriginThread(TileTaskClient* client) override {} | |
| 122 void CompleteOnOriginThread(TileTaskClient* client) override { | |
| 123 controller_->UploadTaskCompleted(image_); | |
| 124 } | |
| 125 | |
| 126 // Override from ImageDecodeTask: | |
| 127 bool SupportsConcurrentExecution() const override { return false; } | |
| 128 | |
| 129 protected: | |
| 130 ~ImageUploadTaskImpl() override {} | |
| 131 | |
| 132 private: | |
| 133 GpuImageDecodeController* controller_; | |
| 134 DrawImage image_; | |
| 135 skia::RefPtr<const SkImage> image_ref_; | |
| 45 uint64_t source_prepare_tiles_id_; | 136 uint64_t source_prepare_tiles_id_; |
| 46 | 137 |
| 47 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); | 138 DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl); |
| 48 }; | 139 }; |
| 49 | 140 |
| 50 GpuImageDecodeController::GpuImageDecodeController() {} | 141 GpuImageDecodeController::DecodedImageData::DecodedImageData() |
| 51 | 142 : ref_count(0), is_locked(false), decode_failure(false) {} |
| 52 GpuImageDecodeController::~GpuImageDecodeController() {} | 143 |
| 144 GpuImageDecodeController::DecodedImageData::~DecodedImageData() = default; | |
| 145 | |
| 146 GpuImageDecodeController::UploadedImageData::UploadedImageData() | |
| 147 : budgeted(false), ref_count(0) {} | |
| 148 | |
| 149 GpuImageDecodeController::UploadedImageData::~UploadedImageData() = default; | |
| 150 | |
| 151 GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode, | |
| 152 size_t size) | |
| 153 : mode(mode), size(size), is_at_raster(false) {} | |
| 154 | |
| 155 GpuImageDecodeController::ImageData::~ImageData() = default; | |
| 156 | |
| 157 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, | |
| 158 ResourceFormat decode_format) | |
| 159 : format_(decode_format), | |
| 160 context_(context), | |
| 161 context_threadsafe_proxy_( | |
| 162 skia::AdoptRef(context->GrContext()->threadSafeProxy())), | |
| 163 image_data_(ImageDataMRUCache::NO_AUTO_EVICT), | |
| 164 cached_items_limit_(kMaxDiscardableItems), | |
| 165 cached_bytes_limit_(kMaxGpuImageBytes), | |
| 166 bytes_used_(0) {} | |
| 167 | |
| 168 GpuImageDecodeController::~GpuImageDecodeController() { | |
| 169 // SetShouldAggressivelyFreeResources will zero our limits and free all | |
| 170 // outstanding image memory. | |
| 171 SetShouldAggressivelyFreeResources(true); | |
| 172 } | |
| 53 | 173 |
| 54 bool GpuImageDecodeController::GetTaskForImageAndRef( | 174 bool GpuImageDecodeController::GetTaskForImageAndRef( |
| 55 const DrawImage& image, | 175 const DrawImage& draw_image, |
| 56 uint64_t prepare_tiles_id, | 176 uint64_t prepare_tiles_id, |
| 57 scoped_refptr<ImageDecodeTask>* task) { | 177 scoped_refptr<ImageDecodeTask>* task) { |
| 58 auto image_id = image.image()->uniqueID(); | 178 if (SkipImage(draw_image)) { |
| 59 base::AutoLock lock(lock_); | |
| 60 if (prerolled_images_.count(image_id) != 0) { | |
| 61 *task = nullptr; | 179 *task = nullptr; |
| 62 return false; | 180 return false; |
| 63 } | 181 } |
| 64 | 182 |
| 183 base::AutoLock lock(lock_); | |
| 184 const auto image_id = draw_image.image()->uniqueID(); | |
| 185 | |
| 186 auto found = image_data_.Get(image_id); | |
| 187 if (found != image_data_.end()) { | |
| 188 ImageData* image_data = found->second.get(); | |
| 189 if (image_data->is_at_raster) { | |
| 190 // Image is at-raster; just return, as this usage will be at-raster as well. | |
| 191 *task = nullptr; | |
| 192 return false; | |
| 193 } | |
| 194 | |
| 195 if (image_data->upload.image) { | |
| 196 // The image is already uploaded, ref and return. | |
| 197 RefImage(draw_image); | |
| 198 *task = nullptr; | |
| 199 return true; | |
| 200 } | |
| 201 } | |
| 202 | |
| 203 // We didn't have a pre-uploaded image, so we need an upload task. Try to find | |
| 204 // an existing one. | |
| 65 scoped_refptr<ImageDecodeTask>& existing_task = | 205 scoped_refptr<ImageDecodeTask>& existing_task = |
| 66 pending_image_tasks_[image_id]; | 206 pending_image_upload_tasks_[image_id]; |
| 67 if (!existing_task) { | 207 if (existing_task) { |
| 68 existing_task = make_scoped_refptr( | 208 // We had an existing upload task, ref the image and return the task. |
| 69 new ImageDecodeTaskImpl(this, image, prepare_tiles_id)); | 209 RefImage(draw_image); |
| 70 } | 210 *task = existing_task; |
| 211 return true; | |
| 212 } | |
| 213 | |
| 214 // We will be creating a new upload task. If necessary, create a placeholder | |
| 215 // ImageData to hold the result. | |
| 216 scoped_ptr<ImageData> new_data; | |
| 217 ImageData* data; | |
| 218 if (found == image_data_.end()) { | |
| 219 new_data = CreateImageData(draw_image); | |
| 220 data = new_data.get(); | |
| 221 } else { | |
| 222 data = found->second.get(); | |
| 223 } | |
| 224 | |
| 225 // Ensure that the image we're about to decode/upload will fit in memory. | |
| 226 if (!EnsureCapacity(data->size)) { | |
| 227 // Image will not fit, do an at-raster decode. | |
| 228 *task = nullptr; | |
| 229 return false; | |
| 230 } | |
| 231 | |
| 232 // If we had to create new image data, add it to our map now that we know it | |
| 233 // will fit. | |
| 234 if (new_data) | |
| 235 found = image_data_.Put(image_id, std::move(new_data)); | |
| 236 | |
| 237 // Ref the image and create upload and decode tasks. We will release this ref | |
| 238 // in UploadTaskCompleted. | |
| 239 RefImage(draw_image); | |
| 240 existing_task = make_scoped_refptr(new ImageUploadTaskImpl( | |
| 241 this, draw_image, GetImageDecodeTaskAndRef(draw_image, prepare_tiles_id), | |
| 242 prepare_tiles_id)); | |
| 243 | |
| 244 // Ref the image again - this ref is owned by the caller, and it is their | |
| 245 // responsibility to release it by calling UnrefImage. | |
| 246 RefImage(draw_image); | |
| 71 *task = existing_task; | 247 *task = existing_task; |
| 72 return false; | 248 return true; |
| 73 } | 249 } |
| 74 | 250 |
| 75 void GpuImageDecodeController::UnrefImage(const DrawImage& image) { | 251 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { |
| 76 NOTREACHED(); | 252 base::AutoLock lock(lock_); |
| 253 UnrefImageInternal(draw_image); | |
| 77 } | 254 } |
| 78 | 255 |
| 79 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( | 256 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( |
| 80 const DrawImage& draw_image) { | 257 const DrawImage& draw_image) { |
| 81 return DecodedDrawImage(draw_image.image(), draw_image.filter_quality()); | 258 // We are being called during raster. The context lock must already be |
| 259 // acquired by the caller. | |
| 260 context_->GetLock()->AssertAcquired(); | |
| 261 | |
| 262 if (SkipImage(draw_image)) | |
| 263 return DecodedDrawImage(nullptr, draw_image.filter_quality()); | |
| 264 | |
| 265 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); | |
| 266 | |
| 267 base::AutoLock lock(lock_); | |
| 268 const uint32_t unique_id = draw_image.image()->uniqueID(); | |
| 269 auto found = image_data_.Peek(unique_id); | |
| 270 if (found == image_data_.end()) { | |
| 271 // We didn't find the image, create a new entry. | |
| 272 auto data = CreateImageData(draw_image); | |
| 273 found = image_data_.Put(unique_id, std::move(data)); | |
| 274 } | |
| 275 | |
| 276 ImageData* image_data = found->second.get(); | |
| 277 | |
| 278 if (!image_data->upload.budgeted) { | |
| 279 // If image data is not budgeted by this point, it is at-raster. | |
| 280 image_data->is_at_raster = true; | |
| 281 } | |
| 282 | |
| 283 // Ref the image and decode so that they stay alive while we are | |
| 284 // decoding/uploading. | |
| 285 RefImage(draw_image); | |
| 286 RefImageDecode(draw_image); | |
| 287 | |
| 288 // We may or may not need to decode and upload the image we've found; the | |
| 289 // following functions early-out if we have already decoded. | |
| 290 DecodeImageIfNecessary(draw_image, image_data); | |
| 291 UploadImageIfNecessary(draw_image, image_data); | |
| 292 // Unref the image decode, but not the image. The image ref will be released | |
| 293 // in DrawWithImageFinished. | |
| 294 UnrefImageDecode(draw_image); | |
| 295 | |
| 296 SkImage* image = image_data->upload.image.get(); | |
| 297 DCHECK(image || image_data->decode.decode_failure); | |
| 298 | |
| 299 DecodedDrawImage decoded_draw_image(image, draw_image.filter_quality()); | |
| 300 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster); | |
| 301 return decoded_draw_image; | |
| 82 } | 302 } |
| 83 | 303 |
| 84 void GpuImageDecodeController::DrawWithImageFinished( | 304 void GpuImageDecodeController::DrawWithImageFinished( |
| 85 const DrawImage& image, | 305 const DrawImage& draw_image, |
| 86 const DecodedDrawImage& decoded_image) {} | 306 const DecodedDrawImage& decoded_draw_image) { |
| 87 | 307 // We are being called during raster. The context lock must already be |
| 88 void GpuImageDecodeController::ReduceCacheUsage() {} | 308 // acquired by the caller. |
| 89 | 309 context_->GetLock()->AssertAcquired(); |
| 90 void GpuImageDecodeController::DecodeImage(const DrawImage& image) { | 310 |
| 91 image.image()->preroll(); | 311 if (SkipImage(draw_image)) |
| 92 base::AutoLock lock(lock_); | 312 return; |
| 93 prerolled_images_.insert(image.image()->uniqueID()); | 313 |
| 94 } | 314 base::AutoLock lock(lock_); |
| 95 | 315 UnrefImageInternal(draw_image); |
| 96 void GpuImageDecodeController::RemovePendingTaskForImage( | 316 |
| 97 const DrawImage& image) { | 317 // We are mid-draw and holding the context lock, ensure we clean up any |
| 98 base::AutoLock lock(lock_); | 318 // textures (especially at-raster), which may have just been marked for |
| 99 pending_image_tasks_.erase(image.image()->uniqueID()); | 319 // deletion by UnrefImage. |
| 320 DeletePendingImages(); | |
| 321 } | |
| 322 | |
| 323 void GpuImageDecodeController::ReduceCacheUsage() { | |
| 324 base::AutoLock lock(lock_); | |
| 325 EnsureCapacity(0); | |
| 326 } | |
| 327 | |
| 328 void GpuImageDecodeController::SetShouldAggressivelyFreeResources( | |
| 329 bool aggressively_free_resources) { | |
| 330 if (aggressively_free_resources) { | |
| 331 ContextProvider::ScopedContextLock context_lock(context_); | |
| 332 base::AutoLock lock(lock_); | |
| 333 // We want to keep as little in our cache as possible. Set our memory limit | |
| 334 // to zero and call EnsureCapacity to clean up memory. | |
| 335 cached_bytes_limit_ = 0; | |
| 336 EnsureCapacity(0); | |
| 337 | |
| 338 // We are holding the context lock, so finish cleaning up deleted images | |
| 339 // now. | |
| 340 DeletePendingImages(); | |
| 341 } else { | |
| 342 base::AutoLock lock(lock_); | |
| 343 cached_bytes_limit_ = kMaxGpuImageBytes; | |
| 344 } | |
| 345 } | |
| 346 | |
| 347 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { | |
| 348 base::AutoLock lock(lock_); | |
| 349 auto found = image_data_.Peek(draw_image.image()->uniqueID()); | |
| 350 DCHECK(found != image_data_.end()); | |
| 351 DCHECK(!found->second->is_at_raster); | |
| 352 DecodeImageIfNecessary(draw_image, found->second.get()); | |
| 353 } | |
| 354 | |
| 355 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { | |
| 356 ContextProvider::ScopedContextLock context_lock(context_); | |
| 357 base::AutoLock lock(lock_); | |
| 358 auto found = image_data_.Peek(draw_image.image()->uniqueID()); | |
| 359 DCHECK(found != image_data_.end()); | |
| 360 DCHECK(!found->second->is_at_raster); | |
| 361 UploadImageIfNecessary(draw_image, found->second.get()); | |
| 362 } | |
| 363 | |
| 364 void GpuImageDecodeController::DecodeTaskCompleted( | |
| 365 const DrawImage& draw_image) { | |
| 366 base::AutoLock lock(lock_); | |
| 367 // Decode task is complete, remove it from our list of pending tasks. | |
| 368 pending_image_decode_tasks_.erase(draw_image.image()->uniqueID()); | |
| 369 | |
| 370 // While the decode task is active, we keep a ref on the decoded data. | |
| 371 // Release that ref now. | |
| 372 UnrefImageDecode(draw_image); | |
| 373 } | |
| 374 | |
| 375 void GpuImageDecodeController::UploadTaskCompleted( | |
| 376 const DrawImage& draw_image) { | |
| 377 base::AutoLock lock(lock_); | |
| 378 // Upload task is complete, remove it from our list of pending tasks. | |
| 379 pending_image_upload_tasks_.erase(draw_image.image()->uniqueID()); | |
| 380 | |
| 381 // While the upload task is active, we keep a ref on both the image it will be | |
| 382 // populating and the decode it needs to populate it. Release these | |
| 383 // refs now. | |
| 384 UnrefImageDecode(draw_image); | |
| 385 UnrefImageInternal(draw_image); | |
| 386 } | |
| 387 | |
| 388 // Checks whether an image decode already exists. If not, returns a task to | |
| 389 // produce the requested decode. | |
| 390 scoped_refptr<ImageDecodeTask> | |
| 391 GpuImageDecodeController::GetImageDecodeTaskAndRef(const DrawImage& draw_image, | |
| 392 uint64_t prepare_tiles_id) { | |
| 393 lock_.AssertAcquired(); | |
| 394 | |
| 395 const uint32_t image_id = draw_image.image()->uniqueID(); | |
| 396 | |
| 397 // This ref is kept alive while an upload task may need this decode. We | |
| 398 // release this ref in UploadTaskCompleted. | |
| 399 RefImageDecode(draw_image); | |
| 400 | |
| 401 auto found = image_data_.Peek(image_id); | |
| 402 if (found != image_data_.end() && found->second->decode.is_locked) { | |
| 403 // We should never be creating a decode task for an at-raster image. | |
| 404 DCHECK(!found->second->is_at_raster); | |
| 405 // We should never be creating a decode for an already-uploaded image. | |
| 406 DCHECK(!found->second->upload.image); | |
| 407 return nullptr; | |
| 408 } | |
| 409 | |
| 410 // We didn't have an existing locked image, create a task to lock or decode. | |
| 411 scoped_refptr<ImageDecodeTask>& existing_task = | |
| 412 pending_image_decode_tasks_[image_id]; | |
| 413 if (!existing_task) { | |
| 414 // Ref image decode and create a decode task. This ref will be released in | |
| 415 // DecodeTaskCompleted. | |
| 416 RefImageDecode(draw_image); | |
| 417 existing_task = make_scoped_refptr( | |
| 418 new ImageDecodeTaskImpl(this, draw_image, prepare_tiles_id)); | |
| 419 } | |
| 420 return existing_task; | |
| 421 } | |
| 422 | |
| 423 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) { | |
| 424 lock_.AssertAcquired(); | |
| 425 auto found = image_data_.Peek(draw_image.image()->uniqueID()); | |
| 426 DCHECK(found != image_data_.end()); | |
| 427 ++found->second->decode.ref_count; | |
| 428 RefCountChanged(found->second.get()); | |
| 429 } | |
| 430 | |
| 431 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) { | |
| 432 lock_.AssertAcquired(); | |
| 433 auto found = image_data_.Peek(draw_image.image()->uniqueID()); | |
| 434 DCHECK(found != image_data_.end()); | |
| 435 DCHECK_GT(found->second->decode.ref_count, 0u); | |
| 436 --found->second->decode.ref_count; | |
| 437 RefCountChanged(found->second.get()); | |
| 438 } | |
| 439 | |
| 440 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) { | |
| 441 lock_.AssertAcquired(); | |
| 442 auto found = image_data_.Peek(draw_image.image()->uniqueID()); | |
| 443 DCHECK(found != image_data_.end()); | |
| 444 ++found->second->upload.ref_count; | |
| 445 RefCountChanged(found->second.get()); | |
| 446 } | |
| 447 | |
| 448 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) { | |
| 449 lock_.AssertAcquired(); | |
| 450 auto found = image_data_.Peek(draw_image.image()->uniqueID()); | |
| 451 DCHECK(found != image_data_.end()); | |
| 452 DCHECK_GT(found->second->upload.ref_count, 0u); | |
| 453 --found->second->upload.ref_count; | |
| 454 RefCountChanged(found->second.get()); | |
| 455 } | |
| 456 | |
| 457 // Called any time an image or decode ref count changes. Takes care of any | |
| 458 // necessary memory budget book-keeping and cleanup. | |
| 459 void GpuImageDecodeController::RefCountChanged(ImageData* image_data) { | |
| 460 lock_.AssertAcquired(); | |
| 461 | |
| 462 bool has_any_refs = | |
| 463 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0; | |
| 464 if (image_data->is_at_raster && !has_any_refs) { | |
| 465 // We have an at-raster image which has reached zero refs. If it won't fit | |
| 466 // in our cache, delete the image to allow it to fit. | |
| 467 if (image_data->upload.image && !CanFitSize(image_data->size)) { | |
| 468 images_pending_deletion_.push_back(std::move(image_data->upload.image)); | |
| 469 image_data->upload.image = nullptr; | |
| 470 } | |
| 471 | |
| 472 // We now have an at-raster image which will fit in our cache. Convert it | |
| 473 // to not-at-raster. | |
| 474 image_data->is_at_raster = false; | |
| 475 if (image_data->upload.image) { | |
| 476 bytes_used_ += image_data->size; | |
| 477 image_data->upload.budgeted = true; | |
| 478 } | |
| 479 } | |
| 480 | |
| 481 // If we have image refs on a non-at-raster image, it must be budgeted, as it | |
| 482 // is either uploaded or pending upload. | |
| 483 if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted && | |
| 484 !image_data->is_at_raster) { | |
| 485 // We should only be taking non-at-raster refs on images that fit in cache. | |
| 486 DCHECK(CanFitSize(image_data->size)); | |
| 487 | |
| 488 bytes_used_ += image_data->size; | |
| 489 image_data->upload.budgeted = true; | |
| 490 } | |
| 491 | |
| 492 // If we have no image refs on an image, it should only be budgeted if it has | |
| 493 // an uploaded image. If no image exists (upload was cancelled), we should | |
| 494 // un-budget the image. | |
| 495 if (image_data->upload.ref_count == 0 && image_data->upload.budgeted && | |
| 496 !image_data->upload.image) { | |
| 497 DCHECK_GE(bytes_used_, image_data->size); | |
| 498 bytes_used_ -= image_data->size; | |
| 499 image_data->upload.budgeted = false; | |
| 500 } | |
| 501 | |
| 502 // If we have no decode refs on an image, we should unlock any locked | |
| 503 // discardable memory. | |
| 504 if (image_data->decode.ref_count == 0 && image_data->decode.is_locked) { | |
| 505 DCHECK(image_data->decode.data); | |
| 506 image_data->decode.data->Unlock(); | |
| 507 image_data->decode.is_locked = false; | |
| 508 } | |
| 509 } | |
| 510 | |
| 511 // Ensures that we can fit a new image of size |required_size| in our cache. In | |
| 512 // doing so, this function will free unreferenced image data as necessary to | |
| 513 // create room. | |
| 514 bool GpuImageDecodeController::EnsureCapacity(size_t required_size) { | |
| 515 lock_.AssertAcquired(); | |
| 516 | |
| 517 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | |
| 518 return true; | |
| 519 | |
| 520 // While we are over memory or preferred item capacity, we iterate through | |
| 521 // our set of cached image data in LRU order. For each image, we can do two | |
| 522 // things: 1) We can free the uploaded image, reducing the memory usage of | |
| 523 // the cache and 2) we can remove the entry entirely, reducing the count of | |
| 524 // elements in the cache. | |
| 525 for (auto it = image_data_.rbegin(); it != image_data_.rend();) { | |
| 526 if (it->second->decode.ref_count != 0 || | |
| 527 it->second->upload.ref_count != 0) { | |
| 528 ++it; | |
| 529 continue; | |
| 530 } | |
| 531 | |
| 532 // Current entry has no refs. Ensure it is not locked. | |
| 533 DCHECK(!it->second->decode.is_locked); | |
| 534 | |
| 535 // If an image without refs is budgeted, it must have an associated image | |
| 536 // upload. | |
| 537 DCHECK(!it->second->upload.budgeted || it->second->upload.image); | |
| 538 | |
| 539 // Free the uploaded image if possible. | |
| 540 if (it->second->upload.image) { | |
| 541 DCHECK(it->second->upload.budgeted); | |
| 542 DCHECK_GE(bytes_used_, it->second->size); | |
| 543 bytes_used_ -= it->second->size; | |
| 544 images_pending_deletion_.push_back(std::move(it->second->upload.image)); | |
| 545 it->second->upload.image = nullptr; | |
| 546 it->second->upload.budgeted = false; | |
| 547 } | |
| 548 | |
| 549 // Free the entire entry if necessary. | |
| 550 if (ExceedsPreferredCount()) { | |
| 551 it = image_data_.Erase(it); | |
| 552 } else { | |
| 553 ++it; | |
| 554 } | |
| 555 | |
| 556 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | |
| 557 return true; | |
| 558 } | |
| 559 | |
| 560 // Preferred count is only used as a guideline when trimming the cache. Allow | |
| 561 // new elements to be added as long as we are below our size limit. | |
| 562 return CanFitSize(required_size); | |
| 563 } | |
| 564 | |
| 565 bool GpuImageDecodeController::CanFitSize(size_t size) const { | |
| 566 lock_.AssertAcquired(); | |
| 567 | |
| 568 base::CheckedNumeric<uint32_t> new_size(bytes_used_); | |
| 569 new_size += size; | |
| 570 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_; | |
| 571 } | |
| 572 | |
| 573 bool GpuImageDecodeController::ExceedsPreferredCount() const { | |
| 574 lock_.AssertAcquired(); | |
| 575 | |
| 576 return image_data_.size() > cached_items_limit_; | |
| 577 } | |
| 578 | |
| 579 void GpuImageDecodeController::DecodeImageIfNecessary( | |
| 580 const DrawImage& draw_image, | |
| 581 ImageData* image_data) { | |
| 582 lock_.AssertAcquired(); | |
| 583 | |
| 584 DCHECK_GT(image_data->decode.ref_count, 0u); | |
| 585 | |
| 586 if (image_data->decode.decode_failure) { | |
| 587 // We have already tried and failed to decode this image. Don't try again. | |
| 588 return; | |
| 589 } | |
| 590 | |
| 591 if (image_data->upload.image) { | |
| 592 // We already have an uploaded image, no reason to decode. | |
| 593 return; | |
| 594 } | |
| 595 | |
| 596 if (image_data->decode.data && | |
| 597 (image_data->decode.is_locked || image_data->decode.data->Lock())) { | |
| 598 // We already decoded this, or we just needed to lock, early out. | |
| 599 image_data->decode.is_locked = true; | |
| 600 return; | |
| 601 } | |
| 602 | |
| 603 TRACE_EVENT0("cc", "GpuImageDecodeController::DecodeImage"); | |
| 604 | |
| 605 image_data->decode.data = nullptr; | |
| 606 scoped_ptr&lt;base::DiscardableMemory&gt; backing_memory; | |

> **vmpstr** 2016/04/08 18:24:59
> Probably unique_ptr at this point.

> **ericrk** 2016/04/08 20:55:18
> Done.
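
An editor's aside on the review note above: the change leans on the discardable-memory lifecycle of allocate-locked, use, unlock, and conditionally re-lock. The sketch below illustrates that contract with the APIs this CL already includes; the helper name `DiscardableLifecycleSketch` is ours, not part of the change, and `scoped_ptr` is kept as the CL writes it (the `unique_ptr` migration is the reviewer's point).

```cpp
#include <stddef.h>

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/scoped_ptr.h"

// Sketch only (not part of the CL): the base::DiscardableMemory contract
// that DecodeImageIfNecessary and RefCountChanged rely on. Allocation
// returns memory already locked; Unlock() makes the contents purgeable;
// a later Lock() succeeds only if the OS kept them resident.
void DiscardableLifecycleSketch(size_t size) {
  scoped_ptr<base::DiscardableMemory> memory =
      base::DiscardableMemoryAllocator::GetInstance()
          ->AllocateLockedDiscardableMemory(size);
  // ... decode pixels into memory->data() while locked ...
  memory->Unlock();  // Last ref dropped; contents may now be purged.

  if (memory->Lock()) {
    // Contents survived; the cached decode can be reused as-is.
    memory->Unlock();
  } else {
    // Purged. Discard |memory| and decode into fresh backing memory,
    // as DecodeImageIfNecessary does when Lock() fails.
  }
}
```

| OLD | NEW |
|---|---|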
| 607 { | |
| 608 base::AutoUnlock unlock(lock_); | |
| 609 switch (image_data->mode) { | |
| 610 case DecodedDataMode::CPU: { | |
| 611 backing_memory = | |
| 612 base::DiscardableMemoryAllocator::GetInstance() | |
| 613 ->AllocateLockedDiscardableMemory(image_data->size); | |
| 614 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); | |
| 615 if (!draw_image.image()->readPixels(image_info, backing_memory->data(), | |
| 616 image_info.minRowBytes(), 0, 0, | |
| 617 SkImage::kDisallow_CachingHint)) { | |
| 618 backing_memory.reset(nullptr); | |
| 619 } | |
| 620 break; | |
| 621 } | |
| 622 case DecodedDataMode::GPU: { | |
| 623 backing_memory = | |
| 624 base::DiscardableMemoryAllocator::GetInstance() | |
| 625 ->AllocateLockedDiscardableMemory(image_data->size); | |
| 626 auto params = ParamsFromDrawImage(draw_image); | |
| 627 if (!draw_image.image()->getDeferredTextureImageData( | |
| 628 *context_threadsafe_proxy_.get(), ¶ms, 1, | |
| 629 backing_memory->data())) { | |
| 630 backing_memory.reset(nullptr); | |

> **vmpstr** 2016/04/08 18:24:59
> I think just .reset() works

> **ericrk** 2016/04/08 20:55:18
> Done.
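
A second aside, since the GPU branch above and CreateImageData below share it: Skia's `SkImage::getDeferredTextureImageData` is a two-pass API. Below is a hedged sketch of that contract; the helper name `DecodeToDeferredTextureData` is ours, and the call shapes mirror only what is visible in this CL.

```cpp
#include <stddef.h>

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/scoped_ptr.h"
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/gpu/GrContext.h"

// Sketch only (not part of the CL): pass a null buffer to query the
// required size (zero meaning the image cannot take the GPU path), then
// call again with real storage to decode into it.
scoped_ptr<base::DiscardableMemory> DecodeToDeferredTextureData(
    const SkImage* image,
    const GrContextThreadSafeProxy& proxy,
    SkImage::DeferredTextureImageUsageParams* params) {
  size_t size = image->getDeferredTextureImageData(proxy, params, 1, nullptr);
  if (size == 0)
    return nullptr;  // Too large or unsupported; the CL falls back to CPU.

  scoped_ptr<base::DiscardableMemory> memory =
      base::DiscardableMemoryAllocator::GetInstance()
          ->AllocateLockedDiscardableMemory(size);
  if (!image->getDeferredTextureImageData(proxy, params, 1, memory->data()))
    memory.reset();  // Decode failed; plain reset(), per the note above.
  return memory;
}
```

| OLD | NEW |
|---|---|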
| 631 } | |
| 632 break; | |
| 633 } | |
| 634 } | |
| 635 } | |
| 636 | |
| 637 if (image_data->decode.data) { | |
| 638 // An at-raster task decoded this before us. Ignore our decode. | |
| 639 return; | |
| 640 } | |
| 641 | |
| 642 if (!backing_memory) { | |
| 643 // If |backing_memory| was not populated, we had a non-decodable image. | |
| 644 image_data->decode.decode_failure = true; | |
| 645 return; | |
| 646 } | |
| 647 | |
| 648 image_data->decode.data = std::move(backing_memory); | |
| 649 DCHECK(!image_data->decode.is_locked); | |
| 650 image_data->decode.is_locked = true; | |
| 651 } | |
| 652 | |
| 653 void GpuImageDecodeController::UploadImageIfNecessary( | |
| 654 const DrawImage& draw_image, | |
| 655 ImageData* image_data) { | |
| 656 context_->GetLock()->AssertAcquired(); | |
| 657 lock_.AssertAcquired(); | |
| 658 | |
| 659 if (image_data->decode.decode_failure) { | |
| 660 // We were unable to decode this image. Don't try to upload. | |
| 661 return; | |
| 662 } | |
| 663 | |
| 664 if (image_data->upload.image) { | |
| 665 // Someone has uploaded this image before us (at raster). | |
| 666 return; | |
| 667 } | |
| 668 | |
| 669 TRACE_EVENT0("cc", "GpuImageDecodeController::UploadImage"); | |
| 670 DCHECK(image_data->decode.is_locked); | |
| 671 DCHECK_GT(image_data->decode.ref_count, 0u); | |
| 672 DCHECK_GT(image_data->upload.ref_count, 0u); | |
| 673 | |
| 674 // We are about to upload a new image and are holding the context lock. | |
| 675 // Ensure that any images which have been marked for deletion are actually | |
| 676 // cleaned up so we don't exceed our memory limit during this upload. | |
| 677 DeletePendingImages(); | |
| 678 | |
| 679 skia::RefPtr<SkImage> uploaded_image; | |
| 680 { | |
| 681 base::AutoUnlock unlock(lock_); | |
| 682 switch (image_data->mode) { | |
| 683 case DecodedDataMode::CPU: { | |
| 684 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); | |
| 685 uploaded_image = skia::AdoptRef(SkImage::NewFromRaster( | |
| 686 image_info, image_data->decode.data->data(), | |
| 687 image_info.minRowBytes(), [](const void*, void*) {}, nullptr)); | |
| 688 break; | |
| 689 } | |
| 690 case DecodedDataMode::GPU: { | |
| 691 uploaded_image = | |
| 692 skia::AdoptRef(SkImage::NewFromDeferredTextureImageData( | |
| 693 context_->GrContext(), image_data->decode.data->data(), | |
| 694 SkBudgeted::kNo)); | |
| 695 break; | |
| 696 } | |
| 697 } | |
| 698 } | |
| 699 DCHECK(uploaded_image); | |
| 700 | |
| 701 // An at-raster task may have uploaded this while we were unlocked. If so, | |
| 702 // ignore our result. | |
| 703 if (!image_data->upload.image) { | |
| 704 image_data->upload.image = std::move(uploaded_image); | |
| 705 } | |
| 706 } | |
| 707 | |
| 708 scoped_ptr<GpuImageDecodeController::ImageData> | |
| 709 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { | |
| 710 lock_.AssertAcquired(); | |
| 711 | |
| 712 DecodedDataMode mode; | |
| 713 SkImageInfo info = CreateImageInfoForDrawImage(draw_image); | |
| 714 SkImage::DeferredTextureImageUsageParams params = | |
| 715 ParamsFromDrawImage(draw_image); | |
| 716 size_t data_size = draw_image.image()->getDeferredTextureImageData( | |
| 717 *context_threadsafe_proxy_.get(), ¶ms, 1, nullptr); | |
| 718 | |
| 719 if (data_size == 0) { | |
| 720 // Can't upload image, too large or other failure. Try to use SW fallback. | |
| 721 data_size = info.getSafeSize(info.minRowBytes()); | |
| 722 mode = DecodedDataMode::CPU; | |
| 723 } else { | |
| 724 mode = DecodedDataMode::GPU; | |
| 725 } | |
| 726 | |
| 727 return make_scoped_ptr(new ImageData(mode, data_size)); | |
| 728 } | |
| 729 | |
| 730 void GpuImageDecodeController::DeletePendingImages() { | |
| 731 context_->GetLock()->AssertAcquired(); | |
| 732 lock_.AssertAcquired(); | |
| 733 images_pending_deletion_.clear(); | |
| 734 } | |
| 735 | |
| 736 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( | |
| 737 const DrawImage& draw_image) const { | |
| 738 return SkImageInfo::Make( | |
| 739 draw_image.image()->width(), draw_image.image()->height(), | |
| 740 ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType); | |
| 100 } | 741 } |
| 101 | 742 |
| 102 } // namespace cc | 743 } // namespace cc |
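
Finally, an editor's sketch of the caller protocol the new controller assumes; the helper `RasterOneImageSketch` and its control flow are ours, with the tile-task-scheduler wiring elided. Tile management takes a ref up front and balances each successful ref with UnrefImage, while raster code, holding the context lock, brackets every draw with GetDecodedImageForDraw and DrawWithImageFinished.

```cpp
#include <stdint.h>

#include "cc/tiles/gpu_image_decode_controller.h"

// Sketch only (not part of the CL): how a client drives the controller.
void RasterOneImageSketch(cc::GpuImageDecodeController* controller,
                          const cc::DrawImage& draw_image,
                          uint64_t prepare_tiles_id) {
  scoped_refptr<cc::ImageDecodeTask> task;
  // True iff a ref was taken. A non-null |task| must run before raster:
  // its decode dependency on any worker thread, the upload itself on the
  // non-concurrent thread (SupportsConcurrentExecution() is false).
  bool refed =
      controller->GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);

  // ... the tile task scheduler runs |task| and its dependency here ...

  // At raster time, with the context lock held:
  cc::DecodedDrawImage decoded =
      controller->GetDecodedImageForDraw(draw_image);
  if (decoded.image()) {
    // ... raster with decoded.image(); it may be an at-raster decode if
    // the cache was over budget ...
  }
  controller->DrawWithImageFinished(draw_image, decoded);

  // Balance the ref taken by GetTaskForImageAndRef.
  if (refed)
    controller->UnrefImage(draw_image);
}
```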