| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/tiles/gpu_image_decode_controller.h" | 5 #include "cc/tiles/gpu_image_decode_controller.h" |
| 6 | 6 |
| 7 #include <inttypes.h> | 7 #include <inttypes.h> |
| 8 | 8 |
| 9 #include "base/memory/discardable_memory_allocator.h" | 9 #include "base/memory/discardable_memory_allocator.h" |
| 10 #include "base/memory/ptr_util.h" | 10 #include "base/memory/ptr_util.h" |
| 11 #include "base/metrics/histogram_macros.h" | 11 #include "base/metrics/histogram_macros.h" |
| 12 #include "base/numerics/safe_math.h" | 12 #include "base/numerics/safe_math.h" |
| 13 #include "base/strings/stringprintf.h" | 13 #include "base/strings/stringprintf.h" |
| 14 #include "base/threading/thread_task_runner_handle.h" | 14 #include "base/threading/thread_task_runner_handle.h" |
| 15 #include "cc/debug/devtools_instrumentation.h" | 15 #include "cc/debug/devtools_instrumentation.h" |
| 16 #include "cc/output/context_provider.h" | 16 #include "cc/output/context_provider.h" |
| 17 #include "cc/raster/tile_task.h" | 17 #include "cc/raster/tile_task.h" |
| 18 #include "cc/resources/resource_format_utils.h" | 18 #include "cc/resources/resource_format_utils.h" |
| 19 #include "cc/tiles/mipmap_util.h" | |
| 20 #include "gpu/command_buffer/client/context_support.h" | 19 #include "gpu/command_buffer/client/context_support.h" |
| 21 #include "gpu/command_buffer/client/gles2_interface.h" | 20 #include "gpu/command_buffer/client/gles2_interface.h" |
| 22 #include "gpu_image_decode_controller.h" | 21 #include "gpu_image_decode_controller.h" |
| 23 #include "skia/ext/texture_handle.h" | 22 #include "skia/ext/texture_handle.h" |
| 24 #include "third_party/skia/include/core/SkCanvas.h" | 23 #include "third_party/skia/include/core/SkCanvas.h" |
| 25 #include "third_party/skia/include/core/SkRefCnt.h" | 24 #include "third_party/skia/include/core/SkRefCnt.h" |
| 26 #include "third_party/skia/include/core/SkSurface.h" | 25 #include "third_party/skia/include/core/SkSurface.h" |
| 27 #include "third_party/skia/include/gpu/GrContext.h" | 26 #include "third_party/skia/include/gpu/GrContext.h" |
| 28 #include "third_party/skia/include/gpu/GrTexture.h" | 27 #include "third_party/skia/include/gpu/GrTexture.h" |
| 29 #include "ui/gfx/skia_util.h" | 28 #include "ui/gfx/skia_util.h" |
| (...skipping 11 matching lines...) |
| 41 return true; | 40 return true; |
| 42 if (std::abs(draw_image.scale().width()) < | 41 if (std::abs(draw_image.scale().width()) < |
| 43 std::numeric_limits<float>::epsilon() || | 42 std::numeric_limits<float>::epsilon() || |
| 44 std::abs(draw_image.scale().height()) < | 43 std::abs(draw_image.scale().height()) < |
| 45 std::numeric_limits<float>::epsilon()) { | 44 std::numeric_limits<float>::epsilon()) { |
| 46 return true; | 45 return true; |
| 47 } | 46 } |
| 48 return false; | 47 return false; |
| 49 } | 48 } |
| 50 | 49 |
| 51 // Returns the filter quality to use for scaling the image to upload scale. For | |
| 52 // GPU raster, medium and high filter quality are identical for downscales. | |
| 53 // Upload scaling is always a downscale, so cap our filter quality to medium. | |
| 54 SkFilterQuality CalculateUploadScaleFilterQuality(const DrawImage& draw_image) { | |
| 55 return std::min(kMedium_SkFilterQuality, draw_image.filter_quality()); | |
| 56 } | |
| 57 | |
| 58 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage( | 50 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage( |
| 59 const DrawImage& draw_image, | 51 const DrawImage& draw_image) { |
| 60 int upload_scale_mip_level) { | |
| 61 SkImage::DeferredTextureImageUsageParams params; | 52 SkImage::DeferredTextureImageUsageParams params; |
| 62 params.fMatrix = draw_image.matrix(); | 53 params.fMatrix = draw_image.matrix(); |
| 63 params.fQuality = draw_image.filter_quality(); | 54 params.fQuality = draw_image.filter_quality(); |
| 64 params.fPreScaleMipLevel = upload_scale_mip_level; | |
| 65 | 55 |
| 66 return params; | 56 return params; |
| 67 } | 57 } |
| 68 | 58 |
| 69 // Calculate the mip level to upload-scale the image to before uploading. We use | |
| 70 // mip levels rather than exact scales to increase re-use of scaled images. | |
| 71 int CalculateUploadScaleMipLevel(const DrawImage& draw_image) { | |
| 72 // Images which are being clipped will have color-bleeding if scaled. | |
| 73 // TODO(ericrk): Investigate uploading clipped images to handle this case and | |
| 74 // provide further optimization. crbug.com/620899 | |
| 75 if (draw_image.src_rect() != draw_image.image()->bounds()) | |
| 76 return 0; | |
| 77 | |
| 78 gfx::Size base_size(draw_image.image()->width(), | |
| 79 draw_image.image()->height()); | |
| 80 // Ceil our scaled size so that the mip map generated is guaranteed to be | |
| 81 // larger. Take the abs of the scale, as mipmap functions don't handle | |
| 82 // (and aren't impacted by) negative image dimensions. | |
| 83 gfx::Size scaled_size = | |
| 84 gfx::ScaleToCeiledSize(base_size, std::abs(draw_image.scale().width()), | |
| 85 std::abs(draw_image.scale().height())); | |
| 86 | |
| 87 return MipMapUtil::GetLevelForSize(base_size, scaled_size); | |
| 88 } | |
| 89 | |
| 90 // Calculates the scale factor which can be used to scale an image to a given | |
| 91 // mip level. | |
| 92 SkSize CalculateScaleFactorForMipLevel(const DrawImage& draw_image, | |
| 93 int mip_level) { | |
| 94 gfx::Size base_size(draw_image.image()->width(), | |
| 95 draw_image.image()->height()); | |
| 96 return MipMapUtil::GetScaleAdjustmentForLevel(base_size, mip_level); | |
| 97 } | |
| 98 | |
| 99 // Calculates the size of a given mip level. | |
| 100 gfx::Size CalculateSizeForMipLevel(const DrawImage& draw_image, int mip_level) { | |
| 101 gfx::Size base_size(draw_image.image()->width(), | |
| 102 draw_image.image()->height()); | |
| 103 return MipMapUtil::GetSizeForLevel(base_size, mip_level); | |
| 104 } | |
| 105 | |
| 106 // Generates a uint64_t which uniquely identifies a DrawImage for the purposes | |
| 107 // of the |in_use_cache_|. The key is generated as follows: | |
| 108 // ╔══════════════════════╤═══════════╤═══════════╗ | |
| 109 // ║       image_id       │ mip_level │  quality  ║ | |
| 110 // ╚════════32═bits═══════╧══16═bits══╧══16═bits══╝ | |
| 111 uint64_t GenerateInUseCacheKey(const DrawImage& draw_image) { | |
| 112 static_assert( | |
| 113 kLast_SkFilterQuality <= std::numeric_limits<uint16_t>::max(), | |
| 114 "InUseCacheKey depends on SkFilterQuality fitting in a uint16_t."); | |
| 115 | |
| 116 SkFilterQuality filter_quality = | |
| 117 CalculateUploadScaleFilterQuality(draw_image); | |
| 118 DCHECK_LE(filter_quality, kLast_SkFilterQuality); | |
| 119 | |
| 120 // An image has at most log_2(max(width, height)) mip levels, so given our | |
| 121 // usage of 32-bit sizes for images, key.mip_level is at most 31. | |
| 122 int32_t mip_level = CalculateUploadScaleMipLevel(draw_image); | |
| 123 DCHECK_LT(mip_level, 32); | |
| 124 | |
| 125 return (static_cast<uint64_t>(draw_image.image()->uniqueID()) << 32) | | |
| 126 (mip_level << 16) | filter_quality; | |
| 127 } | |
| 128 | |
| 129 } // namespace | 59 } // namespace |
| 130 | 60 |
| 131 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry( | |
| 132 scoped_refptr<ImageData> image_data) | |
| 133 : image_data(std::move(image_data)) {} | |
| 134 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry( | |
| 135 const InUseCacheEntry&) = default; | |
| 136 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) = | |
| 137 default; | |
| 138 GpuImageDecodeController::InUseCacheEntry::~InUseCacheEntry() = default; | |
| 139 | |
| 140 // Task which decodes an image and stores the result in discardable memory. | 61 // Task which decodes an image and stores the result in discardable memory. |
| 141 // This task does not use GPU resources and can be run on any thread. | 62 // This task does not use GPU resources and can be run on any thread. |
| 142 class ImageDecodeTaskImpl : public TileTask { | 63 class ImageDecodeTaskImpl : public TileTask { |
| 143 public: | 64 public: |
| 144 ImageDecodeTaskImpl(GpuImageDecodeController* controller, | 65 ImageDecodeTaskImpl(GpuImageDecodeController* controller, |
| 145 const DrawImage& draw_image, | 66 const DrawImage& draw_image, |
| 146 const ImageDecodeController::TracingInfo& tracing_info) | 67 const ImageDecodeController::TracingInfo& tracing_info) |
| 147 : TileTask(true), | 68 : TileTask(true), |
| 148 controller_(controller), | 69 controller_(controller), |
| 149 image_(draw_image), | 70 image_(draw_image), |
| (...skipping 99 matching lines...) |
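The block removed above (old lines 51-127) pre-scaled images to a mip level before upload and keyed the in_use_cache_ on the image ID, mip level, and filter quality packed into a single 64-bit value. A minimal standalone sketch of that packing scheme, with a hypothetical helper name and example values that are not part of this CL:

```cpp
#include <cassert>
#include <cstdint>

// Same layout the removed GenerateInUseCacheKey documented: image_id in
// bits 32-63, mip level in bits 16-31, filter quality in bits 0-15.
uint64_t PackInUseCacheKey(uint32_t image_id, int mip_level, int filter_quality) {
  assert(mip_level >= 0 && mip_level < 32);  // 32-bit sizes => at most 31 mip levels
  assert(filter_quality >= 0 && filter_quality <= 0xFFFF);
  return (static_cast<uint64_t>(image_id) << 32) |
         (static_cast<uint64_t>(mip_level) << 16) |
         static_cast<uint64_t>(filter_quality);
}

// Example: image_id 7, mip level 2, kLow_SkFilterQuality (1)
//   => 0x0000000700020001
```

Each distinct scale/quality combination of an image therefore got its own in-use entry, while the persistent cache stayed keyed on the image ID alone.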
| 249 | 170 |
| 250 void GpuImageDecodeController::DecodedImageData::ResetData() { | 171 void GpuImageDecodeController::DecodedImageData::ResetData() { |
| 251 DCHECK(!is_locked_); | 172 DCHECK(!is_locked_); |
| 252 if (data_) | 173 if (data_) |
| 253 ReportUsageStats(); | 174 ReportUsageStats(); |
| 254 data_ = nullptr; | 175 data_ = nullptr; |
| 255 usage_stats_ = UsageStats(); | 176 usage_stats_ = UsageStats(); |
| 256 } | 177 } |
| 257 | 178 |
| 258 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const { | 179 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const { |
| 259 // lock_count │ used │ result state | 180 // lock_count | used | result state |
| 260 // ═══════════╪═══════╪══════════════════ | 181 // ===========+=======+================== |
| 261 // 1 │ false │ WASTED_ONCE | 182 // 1 | false | WASTED_ONCE |
| 262 // 1 │ true │ USED_ONCE | 183 // 1 | true | USED_ONCE |
| 263 // >1 │ false │ WASTED_RELOCKED | 184 // >1 | false | WASTED_RELOCKED |
| 264 // >1 │ true │ USED_RELOCKED | 185 // >1 | true | USED_RELOCKED |
| 265 // Note that it's important not to reorder the following enums, since the | 186 // Note that it's important not to reorder the following enums, since the |
| 266 // numerical values are used in the histogram code. | 187 // numerical values are used in the histogram code. |
| 267 enum State : int { | 188 enum State : int { |
| 268 DECODED_IMAGE_STATE_WASTED_ONCE, | 189 DECODED_IMAGE_STATE_WASTED_ONCE, |
| 269 DECODED_IMAGE_STATE_USED_ONCE, | 190 DECODED_IMAGE_STATE_USED_ONCE, |
| 270 DECODED_IMAGE_STATE_WASTED_RELOCKED, | 191 DECODED_IMAGE_STATE_WASTED_RELOCKED, |
| 271 DECODED_IMAGE_STATE_USED_RELOCKED, | 192 DECODED_IMAGE_STATE_USED_RELOCKED, |
| 272 DECODED_IMAGE_STATE_COUNT | 193 DECODED_IMAGE_STATE_COUNT |
| 273 } state = DECODED_IMAGE_STATE_WASTED_ONCE; | 194 } state = DECODED_IMAGE_STATE_WASTED_ONCE; |
| 274 | 195 |
| (...skipping 30 matching lines...) |
| 305 image_ = std::move(image); | 226 image_ = std::move(image); |
| 306 } | 227 } |
| 307 | 228 |
| 308 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const { | 229 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const { |
| 309 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used", | 230 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used", |
| 310 usage_stats_.used); | 231 usage_stats_.used); |
| 311 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted", | 232 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted", |
| 312 usage_stats_.first_ref_wasted); | 233 usage_stats_.first_ref_wasted); |
| 313 } | 234 } |
| 314 | 235 |
| 315 GpuImageDecodeController::ImageData::ImageData( | 236 GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode, |
| 316 DecodedDataMode mode, | 237 size_t size) |
| 317 size_t size, | 238 : mode(mode), size(size) {} |
| 318 int upload_scale_mip_level, | |
| 319 SkFilterQuality upload_scale_filter_quality) | |
| 320 : mode(mode), | |
| 321 size(size), | |
| 322 upload_scale_mip_level(upload_scale_mip_level), | |
| 323 upload_scale_filter_quality(upload_scale_filter_quality) {} | |
| 324 | 239 |
| 325 GpuImageDecodeController::ImageData::~ImageData() { | 240 GpuImageDecodeController::ImageData::~ImageData() = default; |
| 326 // We should never delete ImageData while it is in use or before it has been | |
| 327 // cleaned up. | |
| 328 DCHECK_EQ(0u, upload.ref_count); | |
| 329 DCHECK_EQ(0u, decode.ref_count); | |
| 330 DCHECK_EQ(false, decode.is_locked()); | |
| 331 // This should always be cleaned up before deleting the image, as it needs to | |
| 332 // be freed with the GL context lock held. | |
| 333 DCHECK(!upload.image()); | |
| 334 } | |
| 335 | 241 |
| 336 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, | 242 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, |
| 337 ResourceFormat decode_format, | 243 ResourceFormat decode_format, |
| 338 size_t max_gpu_image_bytes) | 244 size_t max_gpu_image_bytes) |
| 339 : format_(decode_format), | 245 : format_(decode_format), |
| 340 context_(context), | 246 context_(context), |
| 341 persistent_cache_(PersistentCache::NO_AUTO_EVICT), | 247 image_data_(ImageDataMRUCache::NO_AUTO_EVICT), |
| 342 cached_items_limit_(kMaxDiscardableItems), | 248 cached_items_limit_(kMaxDiscardableItems), |
| 343 cached_bytes_limit_(max_gpu_image_bytes), | 249 cached_bytes_limit_(max_gpu_image_bytes), |
| 344 bytes_used_(0), | 250 bytes_used_(0), |
| 345 max_gpu_image_bytes_(max_gpu_image_bytes) { | 251 max_gpu_image_bytes_(max_gpu_image_bytes) { |
| 346 // Acquire the context_lock so that we can safely retrieve the | 252 // Acquire the context_lock so that we can safely retrieve the |
| 347 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. | 253 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. |
| 348 { | 254 { |
| 349 ContextProvider::ScopedContextLock context_lock(context_); | 255 ContextProvider::ScopedContextLock context_lock(context_); |
| 350 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( | 256 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( |
| 351 context->GrContext()->threadSafeProxy()); | 257 context->GrContext()->threadSafeProxy()); |
| (...skipping 22 matching lines...) Expand all Loading... |
| 374 const DrawImage& draw_image, | 280 const DrawImage& draw_image, |
| 375 const TracingInfo& tracing_info, | 281 const TracingInfo& tracing_info, |
| 376 scoped_refptr<TileTask>* task) { | 282 scoped_refptr<TileTask>* task) { |
| 377 if (SkipImage(draw_image)) { | 283 if (SkipImage(draw_image)) { |
| 378 *task = nullptr; | 284 *task = nullptr; |
| 379 return false; | 285 return false; |
| 380 } | 286 } |
| 381 | 287 |
| 382 base::AutoLock lock(lock_); | 288 base::AutoLock lock(lock_); |
| 383 const auto image_id = draw_image.image()->uniqueID(); | 289 const auto image_id = draw_image.image()->uniqueID(); |
| 384 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 290 |
| 385 scoped_refptr<ImageData> new_data; | 291 auto found = image_data_.Get(image_id); |
| 386 if (!image_data) { | 292 if (found != image_data_.end()) { |
| 387 // We need an ImageData, create one now. | 293 ImageData* image_data = found->second.get(); |
| 388 new_data = CreateImageData(draw_image); | 294 if (image_data->is_at_raster) { |
| 389 image_data = new_data.get(); | 295 // Image is at-raster, just return, this usage will be at-raster as well. |
| 390 } else if (image_data->is_at_raster) { | 296 *task = nullptr; |
| 391 // Image is at-raster, just return, this usage will be at-raster as well. | 297 return false; |
| 392 *task = nullptr; | 298 } |
| 393 return false; | 299 |
| 394 } else if (image_data->decode.decode_failure) { | 300 if (image_data->decode.decode_failure) { |
| 395 // We have already tried and failed to decode this image, so just return. | 301 // We have already tried and failed to decode this image, so just return. |
| 396 *task = nullptr; | 302 *task = nullptr; |
| 397 return false; | 303 return false; |
| 398 } else if (image_data->upload.image()) { | 304 } |
| 399 // The image is already uploaded, ref and return. | 305 |
| 400 RefImage(draw_image); | 306 if (image_data->upload.image()) { |
| 401 *task = nullptr; | 307 // The image is already uploaded, ref and return. |
| 402 return true; | 308 RefImage(draw_image); |
| 403 } else if (image_data->upload.task) { | 309 *task = nullptr; |
| 310 return true; |
| 311 } |
| 312 } |
| 313 |
| 314 // We didn't have a pre-uploaded image, so we need an upload task. Try to find |
| 315 // an existing one. |
| 316 scoped_refptr<TileTask>& existing_task = |
| 317 pending_image_upload_tasks_[image_id]; |
| 318 if (existing_task) { |
| 404 // We had an existing upload task, ref the image and return the task. | 319 // We had an existing upload task, ref the image and return the task. |
| 405 RefImage(draw_image); | 320 RefImage(draw_image); |
| 406 *task = image_data->upload.task; | 321 *task = existing_task; |
| 407 return true; | 322 return true; |
| 408 } | 323 } |
| 409 | 324 |
| 325 // We will be creating a new upload task. If necessary, create a placeholder |
| 326 // ImageData to hold the result. |
| 327 std::unique_ptr<ImageData> new_data; |
| 328 ImageData* data; |
| 329 if (found == image_data_.end()) { |
| 330 new_data = CreateImageData(draw_image); |
| 331 data = new_data.get(); |
| 332 } else { |
| 333 data = found->second.get(); |
| 334 } |
| 335 |
| 410 // Ensure that the image we're about to decode/upload will fit in memory. | 336 // Ensure that the image we're about to decode/upload will fit in memory. |
| 411 if (!EnsureCapacity(image_data->size)) { | 337 if (!EnsureCapacity(data->size)) { |
| 412 // Image will not fit, do an at-raster decode. | 338 // Image will not fit, do an at-raster decode. |
| 413 *task = nullptr; | 339 *task = nullptr; |
| 414 return false; | 340 return false; |
| 415 } | 341 } |
| 416 | 342 |
| 417 // If we had to create new image data, add it to our map now that we know it | 343 // If we had to create new image data, add it to our map now that we know it |
| 418 // will fit. | 344 // will fit. |
| 419 if (new_data) | 345 if (new_data) |
| 420 persistent_cache_.Put(image_id, std::move(new_data)); | 346 found = image_data_.Put(image_id, std::move(new_data)); |
| 421 | 347 |
| 422 // Ref image and create upload and decode tasks. We will release this ref | 348 // Ref image and create upload and decode tasks. We will release this ref |
| 423 // in UploadTaskCompleted. | 349 // in UploadTaskCompleted. |
| 424 RefImage(draw_image); | 350 RefImage(draw_image); |
| 425 *task = make_scoped_refptr(new ImageUploadTaskImpl( | 351 existing_task = make_scoped_refptr(new ImageUploadTaskImpl( |
| 426 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info), | 352 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info), |
| 427 tracing_info)); | 353 tracing_info)); |
| 428 image_data->upload.task = *task; | |
| 429 | 354 |
| 430 // Ref the image again - this ref is owned by the caller, and it is their | 355 // Ref the image again - this ref is owned by the caller, and it is their |
| 431 // responsibility to release it by calling UnrefImage. | 356 // responsibility to release it by calling UnrefImage. |
| 432 RefImage(draw_image); | 357 RefImage(draw_image); |
| 358 *task = existing_task; |
| 433 return true; | 359 return true; |
| 434 } | 360 } |
| 435 | 361 |
| 436 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { | 362 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { |
| 437 base::AutoLock lock(lock_); | 363 base::AutoLock lock(lock_); |
| 438 UnrefImageInternal(draw_image); | 364 UnrefImageInternal(draw_image); |
| 439 } | 365 } |
| 440 | 366 |
| 441 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( | 367 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( |
| 442 const DrawImage& draw_image) { | 368 const DrawImage& draw_image) { |
| 443 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); | |
| 444 | |
| 445 // We are being called during raster. The context lock must already be | 369 // We are being called during raster. The context lock must already be |
| 446 // acquired by the caller. | 370 // acquired by the caller. |
| 447 context_->GetLock()->AssertAcquired(); | 371 context_->GetLock()->AssertAcquired(); |
| 448 | 372 |
| 449 if (SkipImage(draw_image)) | 373 if (SkipImage(draw_image)) |
| 450 return DecodedDrawImage(nullptr, draw_image.filter_quality()); | 374 return DecodedDrawImage(nullptr, draw_image.filter_quality()); |
| 451 | 375 |
| 376 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); |
| 377 |
| 452 base::AutoLock lock(lock_); | 378 base::AutoLock lock(lock_); |
| 453 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 379 const uint32_t unique_id = draw_image.image()->uniqueID(); |
| 454 if (!image_data) { | 380 auto found = image_data_.Peek(unique_id); |
| 381 if (found == image_data_.end()) { |
| 455 // We didn't find the image, create a new entry. | 382 // We didn't find the image, create a new entry. |
| 456 auto data = CreateImageData(draw_image); | 383 auto data = CreateImageData(draw_image); |
| 457 image_data = data.get(); | 384 found = image_data_.Put(unique_id, std::move(data)); |
| 458 persistent_cache_.Put(draw_image.image()->uniqueID(), std::move(data)); | |
| 459 } | 385 } |
| 460 | 386 |
| 387 ImageData* image_data = found->second.get(); |
| 388 |
| 461 if (!image_data->upload.budgeted) { | 389 if (!image_data->upload.budgeted) { |
| 462 // If image data is not budgeted by this point, it is at-raster. | 390 // If image data is not budgeted by this point, it is at-raster. |
| 463 image_data->is_at_raster = true; | 391 image_data->is_at_raster = true; |
| 464 } | 392 } |
| 465 | 393 |
| 466 // Ref the image and decode so that they stay alive while we are | 394 // Ref the image and decode so that they stay alive while we are |
| 467 // decoding/uploading. | 395 // decoding/uploading. |
| 468 RefImage(draw_image); | 396 RefImage(draw_image); |
| 469 RefImageDecode(draw_image); | 397 RefImageDecode(draw_image); |
| 470 | 398 |
| 471 // We may or may not need to decode and upload the image we've found; the | 399 // We may or may not need to decode and upload the image we've found; the |
| 472 // following functions early-out if we have already decoded. | 400 // following functions early-out if we have already decoded. |
| 473 DecodeImageIfNecessary(draw_image, image_data); | 401 DecodeImageIfNecessary(draw_image, image_data); |
| 474 UploadImageIfNecessary(draw_image, image_data); | 402 UploadImageIfNecessary(draw_image, image_data); |
| 475 // Unref the image decode, but not the image. The image ref will be released | 403 // Unref the image decode, but not the image. The image ref will be released |
| 476 // in DrawWithImageFinished. | 404 // in DrawWithImageFinished. |
| 477 UnrefImageDecode(draw_image); | 405 UnrefImageDecode(draw_image); |
| 478 | 406 |
| 479 sk_sp<SkImage> image = image_data->upload.image(); | 407 sk_sp<SkImage> image = image_data->upload.image(); |
| 480 image_data->upload.mark_used(); | 408 image_data->upload.mark_used(); |
| 481 DCHECK(image || image_data->decode.decode_failure); | 409 DCHECK(image || image_data->decode.decode_failure); |
| 482 | 410 |
| 483 SkSize scale_factor = CalculateScaleFactorForMipLevel( | 411 DecodedDrawImage decoded_draw_image(std::move(image), |
| 484 draw_image, image_data->upload_scale_mip_level); | |
| 485 DecodedDrawImage decoded_draw_image(std::move(image), SkSize(), scale_factor, | |
| 486 draw_image.filter_quality()); | 412 draw_image.filter_quality()); |
| 487 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster); | 413 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster); |
| 488 return decoded_draw_image; | 414 return decoded_draw_image; |
| 489 } | 415 } |
| 490 | 416 |
| 491 void GpuImageDecodeController::DrawWithImageFinished( | 417 void GpuImageDecodeController::DrawWithImageFinished( |
| 492 const DrawImage& draw_image, | 418 const DrawImage& draw_image, |
| 493 const DecodedDrawImage& decoded_draw_image) { | 419 const DecodedDrawImage& decoded_draw_image) { |
| 494 TRACE_EVENT0("cc", "GpuImageDecodeController::DrawWithImageFinished"); | |
| 495 | |
| 496 // We are being called during raster. The context lock must already be | 420 // We are being called during raster. The context lock must already be |
| 497 // acquired by the caller. | 421 // acquired by the caller. |
| 498 context_->GetLock()->AssertAcquired(); | 422 context_->GetLock()->AssertAcquired(); |
| 499 | 423 |
| 500 if (SkipImage(draw_image)) | 424 if (SkipImage(draw_image)) |
| 501 return; | 425 return; |
| 502 | 426 |
| 503 base::AutoLock lock(lock_); | 427 base::AutoLock lock(lock_); |
| 504 UnrefImageInternal(draw_image); | 428 UnrefImageInternal(draw_image); |
| 505 | 429 |
| (...skipping 23 matching lines...) |
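GetDecodedImageForDraw and DrawWithImageFinished above form a paired, caller-driven protocol: the raster path must already hold the context lock, the first call refs the image (decoding and uploading on the spot if needed), and the second call drops that ref. A hypothetical caller, sketched only to show the pairing; the rasterization step itself is elided and nothing here is part of this CL:

```cpp
// Assumes the caller already holds the GL context lock, which both
// controller methods assert via context_->GetLock()->AssertAcquired().
void RasterOneImage(GpuImageDecodeController* controller,
                    const DrawImage& draw_image) {
  DecodedDrawImage decoded = controller->GetDecodedImageForDraw(draw_image);
  // ... rasterize with the decoded image (omitted) ...
  // Drops the image ref taken by GetDecodedImageForDraw; the decode ref was
  // already released inside that call.
  controller->DrawWithImageFinished(draw_image, decoded);
}
```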
| 529 DeletePendingImages(); | 453 DeletePendingImages(); |
| 530 } else { | 454 } else { |
| 531 base::AutoLock lock(lock_); | 455 base::AutoLock lock(lock_); |
| 532 cached_bytes_limit_ = max_gpu_image_bytes_; | 456 cached_bytes_limit_ = max_gpu_image_bytes_; |
| 533 } | 457 } |
| 534 } | 458 } |
| 535 | 459 |
| 536 bool GpuImageDecodeController::OnMemoryDump( | 460 bool GpuImageDecodeController::OnMemoryDump( |
| 537 const base::trace_event::MemoryDumpArgs& args, | 461 const base::trace_event::MemoryDumpArgs& args, |
| 538 base::trace_event::ProcessMemoryDump* pmd) { | 462 base::trace_event::ProcessMemoryDump* pmd) { |
| 539 for (const auto& image_pair : persistent_cache_) { | 463 for (const auto& image_pair : image_data_) { |
| 540 const ImageData* image_data = image_pair.second.get(); | 464 const ImageData* image_data = image_pair.second.get(); |
| 541 const uint32_t image_id = image_pair.first; | 465 const uint32_t image_id = image_pair.first; |
| 542 | 466 |
| 543 // If we have discardable decoded data, dump this here. | 467 // If we have discardable decoded data, dump this here. |
| 544 if (image_data->decode.data()) { | 468 if (image_data->decode.data()) { |
| 545 std::string discardable_dump_name = base::StringPrintf( | 469 std::string discardable_dump_name = base::StringPrintf( |
| 546 "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d", | 470 "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d", |
| 547 reinterpret_cast<uintptr_t>(this), image_id); | 471 reinterpret_cast<uintptr_t>(this), image_id); |
| 548 base::trace_event::MemoryAllocatorDump* dump = | 472 base::trace_event::MemoryAllocatorDump* dump = |
| 549 image_data->decode.data()->CreateMemoryAllocatorDump( | 473 image_data->decode.data()->CreateMemoryAllocatorDump( |
| 550 discardable_dump_name.c_str(), pmd); | 474 discardable_dump_name.c_str(), pmd); |
| 475 |
| 551 // If our image is locked, dump the "locked_size" as an additional column. | 476 // If our image is locked, dump the "locked_size" as an additional column. |
| 552 // This lets us see the amount of discardable which is contributing to | 477 // This lets us see the amount of discardable which is contributing to |
| 553 // memory pressure. | 478 // memory pressure. |
| 554 if (image_data->decode.is_locked()) { | 479 if (image_data->decode.is_locked()) { |
| 555 dump->AddScalar("locked_size", | 480 dump->AddScalar("locked_size", |
| 556 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | 481 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 557 image_data->size); | 482 image_data->size); |
| 558 } | 483 } |
| 559 } | 484 } |
| 560 | 485 |
| (...skipping 27 matching lines...) |
| 588 pmd->CreateSharedGlobalAllocatorDump(guid); | 513 pmd->CreateSharedGlobalAllocatorDump(guid); |
| 589 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); | 514 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); |
| 590 } | 515 } |
| 591 } | 516 } |
| 592 | 517 |
| 593 return true; | 518 return true; |
| 594 } | 519 } |
| 595 | 520 |
| 596 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { | 521 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { |
| 597 base::AutoLock lock(lock_); | 522 base::AutoLock lock(lock_); |
| 598 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 523 auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| 599 DCHECK(image_data); | 524 DCHECK(found != image_data_.end()); |
| 600 DCHECK(!image_data->is_at_raster); | 525 DCHECK(!found->second->is_at_raster); |
| 601 DecodeImageIfNecessary(draw_image, image_data); | 526 DecodeImageIfNecessary(draw_image, found->second.get()); |
| 602 } | 527 } |
| 603 | 528 |
| 604 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { | 529 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { |
| 605 ContextProvider::ScopedContextLock context_lock(context_); | 530 ContextProvider::ScopedContextLock context_lock(context_); |
| 606 base::AutoLock lock(lock_); | 531 base::AutoLock lock(lock_); |
| 607 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 532 auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| 608 DCHECK(image_data); | 533 DCHECK(found != image_data_.end()); |
| 609 DCHECK(!image_data->is_at_raster); | 534 DCHECK(!found->second->is_at_raster); |
| 610 UploadImageIfNecessary(draw_image, image_data); | 535 UploadImageIfNecessary(draw_image, found->second.get()); |
| 611 } | 536 } |
| 612 | 537 |
| 613 void GpuImageDecodeController::OnImageDecodeTaskCompleted( | 538 void GpuImageDecodeController::OnImageDecodeTaskCompleted( |
| 614 const DrawImage& draw_image) { | 539 const DrawImage& draw_image) { |
| 615 base::AutoLock lock(lock_); | 540 base::AutoLock lock(lock_); |
| 616 // Decode task is complete, remove our reference to it. | 541 // Decode task is complete, remove it from our list of pending tasks. |
| 617 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 542 pending_image_decode_tasks_.erase(draw_image.image()->uniqueID()); |
| 618 DCHECK(image_data); | |
| 619 DCHECK(image_data->decode.task); | |
| 620 image_data->decode.task = nullptr; | |
| 621 | 543 |
| 622 // While the decode task is active, we keep a ref on the decoded data. | 544 // While the decode task is active, we keep a ref on the decoded data. |
| 623 // Release that ref now. | 545 // Release that ref now. |
| 624 UnrefImageDecode(draw_image); | 546 UnrefImageDecode(draw_image); |
| 625 } | 547 } |
| 626 | 548 |
| 627 void GpuImageDecodeController::OnImageUploadTaskCompleted( | 549 void GpuImageDecodeController::OnImageUploadTaskCompleted( |
| 628 const DrawImage& draw_image) { | 550 const DrawImage& draw_image) { |
| 629 base::AutoLock lock(lock_); | 551 base::AutoLock lock(lock_); |
| 630 // Upload task is complete, remove our reference to it. | 552 // Upload task is complete, remove it from our list of pending tasks. |
| 631 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 553 pending_image_upload_tasks_.erase(draw_image.image()->uniqueID()); |
| 632 DCHECK(image_data); | |
| 633 DCHECK(image_data->upload.task); | |
| 634 image_data->upload.task = nullptr; | |
| 635 | 554 |
| 636 // While the upload task is active, we keep a ref on both the image it will be | 555 // While the upload task is active, we keep a ref on both the image it will be |
| 637 // populating and the decode it needs to populate it. Release these | 556 // populating and the decode it needs to populate it. Release these |
| 638 // refs now. | 557 // refs now. |
| 639 UnrefImageDecode(draw_image); | 558 UnrefImageDecode(draw_image); |
| 640 UnrefImageInternal(draw_image); | 559 UnrefImageInternal(draw_image); |
| 641 } | 560 } |
| 642 | 561 |
| 643 // Checks whether an image decode already exists. If not, returns a task to produce | 562 // Checks whether an image decode already exists. If not, returns a task to produce |
| 644 // the requested decode. | 563 // the requested decode. |
| 645 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef( | 564 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef( |
| 646 const DrawImage& draw_image, | 565 const DrawImage& draw_image, |
| 647 const TracingInfo& tracing_info) { | 566 const TracingInfo& tracing_info) { |
| 648 lock_.AssertAcquired(); | 567 lock_.AssertAcquired(); |
| 649 | 568 |
| 569 const uint32_t image_id = draw_image.image()->uniqueID(); |
| 570 |
| 650 // This ref is kept alive while an upload task may need this decode. We | 571 // This ref is kept alive while an upload task may need this decode. We |
| 651 // release this ref in UploadTaskCompleted. | 572 // release this ref in UploadTaskCompleted. |
| 652 RefImageDecode(draw_image); | 573 RefImageDecode(draw_image); |
| 653 | 574 |
| 654 ImageData* image_data = GetImageDataForDrawImage(draw_image); | 575 auto found = image_data_.Peek(image_id); |
| 655 DCHECK(image_data); | 576 if (found != image_data_.end() && found->second->decode.is_locked()) { |
| 656 if (image_data->decode.is_locked()) { | |
| 657 // We should never be creating a decode task for an at-raster image. | 577 // We should never be creating a decode task for an at-raster image. |
| 658 DCHECK(!image_data->is_at_raster); | 578 DCHECK(!found->second->is_at_raster); |
| 659 // We should never be creating a decode for an already-uploaded image. | 579 // We should never be creating a decode for an already-uploaded image. |
| 660 DCHECK(!image_data->upload.image()); | 580 DCHECK(!found->second->upload.image()); |
| 661 return nullptr; | 581 return nullptr; |
| 662 } | 582 } |
| 663 | 583 |
| 664 // We didn't have an existing locked image, create a task to lock or decode. | 584 // We didn't have an existing locked image, create a task to lock or decode. |
| 665 scoped_refptr<TileTask>& existing_task = image_data->decode.task; | 585 scoped_refptr<TileTask>& existing_task = |
| 586 pending_image_decode_tasks_[image_id]; |
| 666 if (!existing_task) { | 587 if (!existing_task) { |
| 667 // Ref image decode and create a decode task. This ref will be released in | 588 // Ref image decode and create a decode task. This ref will be released in |
| 668 // DecodeTaskCompleted. | 589 // DecodeTaskCompleted. |
| 669 RefImageDecode(draw_image); | 590 RefImageDecode(draw_image); |
| 670 existing_task = make_scoped_refptr( | 591 existing_task = make_scoped_refptr( |
| 671 new ImageDecodeTaskImpl(this, draw_image, tracing_info)); | 592 new ImageDecodeTaskImpl(this, draw_image, tracing_info)); |
| 672 } | 593 } |
| 673 return existing_task; | 594 return existing_task; |
| 674 } | 595 } |
| 675 | 596 |
| 676 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) { | 597 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) { |
| 677 lock_.AssertAcquired(); | 598 lock_.AssertAcquired(); |
| 678 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); | 599 auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| 679 DCHECK(found != in_use_cache_.end()); | 600 DCHECK(found != image_data_.end()); |
| 680 ++found->second.ref_count; | 601 ++found->second->decode.ref_count; |
| 681 ++found->second.image_data->decode.ref_count; | 602 RefCountChanged(found->second.get()); |
| 682 OwnershipChanged(found->second.image_data.get()); | |
| 683 } | 603 } |
| 684 | 604 |
| 685 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) { | 605 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) { |
| 686 lock_.AssertAcquired(); | 606 lock_.AssertAcquired(); |
| 687 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); | 607 auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| 688 DCHECK(found != in_use_cache_.end()); | 608 DCHECK(found != image_data_.end()); |
| 689 DCHECK_GT(found->second.image_data->decode.ref_count, 0u); | 609 DCHECK_GT(found->second->decode.ref_count, 0u); |
| 690 DCHECK_GT(found->second.ref_count, 0u); | 610 --found->second->decode.ref_count; |
| 691 --found->second.ref_count; | 611 RefCountChanged(found->second.get()); |
| 692 --found->second.image_data->decode.ref_count; | |
| 693 OwnershipChanged(found->second.image_data.get()); | |
| 694 if (found->second.ref_count == 0u) { | |
| 695 in_use_cache_.erase(found); | |
| 696 } | |
| 697 } | 612 } |
| 698 | 613 |
| 699 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) { | 614 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) { |
| 700 lock_.AssertAcquired(); | 615 lock_.AssertAcquired(); |
| 701 InUseCacheKey key = GenerateInUseCacheKey(draw_image); | 616 auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| 702 auto found = in_use_cache_.find(key); | 617 DCHECK(found != image_data_.end()); |
| 703 | 618 ++found->second->upload.ref_count; |
| 704 // If no secondary cache entry was found for the given |draw_image|, then | 619 RefCountChanged(found->second.get()); |
| 705 // the draw_image only exists in the |persistent_cache_|. Create an in-use | |
| 706 // cache entry now. | |
| 707 if (found == in_use_cache_.end()) { | |
| 708 auto found_image = persistent_cache_.Peek(draw_image.image()->uniqueID()); | |
| 709 DCHECK(found_image != persistent_cache_.end()); | |
| 710 DCHECK(found_image->second->upload_scale_mip_level <= | |
| 711 CalculateUploadScaleMipLevel(draw_image)); | |
| 712 found = in_use_cache_ | |
| 713 .insert(InUseCache::value_type( | |
| 714 key, InUseCacheEntry(found_image->second))) | |
| 715 .first; | |
| 716 } | |
| 717 | |
| 718 DCHECK(found != in_use_cache_.end()); | |
| 719 ++found->second.ref_count; | |
| 720 ++found->second.image_data->upload.ref_count; | |
| 721 OwnershipChanged(found->second.image_data.get()); | |
| 722 } | 620 } |
| 723 | 621 |
| 724 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) { | 622 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) { |
| 725 lock_.AssertAcquired(); | 623 lock_.AssertAcquired(); |
| 726 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); | 624 auto found = image_data_.Peek(draw_image.image()->uniqueID()); |
| 727 DCHECK(found != in_use_cache_.end()); | 625 DCHECK(found != image_data_.end()); |
| 728 DCHECK_GT(found->second.image_data->upload.ref_count, 0u); | 626 DCHECK_GT(found->second->upload.ref_count, 0u); |
| 729 DCHECK_GT(found->second.ref_count, 0u); | 627 --found->second->upload.ref_count; |
| 730 --found->second.ref_count; | 628 if (found->second->upload.ref_count == 0) |
| 731 --found->second.image_data->upload.ref_count; | 629 found->second->upload.notify_ref_reached_zero(); |
| 732 OwnershipChanged(found->second.image_data.get()); | 630 RefCountChanged(found->second.get()); |
| 733 if (found->second.ref_count == 0u) { | |
| 734 in_use_cache_.erase(found); | |
| 735 } | |
| 736 } | 631 } |
| 737 | 632 |
| 738 // Called any time an image or decode ref count changes. Takes care of any | 633 // Called any time an image or decode ref count changes. Takes care of any |
| 739 // necessary memory budget book-keeping and cleanup. | 634 // necessary memory budget book-keeping and cleanup. |
| 740 void GpuImageDecodeController::OwnershipChanged(ImageData* image_data) { | 635 void GpuImageDecodeController::RefCountChanged(ImageData* image_data) { |
| 741 lock_.AssertAcquired(); | 636 lock_.AssertAcquired(); |
| 742 | 637 |
| 743 bool has_any_refs = | 638 bool has_any_refs = |
| 744 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0; | 639 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0; |
| 745 | 640 |
| 746 // Don't keep around orphaned images. | |
| 747 if (image_data->is_orphaned && !has_any_refs) { | |
| 748 images_pending_deletion_.push_back(std::move(image_data->upload.image())); | |
| 749 image_data->upload.SetImage(nullptr); | |
| 750 } | |
| 751 | |
| 752 // Don't keep CPU images if they are unused; these images can be recreated by | 641 // Don't keep CPU images if they are unused; these images can be recreated by |
| 753 // re-locking discardable (rather than requiring a full upload like GPU | 642 // re-locking discardable (rather than requiring a full upload like GPU |
| 754 // images). | 643 // images). |
| 755 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { | 644 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { |
| 756 images_pending_deletion_.push_back(image_data->upload.image()); | 645 images_pending_deletion_.push_back(image_data->upload.image()); |
| 757 image_data->upload.SetImage(nullptr); | 646 image_data->upload.SetImage(nullptr); |
| 758 } | 647 } |
| 759 | 648 |
| 760 if (image_data->is_at_raster && !has_any_refs) { | 649 if (image_data->is_at_raster && !has_any_refs) { |
| 761 // We have an at-raster image which has reached zero refs. If it won't fit | 650 // We have an at-raster image which has reached zero refs. If it won't fit |
| (...skipping 65 matching lines...) |
| 827 lock_.AssertAcquired(); | 716 lock_.AssertAcquired(); |
| 828 | 717 |
| 829 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | 718 if (CanFitSize(required_size) && !ExceedsPreferredCount()) |
| 830 return true; | 719 return true; |
| 831 | 720 |
| 832 // While we are over memory or preferred item capacity, we iterate through | 721 // While we are over memory or preferred item capacity, we iterate through |
| 833 // our set of cached image data in LRU order. For each image, we can do two | 722 // our set of cached image data in LRU order. For each image, we can do two |
| 834 // things: 1) We can free the uploaded image, reducing the memory usage of | 723 // things: 1) We can free the uploaded image, reducing the memory usage of |
| 835 // the cache and 2) we can remove the entry entirely, reducing the count of | 724 // the cache and 2) we can remove the entry entirely, reducing the count of |
| 836 // elements in the cache. | 725 // elements in the cache. |
| 837 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) { | 726 for (auto it = image_data_.rbegin(); it != image_data_.rend();) { |
| 838 if (it->second->decode.ref_count != 0 || | 727 if (it->second->decode.ref_count != 0 || |
| 839 it->second->upload.ref_count != 0) { | 728 it->second->upload.ref_count != 0) { |
| 840 ++it; | 729 ++it; |
| 841 continue; | 730 continue; |
| 842 } | 731 } |
| 843 | 732 |
| 844 // Current entry has no refs. Ensure it is not locked. | 733 // Current entry has no refs. Ensure it is not locked. |
| 845 DCHECK(!it->second->decode.is_locked()); | 734 DCHECK(!it->second->decode.is_locked()); |
| 846 | 735 |
| 847 // If an image without refs is budgeted, it must have an associated image | 736 // If an image without refs is budgeted, it must have an associated image |
| 848 // upload. | 737 // upload. |
| 849 DCHECK(!it->second->upload.budgeted || it->second->upload.image()); | 738 DCHECK(!it->second->upload.budgeted || it->second->upload.image()); |
| 850 | 739 |
| 851 // Free the uploaded image if possible. | 740 // Free the uploaded image if possible. |
| 852 if (it->second->upload.image()) { | 741 if (it->second->upload.image()) { |
| 853 DCHECK(it->second->upload.budgeted); | 742 DCHECK(it->second->upload.budgeted); |
| 854 DCHECK_GE(bytes_used_, it->second->size); | 743 DCHECK_GE(bytes_used_, it->second->size); |
| 855 bytes_used_ -= it->second->size; | 744 bytes_used_ -= it->second->size; |
| 856 images_pending_deletion_.push_back(it->second->upload.image()); | 745 images_pending_deletion_.push_back(it->second->upload.image()); |
| 857 it->second->upload.SetImage(nullptr); | 746 it->second->upload.SetImage(nullptr); |
| 858 it->second->upload.budgeted = false; | 747 it->second->upload.budgeted = false; |
| 859 } | 748 } |
| 860 | 749 |
| 861 // Free the entire entry if necessary. | 750 // Free the entire entry if necessary. |
| 862 if (ExceedsPreferredCount()) { | 751 if (ExceedsPreferredCount()) { |
| 863 it = persistent_cache_.Erase(it); | 752 it = image_data_.Erase(it); |
| 864 } else { | 753 } else { |
| 865 ++it; | 754 ++it; |
| 866 } | 755 } |
| 867 | 756 |
| 868 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | 757 if (CanFitSize(required_size) && !ExceedsPreferredCount()) |
| 869 return true; | 758 return true; |
| 870 } | 759 } |
| 871 | 760 |
| 872 // Preferred count is only used as a guideline when trimming the cache. Allow | 761 // Preferred count is only used as a guideline when trimming the cache. Allow |
| 873 // new elements to be added as long as we are below our size limit. | 762 // new elements to be added as long as we are below our size limit. |
| 874 return CanFitSize(required_size); | 763 return CanFitSize(required_size); |
| 875 } | 764 } |
| 876 | 765 |
| 877 bool GpuImageDecodeController::CanFitSize(size_t size) const { | 766 bool GpuImageDecodeController::CanFitSize(size_t size) const { |
| 878 lock_.AssertAcquired(); | 767 lock_.AssertAcquired(); |
| 879 | 768 |
| 880 base::CheckedNumeric<uint32_t> new_size(bytes_used_); | 769 base::CheckedNumeric<uint32_t> new_size(bytes_used_); |
| 881 new_size += size; | 770 new_size += size; |
| 882 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_; | 771 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_; |
| 883 } | 772 } |
| 884 | 773 |
| 885 bool GpuImageDecodeController::ExceedsPreferredCount() const { | 774 bool GpuImageDecodeController::ExceedsPreferredCount() const { |
| 886 lock_.AssertAcquired(); | 775 lock_.AssertAcquired(); |
| 887 | 776 |
| 888 return persistent_cache_.size() > cached_items_limit_; | 777 return image_data_.size() > cached_items_limit_; |
| 889 } | 778 } |
| 890 | 779 |
| 891 void GpuImageDecodeController::DecodeImageIfNecessary( | 780 void GpuImageDecodeController::DecodeImageIfNecessary( |
| 892 const DrawImage& draw_image, | 781 const DrawImage& draw_image, |
| 893 ImageData* image_data) { | 782 ImageData* image_data) { |
| 894 lock_.AssertAcquired(); | 783 lock_.AssertAcquired(); |
| 895 | 784 |
| 896 DCHECK_GT(image_data->decode.ref_count, 0u); | 785 DCHECK_GT(image_data->decode.ref_count, 0u); |
| 897 | 786 |
| 898 if (image_data->decode.decode_failure) { | 787 if (image_data->decode.decode_failure) { |
| (...skipping 16 matching lines...) |
| 915 | 804 |
| 916 image_data->decode.ResetData(); | 805 image_data->decode.ResetData(); |
| 917 std::unique_ptr<base::DiscardableMemory> backing_memory; | 806 std::unique_ptr<base::DiscardableMemory> backing_memory; |
| 918 { | 807 { |
| 919 base::AutoUnlock unlock(lock_); | 808 base::AutoUnlock unlock(lock_); |
| 920 switch (image_data->mode) { | 809 switch (image_data->mode) { |
| 921 case DecodedDataMode::CPU: { | 810 case DecodedDataMode::CPU: { |
| 922 backing_memory = | 811 backing_memory = |
| 923 base::DiscardableMemoryAllocator::GetInstance() | 812 base::DiscardableMemoryAllocator::GetInstance() |
| 924 ->AllocateLockedDiscardableMemory(image_data->size); | 813 ->AllocateLockedDiscardableMemory(image_data->size); |
| 925 SkImageInfo image_info = CreateImageInfoForDrawImage( | 814 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); |
| 926 draw_image, image_data->upload_scale_mip_level); | 815 if (!draw_image.image()->readPixels(image_info, backing_memory->data(), |
| 927 // In order to match GPU scaling quality (which uses mip-maps at high | 816 image_info.minRowBytes(), 0, 0, |
| 928 // quality), we want to use at most medium filter quality for the | 817 SkImage::kDisallow_CachingHint)) { |
| 929 // scale. | |
| 930 SkPixmap image_pixmap(image_info, backing_memory->data(), | |
| 931 image_info.minRowBytes()); | |
| 932 // Note that scalePixels falls back to readPixels if the scale is 1x, so | |
| 933 // no need to special case that as an optimization. | |
| 934 if (!draw_image.image()->scalePixels( | |
| 935 image_pixmap, CalculateUploadScaleFilterQuality(draw_image), | |
| 936 SkImage::kDisallow_CachingHint)) { | |
| 937 backing_memory.reset(); | 818 backing_memory.reset(); |
| 938 } | 819 } |
| 939 break; | 820 break; |
| 940 } | 821 } |
| 941 case DecodedDataMode::GPU: { | 822 case DecodedDataMode::GPU: { |
| 942 backing_memory = | 823 backing_memory = |
| 943 base::DiscardableMemoryAllocator::GetInstance() | 824 base::DiscardableMemoryAllocator::GetInstance() |
| 944 ->AllocateLockedDiscardableMemory(image_data->size); | 825 ->AllocateLockedDiscardableMemory(image_data->size); |
| 945 auto params = | 826 auto params = ParamsFromDrawImage(draw_image); |
| 946 ParamsFromDrawImage(draw_image, image_data->upload_scale_mip_level); | |
| 947 if (!draw_image.image()->getDeferredTextureImageData( | 827 if (!draw_image.image()->getDeferredTextureImageData( |
| 948 *context_threadsafe_proxy_.get(), ¶ms, 1, | 828 *context_threadsafe_proxy_.get(), ¶ms, 1, |
| 949 backing_memory->data())) { | 829 backing_memory->data())) { |
| 950 backing_memory.reset(); | 830 backing_memory.reset(); |
| 951 } | 831 } |
| 952 break; | 832 break; |
| 953 } | 833 } |
| 954 } | 834 } |
| 955 } | 835 } |
| 956 | 836 |
| (...skipping 35 matching lines...) |
| 992 // We are about to upload a new image and are holding the context lock. | 872 // We are about to upload a new image and are holding the context lock. |
| 993 // Ensure that any images which have been marked for deletion are actually | 873 // Ensure that any images which have been marked for deletion are actually |
| 994 // cleaned up so we don't exceed our memory limit during this upload. | 874 // cleaned up so we don't exceed our memory limit during this upload. |
| 995 DeletePendingImages(); | 875 DeletePendingImages(); |
| 996 | 876 |
| 997 sk_sp<SkImage> uploaded_image; | 877 sk_sp<SkImage> uploaded_image; |
| 998 { | 878 { |
| 999 base::AutoUnlock unlock(lock_); | 879 base::AutoUnlock unlock(lock_); |
| 1000 switch (image_data->mode) { | 880 switch (image_data->mode) { |
| 1001 case DecodedDataMode::CPU: { | 881 case DecodedDataMode::CPU: { |
| 1002 SkImageInfo image_info = CreateImageInfoForDrawImage( | 882 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); |
| 1003 draw_image, image_data->upload_scale_mip_level); | |
| 1004 SkPixmap pixmap(image_info, image_data->decode.data()->data(), | 883 SkPixmap pixmap(image_info, image_data->decode.data()->data(), |
| 1005 image_info.minRowBytes()); | 884 image_info.minRowBytes()); |
| 1006 uploaded_image = | 885 uploaded_image = |
| 1007 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr); | 886 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr); |
| 1008 break; | 887 break; |
| 1009 } | 888 } |
| 1010 case DecodedDataMode::GPU: { | 889 case DecodedDataMode::GPU: { |
| 1011 uploaded_image = SkImage::MakeFromDeferredTextureImageData( | 890 uploaded_image = SkImage::MakeFromDeferredTextureImageData( |
| 1012 context_->GrContext(), image_data->decode.data()->data(), | 891 context_->GrContext(), image_data->decode.data()->data(), |
| 1013 SkBudgeted::kNo); | 892 SkBudgeted::kNo); |
| 1014 break; | 893 break; |
| 1015 } | 894 } |
| 1016 } | 895 } |
| 1017 } | 896 } |
| 1018 image_data->decode.mark_used(); | 897 image_data->decode.mark_used(); |
| 1019 DCHECK(uploaded_image); | 898 DCHECK(uploaded_image); |
| 1020 | 899 |
| 1021 // At-raster may have decoded this while we were unlocked. If so, ignore our | 900 // At-raster may have decoded this while we were unlocked. If so, ignore our |
| 1022 // result. | 901 // result. |
| 1023 if (!image_data->upload.image()) | 902 if (!image_data->upload.image()) |
| 1024 image_data->upload.SetImage(std::move(uploaded_image)); | 903 image_data->upload.SetImage(std::move(uploaded_image)); |
| 1025 } | 904 } |
| 1026 | 905 |
| 1027 scoped_refptr<GpuImageDecodeController::ImageData> | 906 std::unique_ptr<GpuImageDecodeController::ImageData> |
| 1028 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { | 907 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { |
| 1029 lock_.AssertAcquired(); | 908 lock_.AssertAcquired(); |
| 1030 | 909 |
| 1031 DecodedDataMode mode; | 910 DecodedDataMode mode; |
| 1032 int upload_scale_mip_level = CalculateUploadScaleMipLevel(draw_image); | 911 SkImageInfo info = CreateImageInfoForDrawImage(draw_image); |
| 1033 SkImage::DeferredTextureImageUsageParams params = | 912 SkImage::DeferredTextureImageUsageParams params = |
| 1034 ParamsFromDrawImage(draw_image, upload_scale_mip_level); | 913 ParamsFromDrawImage(draw_image); |
| 1035 size_t data_size = draw_image.image()->getDeferredTextureImageData( | 914 size_t data_size = draw_image.image()->getDeferredTextureImageData( |
| 1036 *context_threadsafe_proxy_.get(), ¶ms, 1, nullptr); | 915 *context_threadsafe_proxy_.get(), ¶ms, 1, nullptr); |
| 1037 | 916 |
| 1038 if (data_size == 0) { | 917 if (data_size == 0) { |
| 1039 // Can't upload image, too large or other failure. Try to use SW fallback. | 918 // Can't upload image, too large or other failure. Try to use SW fallback. |
| 1040 SkImageInfo image_info = | 919 data_size = info.getSafeSize(info.minRowBytes()); |
| 1041 CreateImageInfoForDrawImage(draw_image, upload_scale_mip_level); | |
| 1042 data_size = image_info.getSafeSize(image_info.minRowBytes()); | |
| 1043 mode = DecodedDataMode::CPU; | 920 mode = DecodedDataMode::CPU; |
| 1044 } else { | 921 } else { |
| 1045 mode = DecodedDataMode::GPU; | 922 mode = DecodedDataMode::GPU; |
| 1046 } | 923 } |
| 1047 | 924 |
| 1048 return make_scoped_refptr( | 925 return base::WrapUnique(new ImageData(mode, data_size)); |
| 1049 new ImageData(mode, data_size, upload_scale_mip_level, | |
| 1050 CalculateUploadScaleFilterQuality(draw_image))); | |
| 1051 } | 926 } |
| 1052 | 927 |
| 1053 void GpuImageDecodeController::DeletePendingImages() { | 928 void GpuImageDecodeController::DeletePendingImages() { |
| 1054 context_->GetLock()->AssertAcquired(); | 929 context_->GetLock()->AssertAcquired(); |
| 1055 lock_.AssertAcquired(); | 930 lock_.AssertAcquired(); |
| 1056 images_pending_deletion_.clear(); | 931 images_pending_deletion_.clear(); |
| 1057 } | 932 } |
| 1058 | 933 |
| 1059 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( | 934 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( |
| 1060 const DrawImage& draw_image, | 935 const DrawImage& draw_image) const { |
| 1061 int upload_scale_mip_level) const { | 936 return SkImageInfo::Make( |
| 1062 gfx::Size mip_size = | 937 draw_image.image()->width(), draw_image.image()->height(), |
| 1063 CalculateSizeForMipLevel(draw_image, upload_scale_mip_level); | 938 ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType); |
| 1064 return SkImageInfo::Make(mip_size.width(), mip_size.height(), | |
| 1065 ResourceFormatToClosestSkColorType(format_), | |
| 1066 kPremul_SkAlphaType); | |
| 1067 } | |
| 1068 | |
| 1069 // Tries to find an ImageData that can be used to draw the provided | |
| 1070 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one | |
| 1071 // cannot be found, it looks for a compatible entry in our |persistent_cache_|. | |
| 1072 GpuImageDecodeController::ImageData* | |
| 1073 GpuImageDecodeController::GetImageDataForDrawImage( | |
| 1074 const DrawImage& draw_image) { | |
| 1075 lock_.AssertAcquired(); | |
| 1076 auto found_in_use = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); | |
| 1077 if (found_in_use != in_use_cache_.end()) | |
| 1078 return found_in_use->second.image_data.get(); | |
| 1079 | |
| 1080 auto found_persistent = persistent_cache_.Get(draw_image.image()->uniqueID()); | |
| 1081 if (found_persistent != persistent_cache_.end()) { | |
| 1082 ImageData* image_data = found_persistent->second.get(); | |
| 1083 if (IsCompatible(image_data, draw_image)) { | |
| 1084 return image_data; | |
| 1085 } else { | |
| 1086 found_persistent->second->is_orphaned = true; | |
| 1087 // Call OwnershipChanged before erasing the orphaned task from the | |
| 1088 // persistent cache. This ensures that if the orphaned task has 0 | |
| 1089 // references, it is cleaned up safely before it is deleted. | |
| 1090 OwnershipChanged(image_data); | |
| 1091 persistent_cache_.Erase(found_persistent); | |
| 1092 } | |
| 1093 } | |
| 1094 | |
| 1095 return nullptr; | |
| 1096 } | |
| 1097 | |
| 1098 // Determines if we can draw the provided |draw_image| using the provided | |
| 1099 // |image_data|. This is true if the |image_data| is not scaled, or if it | |
| 1100 // is scaled at an equal or larger scale and with equal or greater quality than | |
| 1101 // the provided |draw_image|. | |
| 1102 bool GpuImageDecodeController::IsCompatible(const ImageData* image_data, | |
| 1103 const DrawImage& draw_image) const { | |
| 1104 bool is_scaled = image_data->upload_scale_mip_level != 0; | |
| 1105 bool scale_is_compatible = CalculateUploadScaleMipLevel(draw_image) >= | |
| 1106 image_data->upload_scale_mip_level; | |
| 1107 bool quality_is_compatible = CalculateUploadScaleFilterQuality(draw_image) <= | |
| 1108 image_data->upload_scale_filter_quality; | |
| 1109 return !is_scaled || (scale_is_compatible && quality_is_compatible); | |
| 1110 } | |
| 1111 | |
| 1112 size_t GpuImageDecodeController::GetDrawImageSizeForTesting( | |
| 1113 const DrawImage& image) { | |
| 1114 base::AutoLock lock(lock_); | |
| 1115 scoped_refptr<ImageData> data = CreateImageData(image); | |
| 1116 return data->size; | |
| 1117 } | 939 } |
| 1118 | 940 |
| 1119 void GpuImageDecodeController::SetImageDecodingFailedForTesting( | 941 void GpuImageDecodeController::SetImageDecodingFailedForTesting( |
| 1120 const DrawImage& image) { | 942 const DrawImage& image) { |
| 1121 base::AutoLock lock(lock_); | 943 base::AutoLock lock(lock_); |
| 1122 auto found = persistent_cache_.Peek(image.image()->uniqueID()); | 944 auto found = image_data_.Peek(image.image()->uniqueID()); |
| 1123 DCHECK(found != persistent_cache_.end()); | 945 DCHECK(found != image_data_.end()); |
| 1124 ImageData* image_data = found->second.get(); | 946 ImageData* image_data = found->second.get(); |
| 1125 image_data->decode.decode_failure = true; | 947 image_data->decode.decode_failure = true; |
| 1126 } | 948 } |
| 1127 | 949 |
| 1128 bool GpuImageDecodeController::DiscardableIsLockedForTesting( | 950 bool GpuImageDecodeController::DiscardableIsLockedForTesting( |
| 1129 const DrawImage& image) { | 951 const DrawImage& image) { |
| 1130 base::AutoLock lock(lock_); | 952 base::AutoLock lock(lock_); |
| 1131 auto found = persistent_cache_.Peek(image.image()->uniqueID()); | 953 auto found = image_data_.Peek(image.image()->uniqueID()); |
| 1132 DCHECK(found != persistent_cache_.end()); | 954 DCHECK(found != image_data_.end()); |
| 1133 ImageData* image_data = found->second.get(); | 955 ImageData* image_data = found->second.get(); |
| 1134 return image_data->decode.is_locked(); | 956 return image_data->decode.is_locked(); |
| 1135 } | 957 } |
| 1136 | 958 |
| 1137 } // namespace cc | 959 } // namespace cc |
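The removed IsCompatible helper (old lines 1102-1110 above) decided whether a cached, pre-scaled entry could serve a new draw: always if the entry was stored unscaled, otherwise only if the draw needed the cached size or smaller (an equal or higher mip level) at the cached filter quality or lower. A standalone restatement with plain ints in place of SkFilterQuality, for illustration only:

```cpp
// Hypothetical restatement of the removed compatibility rule; the enum-typed
// original is in the old-side code above.
bool IsCompatibleSketch(int cached_mip_level, int cached_quality,
                        int draw_mip_level, int draw_quality) {
  const bool is_scaled = cached_mip_level != 0;
  const bool scale_ok = draw_mip_level >= cached_mip_level;  // draw wants same size or smaller
  const bool quality_ok = draw_quality <= cached_quality;    // draw wants same or lower quality
  return !is_scaled || (scale_ok && quality_ok);
}

// Example, with an entry cached at mip level 1, medium quality (2):
//   IsCompatibleSketch(1, 2, /*draw mip*/ 2, /*draw quality*/ 1) == true
//   IsCompatibleSketch(1, 2, /*draw mip*/ 0, /*draw quality*/ 2) == false  (needs full size)
```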