Chromium Code Reviews

Side by Side Diff: cc/tiles/gpu_image_decode_controller.cc

Issue 2042133002: Add display-resolution caching to GPU IDC (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@drt
Patch Set: Add display-resolution caching to GPU IDC Created 4 years, 6 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/tiles/gpu_image_decode_controller.h" 5 #include "cc/tiles/gpu_image_decode_controller.h"
6 6
7 #include <inttypes.h> 7 #include <inttypes.h>
8 8
9 #include "base/memory/discardable_memory_allocator.h" 9 #include "base/memory/discardable_memory_allocator.h"
10 #include "base/memory/ptr_util.h" 10 #include "base/memory/ptr_util.h"
11 #include "base/metrics/histogram_macros.h" 11 #include "base/metrics/histogram_macros.h"
12 #include "base/numerics/safe_math.h" 12 #include "base/numerics/safe_math.h"
13 #include "base/strings/stringprintf.h" 13 #include "base/strings/stringprintf.h"
14 #include "base/threading/thread_task_runner_handle.h" 14 #include "base/threading/thread_task_runner_handle.h"
15 #include "cc/debug/devtools_instrumentation.h" 15 #include "cc/debug/devtools_instrumentation.h"
16 #include "cc/output/context_provider.h" 16 #include "cc/output/context_provider.h"
17 #include "cc/raster/tile_task.h" 17 #include "cc/raster/tile_task.h"
18 #include "cc/resources/resource_format_utils.h" 18 #include "cc/resources/resource_format_utils.h"
19 #include "cc/tiles/mipmap_util.h"
19 #include "gpu/command_buffer/client/context_support.h" 20 #include "gpu/command_buffer/client/context_support.h"
20 #include "gpu/command_buffer/client/gles2_interface.h" 21 #include "gpu/command_buffer/client/gles2_interface.h"
21 #include "gpu_image_decode_controller.h" 22 #include "gpu_image_decode_controller.h"
22 #include "skia/ext/texture_handle.h" 23 #include "skia/ext/texture_handle.h"
23 #include "third_party/skia/include/core/SkCanvas.h" 24 #include "third_party/skia/include/core/SkCanvas.h"
24 #include "third_party/skia/include/core/SkRefCnt.h" 25 #include "third_party/skia/include/core/SkRefCnt.h"
25 #include "third_party/skia/include/core/SkSurface.h" 26 #include "third_party/skia/include/core/SkSurface.h"
26 #include "third_party/skia/include/gpu/GrContext.h" 27 #include "third_party/skia/include/gpu/GrContext.h"
27 #include "third_party/skia/include/gpu/GrTexture.h" 28 #include "third_party/skia/include/gpu/GrTexture.h"
28 #include "ui/gfx/skia_util.h" 29 #include "ui/gfx/skia_util.h"
(...skipping 12 matching lines...)
41 if (std::abs(draw_image.scale().width()) < 42 if (std::abs(draw_image.scale().width()) <
42 std::numeric_limits<float>::epsilon() || 43 std::numeric_limits<float>::epsilon() ||
43 std::abs(draw_image.scale().height()) < 44 std::abs(draw_image.scale().height()) <
44 std::numeric_limits<float>::epsilon()) { 45 std::numeric_limits<float>::epsilon()) {
45 return true; 46 return true;
46 } 47 }
47 return false; 48 return false;
48 } 49 }
49 50
50 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage( 51 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage(
51 const DrawImage& draw_image) { 52 const DrawImage& draw_image,
53 int upload_scale_mip_level) {
52 SkImage::DeferredTextureImageUsageParams params; 54 SkImage::DeferredTextureImageUsageParams params;
53 params.fMatrix = draw_image.matrix(); 55 params.fMatrix = draw_image.matrix();
54 params.fQuality = draw_image.filter_quality(); 56 params.fQuality = draw_image.filter_quality();
57 params.fPreScaleMipLevel = upload_scale_mip_level;
55 58
56 return params; 59 return params;
57 } 60 }
58 61
62 // Calculate the mip level to upload-scale the image to before uploading. We use
63 // mip levels rather than exact scales to increase re-use of scaled images.
64 int CalculateUploadScaleMipLevel(const DrawImage& draw_image) {
65 // Images which are being clipped will have color-bleeding if scaled.
66 // TODO(ericrk): Investigate uploading clipped images to handle this case and
67 // provide further optimization. crbug.com/620899
68 if (draw_image.src_rect() != draw_image.image()->bounds()) {
vmpstr 2016/06/22 21:33:34 nit: no braces
ericrk 2016/06/23 18:16:55 Done.
69 return 0;
70 }
71
72 gfx::Size base_size(draw_image.image()->width(),
73 draw_image.image()->height());
74 // Ceil our scaled size so that the mip map generated is guaranteed to be
75 // larger.
76 gfx::Size scaled_size = gfx::ScaleToCeiledSize(
77 base_size, draw_image.scale().width(), draw_image.scale().height());
78
79 return MipMapUtil::GetLevelForSize(base_size, scaled_size);
80 }
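The function above relies on MipMapUtil (added alongside this change in cc/tiles/mipmap_util.h). As a rough mental model only, and not the actual MipMapUtil implementation, picking a mip level for a target size can be sketched as follows: level n corresponds to a 1/2^n scale, and the smallest level whose size still covers the target is chosen, so many nearby raster scales map onto the same cached level.

  // Illustrative sketch only, not cc::MipMapUtil. Assumes level n == 1/2^n
  // scale and picks the smallest level that still covers the target size.
  // (The real helper's rounding may differ; the shifts here simply floor.)
  int ApproximateMipLevelForSize(int base_width, int base_height,
                                 int target_width, int target_height) {
    int level = 0;
    while (level < 31 && (base_width >> (level + 1)) >= target_width &&
           (base_height >> (level + 1)) >= target_height) {
      ++level;
    }
    return level;
  }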
81
82 // Calculates the scale factor which can be used to scale an image to a given
83 // mip level.
84 SkSize CalculateScaleFactorForMipLevel(const DrawImage& draw_image,
85 int mip_level) {
86 gfx::Size base_size(draw_image.image()->width(),
87 draw_image.image()->height());
88 return MipMapUtil::GetScaleAdjustmentForLevel(base_size, mip_level);
89 }
90
91 // Generates a uint64_t which uniquely identifies a DrawImage for the purposes
92 // of the |in_use_cache_|. The key is generated as follows:
93 // ╔══════════════════════╤═══════════╤═══════════╗
94 // ║ image_id │ mip_level │ quality ║
95 // ╚════════32═bits═══════╧══16═bits══╧══16═bits══╝
96 uint64_t GenerateInUseCacheKey(const DrawImage& draw_image) {
97 static_assert(
98 kLast_SkFilterQuality <= std::numeric_limits<uint16_t>::max(),
99 "InUseCacheKey depends on SkFilterQuality fitting in a uint16_t.");
100
101 SkFilterQuality filter_quality = draw_image.filter_quality();
102 DCHECK_LE(filter_quality, kLast_SkFilterQuality);
103
104 // An image has at most log_2(max(width, height)) mip levels, so given our
105 // usage of 32-bit sizes for images, key.mip_level is at most 31.
106 int32_t mip_level = CalculateUploadScaleMipLevel(draw_image);
107 DCHECK_LT(mip_level, 32);
108
109 return (static_cast<uint64_t>(draw_image.image()->uniqueID()) << 32) |
110 (mip_level << 16) | draw_image.filter_quality();
111 }
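To make the key layout above concrete, the packed fields can be read back out as shown below. This is illustrative only; nothing in the change needs to unpack a key, and the helper name is hypothetical.

  // Illustrative only: recovering the fields packed by GenerateInUseCacheKey.
  void UnpackInUseCacheKey(uint64_t key,
                           uint32_t* image_id,
                           int* mip_level,
                           int* filter_quality) {
    *image_id = static_cast<uint32_t>(key >> 32);
    *mip_level = static_cast<int>((key >> 16) & 0xFFFF);
    *filter_quality = static_cast<int>(key & 0xFFFF);
  }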
112
59 } // namespace 113 } // namespace
60 114
115 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(
116 scoped_refptr<ImageData> image_data)
117 : image_data(std::move(image_data)) {}
118 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(
119 const InUseCacheEntry&) = default;
120 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) =
121 default;
122 GpuImageDecodeController::InUseCacheEntry::~InUseCacheEntry() = default;
123
61 // Task which decodes an image and stores the result in discardable memory. 124 // Task which decodes an image and stores the result in discardable memory.
62 // This task does not use GPU resources and can be run on any thread. 125 // This task does not use GPU resources and can be run on any thread.
63 class ImageDecodeTaskImpl : public TileTask { 126 class ImageDecodeTaskImpl : public TileTask {
64 public: 127 public:
65 ImageDecodeTaskImpl(GpuImageDecodeController* controller, 128 ImageDecodeTaskImpl(GpuImageDecodeController* controller,
66 const DrawImage& draw_image, 129 const DrawImage& draw_image,
67 const ImageDecodeController::TracingInfo& tracing_info) 130 const ImageDecodeController::TracingInfo& tracing_info)
68 : TileTask(true), 131 : TileTask(true),
69 controller_(controller), 132 controller_(controller),
70 image_(draw_image), 133 image_(draw_image),
(...skipping 99 matching lines...)
170 233
171 void GpuImageDecodeController::DecodedImageData::ResetData() { 234 void GpuImageDecodeController::DecodedImageData::ResetData() {
172 DCHECK(!is_locked_); 235 DCHECK(!is_locked_);
173 if (data_) 236 if (data_)
174 ReportUsageStats(); 237 ReportUsageStats();
175 data_ = nullptr; 238 data_ = nullptr;
176 usage_stats_ = UsageStats(); 239 usage_stats_ = UsageStats();
177 } 240 }
178 241
179 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const { 242 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const {
180 // lock_count | used | result state 243 // lock_count used result state
181 // ===========+=======+================== 244 // ═══════════╪═══════╪══════════════════
182 // 1 | false | WASTED_ONCE 245 // 1 false WASTED_ONCE
183 // 1 | true | USED_ONCE 246 // 1 true USED_ONCE
184 // >1 | false | WASTED_RELOCKED 247 // >1 false WASTED_RELOCKED
185 // >1 | true | USED_RELOCKED 248 // >1 true USED_RELOCKED
186 // Note that it's important not to reorder the following enums, since the 249 // Note that it's important not to reorder the following enums, since the
187 // numerical values are used in the histogram code. 250 // numerical values are used in the histogram code.
188 enum State : int { 251 enum State : int {
189 DECODED_IMAGE_STATE_WASTED_ONCE, 252 DECODED_IMAGE_STATE_WASTED_ONCE,
190 DECODED_IMAGE_STATE_USED_ONCE, 253 DECODED_IMAGE_STATE_USED_ONCE,
191 DECODED_IMAGE_STATE_WASTED_RELOCKED, 254 DECODED_IMAGE_STATE_WASTED_RELOCKED,
192 DECODED_IMAGE_STATE_USED_RELOCKED, 255 DECODED_IMAGE_STATE_USED_RELOCKED,
193 DECODED_IMAGE_STATE_COUNT 256 DECODED_IMAGE_STATE_COUNT
194 } state = DECODED_IMAGE_STATE_WASTED_ONCE; 257 } state = DECODED_IMAGE_STATE_WASTED_ONCE;
195 258
(...skipping 30 matching lines...)
226 image_ = std::move(image); 289 image_ = std::move(image);
227 } 290 }
228 291
229 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const { 292 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const {
230 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used", 293 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used",
231 usage_stats_.used); 294 usage_stats_.used);
232 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted", 295 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted",
233 usage_stats_.first_ref_wasted); 296 usage_stats_.first_ref_wasted);
234 } 297 }
235 298
236 GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode, 299 GpuImageDecodeController::ImageData::ImageData(
237 size_t size) 300 DecodedDataMode mode,
238 : mode(mode), size(size) {} 301 size_t size,
302 int upload_scale_mip_level,
303 SkFilterQuality upload_scale_filter_quality)
304 : mode(mode),
305 size(size),
306 upload_scale_mip_level(upload_scale_mip_level),
307 upload_scale_filter_quality(upload_scale_filter_quality) {}
239 308
240 GpuImageDecodeController::ImageData::~ImageData() = default; 309 GpuImageDecodeController::ImageData::~ImageData() {
310 // We should never delete ImageData while it is in use or before it has been
311 // cleaned up.
312 DCHECK_EQ(0u, upload.ref_count);
313 DCHECK_EQ(0u, decode.ref_count);
314 DCHECK_EQ(false, decode.is_locked());
315 // This should always be cleaned up before deleting the image, as it needs to
316 // be freed with the GL context lock held.
317 DCHECK(!upload.image());
318 }
241 319
242 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, 320 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context,
243 ResourceFormat decode_format, 321 ResourceFormat decode_format,
244 size_t max_gpu_image_bytes) 322 size_t max_gpu_image_bytes)
245 : format_(decode_format), 323 : format_(decode_format),
246 context_(context), 324 context_(context),
247 image_data_(ImageDataMRUCache::NO_AUTO_EVICT), 325 persistent_cache_(ImageDataMRUCache::NO_AUTO_EVICT),
248 cached_items_limit_(kMaxDiscardableItems), 326 cached_items_limit_(kMaxDiscardableItems),
249 cached_bytes_limit_(max_gpu_image_bytes), 327 cached_bytes_limit_(max_gpu_image_bytes),
250 bytes_used_(0), 328 bytes_used_(0),
251 max_gpu_image_bytes_(max_gpu_image_bytes) { 329 max_gpu_image_bytes_(max_gpu_image_bytes) {
252 // Acquire the context_lock so that we can safely retrieve the 330 // Acquire the context_lock so that we can safely retrieve the
253 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. 331 // GrContextThreadSafeProxy. This proxy can then be used with no lock held.
254 { 332 {
255 ContextProvider::ScopedContextLock context_lock(context_); 333 ContextProvider::ScopedContextLock context_lock(context_);
256 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( 334 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>(
257 context->GrContext()->threadSafeProxy()); 335 context->GrContext()->threadSafeProxy());
(...skipping 22 matching lines...)
280 const DrawImage& draw_image, 358 const DrawImage& draw_image,
281 const TracingInfo& tracing_info, 359 const TracingInfo& tracing_info,
282 scoped_refptr<TileTask>* task) { 360 scoped_refptr<TileTask>* task) {
283 if (SkipImage(draw_image)) { 361 if (SkipImage(draw_image)) {
284 *task = nullptr; 362 *task = nullptr;
285 return false; 363 return false;
286 } 364 }
287 365
288 base::AutoLock lock(lock_); 366 base::AutoLock lock(lock_);
289 const auto image_id = draw_image.image()->uniqueID(); 367 const auto image_id = draw_image.image()->uniqueID();
290 368 ImageData* image_data = GetImageDataForDrawImage(draw_image);
291 auto found = image_data_.Get(image_id); 369 if (image_data) {
292 if (found != image_data_.end()) {
293 ImageData* image_data = found->second.get();
294 if (image_data->is_at_raster) { 370 if (image_data->is_at_raster) {
295 // Image is at-raster, just return, this usage will be at-raster as well. 371 // Image is at-raster, just return, this usage will be at-raster as well.
296 *task = nullptr; 372 *task = nullptr;
297 return false; 373 return false;
298 } 374 }
299 375
300 if (image_data->decode.decode_failure) { 376 if (image_data->decode.decode_failure) {
301 // We have already tried and failed to decode this image, so just return. 377 // We have already tried and failed to decode this image, so just return.
302 *task = nullptr; 378 *task = nullptr;
303 return false; 379 return false;
304 } 380 }
305 381
306 if (image_data->upload.image()) { 382 if (image_data->upload.image()) {
307 // The image is already uploaded, ref and return. 383 // The image is already uploaded, ref and return.
308 RefImage(draw_image); 384 RefImage(draw_image);
309 *task = nullptr; 385 *task = nullptr;
310 return true; 386 return true;
311 } 387 }
312 }
313 388
314 // We didn't have a pre-uploaded image, so we need an upload task. Try to find 389 // We didn't have a pre-uploaded image, so we need an upload task. Try to
315 // an existing one. 390 // find an existing one.
316 scoped_refptr<TileTask>& existing_task = 391 if (image_data->upload.task) {
317 pending_image_upload_tasks_[image_id]; 392 // We had an existing upload task, ref the image and return the task.
318 if (existing_task) { 393 RefImage(draw_image);
319 // We had an existing upload task, ref the image and return the task. 394 *task = image_data->upload.task;
320 RefImage(draw_image); 395 return true;
321 *task = existing_task; 396 } else {
vmpstr 2016/06/22 21:33:35 nit: remove
ericrk 2016/06/23 18:16:55 Done.
322 return true; 397 }
323 } 398 }
324 399
325 // We will be creating a new upload task. If necessary, create a placeholder 400 // We will be creating a new upload task. If necessary, create a placeholder
326 // ImageData to hold the result. 401 // ImageData to hold the result.
327 std::unique_ptr<ImageData> new_data; 402 scoped_refptr<ImageData> new_data;
328 ImageData* data; 403 if (!image_data) {
vmpstr 2016/06/22 21:33:35 Can you restructure the code from 369 to 405 as:
ericrk 2016/06/23 18:16:55 good call.
329 if (found == image_data_.end()) {
330 new_data = CreateImageData(draw_image); 404 new_data = CreateImageData(draw_image);
331 data = new_data.get(); 405 image_data = new_data.get();
332 } else {
333 data = found->second.get();
334 } 406 }
335 407
336 // Ensure that the image we're about to decode/upload will fit in memory. 408 // Ensure that the image we're about to decode/upload will fit in memory.
337 if (!EnsureCapacity(data->size)) { 409 if (!EnsureCapacity(image_data->size)) {
338 // Image will not fit, do an at-raster decode. 410 // Image will not fit, do an at-raster decode.
339 *task = nullptr; 411 *task = nullptr;
340 return false; 412 return false;
341 } 413 }
342 414
343 // If we had to create new image data, add it to our map now that we know it 415 // If we had to create new image data, add it to our map now that we know it
344 // will fit. 416 // will fit.
345 if (new_data) 417 if (new_data)
346 found = image_data_.Put(image_id, std::move(new_data)); 418 persistent_cache_.Put(image_id, std::move(new_data));
347 419
348 // Ref image and create upload and decode tasks. We will release this ref 420 // Ref image and create upload and decode tasks. We will release this ref
349 // in UploadTaskCompleted. 421 // in UploadTaskCompleted.
350 RefImage(draw_image); 422 RefImage(draw_image);
351 existing_task = make_scoped_refptr(new ImageUploadTaskImpl( 423 *task = make_scoped_refptr(new ImageUploadTaskImpl(
352 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info), 424 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info),
353 tracing_info)); 425 tracing_info));
426 image_data->upload.task = *task;
354 427
355 // Ref the image again - this ref is owned by the caller, and it is their 428 // Ref the image again - this ref is owned by the caller, and it is their
356 // responsibility to release it by calling UnrefImage. 429 // responsibility to release it by calling UnrefImage.
357 RefImage(draw_image); 430 RefImage(draw_image);
358 *task = existing_task;
359 return true; 431 return true;
360 } 432 }
361 433
362 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { 434 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) {
363 base::AutoLock lock(lock_); 435 base::AutoLock lock(lock_);
364 UnrefImageInternal(draw_image); 436 UnrefImageInternal(draw_image);
365 } 437 }
366 438
367 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( 439 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw(
368 const DrawImage& draw_image) { 440 const DrawImage& draw_image) {
369 // We are being called during raster. The context lock must already be 441 // We are being called during raster. The context lock must already be
370 // acquired by the caller. 442 // acquired by the caller.
371 context_->GetLock()->AssertAcquired(); 443 context_->GetLock()->AssertAcquired();
372 444
373 if (SkipImage(draw_image)) 445 if (SkipImage(draw_image))
374 return DecodedDrawImage(nullptr, draw_image.filter_quality()); 446 return DecodedDrawImage(nullptr, draw_image.filter_quality());
375 447
376 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); 448 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw");
377 449
378 base::AutoLock lock(lock_); 450 base::AutoLock lock(lock_);
379 const uint32_t unique_id = draw_image.image()->uniqueID(); 451 ImageData* image_data = GetImageDataForDrawImage(draw_image);
380 auto found = image_data_.Peek(unique_id); 452 if (!image_data) {
381 if (found == image_data_.end()) {
382 // We didn't find the image, create a new entry. 453 // We didn't find the image, create a new entry.
383 auto data = CreateImageData(draw_image); 454 auto data = CreateImageData(draw_image);
384 found = image_data_.Put(unique_id, std::move(data)); 455 image_data = data.get();
456 persistent_cache_.Put(draw_image.image()->uniqueID(), std::move(data));
385 } 457 }
386 458
387 ImageData* image_data = found->second.get();
388
389 if (!image_data->upload.budgeted) { 459 if (!image_data->upload.budgeted) {
390 // If image data is not budgeted by this point, it is at-raster. 460 // If image data is not budgeted by this point, it is at-raster.
391 image_data->is_at_raster = true; 461 image_data->is_at_raster = true;
392 } 462 }
393 463
394 // Ref the image and decode so that they stay alive while we are 464 // Ref the image and decode so that they stay alive while we are
395 // decoding/uploading. 465 // decoding/uploading.
396 RefImage(draw_image); 466 RefImage(draw_image);
397 RefImageDecode(draw_image); 467 RefImageDecode(draw_image);
398 468
399 // We may or may not need to decode and upload the image we've found, the 469 // We may or may not need to decode and upload the image we've found, the
400 // following functions early-out if we already decoded. 470 // following functions early-out if we already decoded.
401 DecodeImageIfNecessary(draw_image, image_data); 471 DecodeImageIfNecessary(draw_image, image_data);
402 UploadImageIfNecessary(draw_image, image_data); 472 UploadImageIfNecessary(draw_image, image_data);
403 // Unref the image decode, but not the image. The image ref will be released 473 // Unref the image decode, but not the image. The image ref will be released
404 // in DrawWithImageFinished. 474 // in DrawWithImageFinished.
405 UnrefImageDecode(draw_image); 475 UnrefImageDecode(draw_image);
406 476
407 sk_sp<SkImage> image = image_data->upload.image(); 477 sk_sp<SkImage> image = image_data->upload.image();
408 image_data->upload.mark_used(); 478 image_data->upload.mark_used();
409 DCHECK(image || image_data->decode.decode_failure); 479 DCHECK(image || image_data->decode.decode_failure);
410 480
411 DecodedDrawImage decoded_draw_image(std::move(image), 481 SkSize scale_factor = CalculateScaleFactorForMipLevel(
482 draw_image, image_data->upload_scale_mip_level);
483 DecodedDrawImage decoded_draw_image(std::move(image), SkSize(), scale_factor,
412 draw_image.filter_quality()); 484 draw_image.filter_quality());
413 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster); 485 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster);
414 return decoded_draw_image; 486 return decoded_draw_image;
415 } 487 }
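For orientation, the raster-time pairing of GetDecodedImageForDraw and DrawWithImageFinished looks roughly like the sketch below. This is not a verbatim call site: the caller must already hold the context lock, and the variable names are illustrative.

  DecodedDrawImage decoded = controller->GetDecodedImageForDraw(draw_image);
  if (decoded.image()) {
    // Draw with decoded.image(), compensating for the returned scale
    // adjustment so the pre-scaled upload maps back to image space.
  }
  controller->DrawWithImageFinished(draw_image, decoded);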
416 488
417 void GpuImageDecodeController::DrawWithImageFinished( 489 void GpuImageDecodeController::DrawWithImageFinished(
418 const DrawImage& draw_image, 490 const DrawImage& draw_image,
419 const DecodedDrawImage& decoded_draw_image) { 491 const DecodedDrawImage& decoded_draw_image) {
420 // We are being called during raster. The context lock must already be 492 // We are being called during raster. The context lock must already be
421 // acquired by the caller. 493 // acquired by the caller.
422 context_->GetLock()->AssertAcquired(); 494 context_->GetLock()->AssertAcquired();
423 495
424 if (SkipImage(draw_image)) 496 if (SkipImage(draw_image))
425 return; 497 return;
426 498
427 base::AutoLock lock(lock_); 499 base::AutoLock lock(lock_);
500 TRACE_EVENT0("cc", "GpuImageDecodeController::DrawWithImageFinished");
vmpstr 2016/06/22 21:33:35 nit: Move this before the early out
ericrk 2016/06/23 18:16:55 I also log "GetDecodedImageForDraw" after the earl
428 UnrefImageInternal(draw_image); 501 UnrefImageInternal(draw_image);
429 502
430 // We are mid-draw and holding the context lock, ensure we clean up any 503 // We are mid-draw and holding the context lock, ensure we clean up any
431 // textures (especially at-raster), which may have just been marked for 504 // textures (especially at-raster), which may have just been marked for
432 // deletion by UnrefImage. 505 // deletion by UnrefImage.
433 DeletePendingImages(); 506 DeletePendingImages();
434 } 507 }
435 508
436 void GpuImageDecodeController::ReduceCacheUsage() { 509 void GpuImageDecodeController::ReduceCacheUsage() {
437 base::AutoLock lock(lock_); 510 base::AutoLock lock(lock_);
(...skipping 15 matching lines...)
453 DeletePendingImages(); 526 DeletePendingImages();
454 } else { 527 } else {
455 base::AutoLock lock(lock_); 528 base::AutoLock lock(lock_);
456 cached_bytes_limit_ = max_gpu_image_bytes_; 529 cached_bytes_limit_ = max_gpu_image_bytes_;
457 } 530 }
458 } 531 }
459 532
460 bool GpuImageDecodeController::OnMemoryDump( 533 bool GpuImageDecodeController::OnMemoryDump(
461 const base::trace_event::MemoryDumpArgs& args, 534 const base::trace_event::MemoryDumpArgs& args,
462 base::trace_event::ProcessMemoryDump* pmd) { 535 base::trace_event::ProcessMemoryDump* pmd) {
463 for (const auto& image_pair : image_data_) { 536 for (const auto& image_pair : persistent_cache_) {
464 const ImageData* image_data = image_pair.second.get(); 537 const ImageData* image_data = image_pair.second.get();
465 const uint32_t image_id = image_pair.first; 538 const uint32_t image_id = image_pair.first;
466 539
467 // If we have discardable decoded data, dump this here. 540 // If we have discardable decoded data, dump this here.
468 if (image_data->decode.data()) { 541 if (image_data->decode.data()) {
469 std::string discardable_dump_name = base::StringPrintf( 542 std::string discardable_dump_name = base::StringPrintf(
470 "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d", 543 "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d",
471 reinterpret_cast<uintptr_t>(this), image_id); 544 reinterpret_cast<uintptr_t>(this), image_id);
472 base::trace_event::MemoryAllocatorDump* dump = 545 base::trace_event::MemoryAllocatorDump* dump =
473 image_data->decode.data()->CreateMemoryAllocatorDump( 546 image_data->decode.data()->CreateMemoryAllocatorDump(
474 discardable_dump_name.c_str(), pmd); 547 discardable_dump_name.c_str(), pmd);
475
476 // If our image is locked, dump the "locked_size" as an additional column. 548 // If our image is locked, dump the "locked_size" as an additional column.
477 // This lets us see the amount of discardable which is contributing to 549 // This lets us see the amount of discardable which is contributing to
478 // memory pressure. 550 // memory pressure.
479 if (image_data->decode.is_locked()) { 551 if (image_data->decode.is_locked()) {
480 dump->AddScalar("locked_size", 552 dump->AddScalar("locked_size",
481 base::trace_event::MemoryAllocatorDump::kUnitsBytes, 553 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
482 image_data->size); 554 image_data->size);
483 } 555 }
484 } 556 }
485 557
(...skipping 27 matching lines...)
513 pmd->CreateSharedGlobalAllocatorDump(guid); 585 pmd->CreateSharedGlobalAllocatorDump(guid);
514 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); 586 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
515 } 587 }
516 } 588 }
517 589
518 return true; 590 return true;
519 } 591 }
520 592
521 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { 593 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) {
522 base::AutoLock lock(lock_); 594 base::AutoLock lock(lock_);
523 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 595 ImageData* image_data = GetImageDataForDrawImage(draw_image);
524 DCHECK(found != image_data_.end()); 596 DCHECK(image_data);
525 DCHECK(!found->second->is_at_raster); 597 DCHECK(!image_data->is_at_raster);
526 DecodeImageIfNecessary(draw_image, found->second.get()); 598 DecodeImageIfNecessary(draw_image, image_data);
527 } 599 }
528 600
529 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { 601 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) {
530 ContextProvider::ScopedContextLock context_lock(context_); 602 ContextProvider::ScopedContextLock context_lock(context_);
531 base::AutoLock lock(lock_); 603 base::AutoLock lock(lock_);
532 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 604 ImageData* image_data = GetImageDataForDrawImage(draw_image);
533 DCHECK(found != image_data_.end()); 605 DCHECK(image_data);
534 DCHECK(!found->second->is_at_raster); 606 DCHECK(!image_data->is_at_raster);
535 UploadImageIfNecessary(draw_image, found->second.get()); 607 UploadImageIfNecessary(draw_image, image_data);
536 } 608 }
537 609
538 void GpuImageDecodeController::OnImageDecodeTaskCompleted( 610 void GpuImageDecodeController::OnImageDecodeTaskCompleted(
539 const DrawImage& draw_image) { 611 const DrawImage& draw_image) {
540 base::AutoLock lock(lock_); 612 base::AutoLock lock(lock_);
541 // Decode task is complete, remove it from our list of pending tasks. 613 // Decode task is complete, remove our reference to it.
542 pending_image_decode_tasks_.erase(draw_image.image()->uniqueID()); 614 ImageData* image_data = GetImageDataForDrawImage(draw_image);
615 DCHECK(image_data);
616 DCHECK(image_data->decode.task);
617 image_data->decode.task = nullptr;
543 618
544 // While the decode task is active, we keep a ref on the decoded data. 619 // While the decode task is active, we keep a ref on the decoded data.
545 // Release that ref now. 620 // Release that ref now.
546 UnrefImageDecode(draw_image); 621 UnrefImageDecode(draw_image);
547 } 622 }
548 623
549 void GpuImageDecodeController::OnImageUploadTaskCompleted( 624 void GpuImageDecodeController::OnImageUploadTaskCompleted(
550 const DrawImage& draw_image) { 625 const DrawImage& draw_image) {
551 base::AutoLock lock(lock_); 626 base::AutoLock lock(lock_);
552 // Upload task is complete, remove it from our list of pending tasks. 627 // Upload task is complete, remove our reference to it.
553 pending_image_upload_tasks_.erase(draw_image.image()->uniqueID()); 628 ImageData* image_data = GetImageDataForDrawImage(draw_image);
629 DCHECK(image_data);
630 DCHECK(image_data->upload.task);
631 image_data->upload.task = nullptr;
554 632
555 // While the upload task is active, we keep a ref on both the image it will be 633 // While the upload task is active, we keep a ref on both the image it will be
556 // populating, as well as the decode it needs to populate it. Release these 634 // populating, as well as the decode it needs to populate it. Release these
557 // refs now. 635 // refs now.
558 UnrefImageDecode(draw_image); 636 UnrefImageDecode(draw_image);
559 UnrefImageInternal(draw_image); 637 UnrefImageInternal(draw_image);
560 } 638 }
561 639
562 // Checks if an existing image decode exists. If not, returns a task to produce 640 // Checks if an existing image decode exists. If not, returns a task to produce
563 // the requested decode. 641 // the requested decode.
564 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef( 642 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef(
565 const DrawImage& draw_image, 643 const DrawImage& draw_image,
566 const TracingInfo& tracing_info) { 644 const TracingInfo& tracing_info) {
567 lock_.AssertAcquired(); 645 lock_.AssertAcquired();
568 646
569 const uint32_t image_id = draw_image.image()->uniqueID();
570
571 // This ref is kept alive while an upload task may need this decode. We 647 // This ref is kept alive while an upload task may need this decode. We
572 // release this ref in UploadTaskCompleted. 648 // release this ref in UploadTaskCompleted.
573 RefImageDecode(draw_image); 649 RefImageDecode(draw_image);
574 650
575 auto found = image_data_.Peek(image_id); 651 ImageData* image_data = GetImageDataForDrawImage(draw_image);
576 if (found != image_data_.end() && found->second->decode.is_locked()) { 652 DCHECK(image_data);
653 if (image_data->decode.is_locked()) {
577 // We should never be creating a decode task for an at raster image. 654 // We should never be creating a decode task for an at raster image.
578 DCHECK(!found->second->is_at_raster); 655 DCHECK(!image_data->is_at_raster);
579 // We should never be creating a decode for an already-uploaded image. 656 // We should never be creating a decode for an already-uploaded image.
580 DCHECK(!found->second->upload.image()); 657 DCHECK(!image_data->upload.image());
581 return nullptr; 658 return nullptr;
582 } 659 }
583 660
584 // We didn't have an existing locked image, create a task to lock or decode. 661 // We didn't have an existing locked image, create a task to lock or decode.
585 scoped_refptr<TileTask>& existing_task = 662 scoped_refptr<TileTask>& existing_task = image_data->decode.task;
586 pending_image_decode_tasks_[image_id];
587 if (!existing_task) { 663 if (!existing_task) {
588 // Ref image decode and create a decode task. This ref will be released in 664 // Ref image decode and create a decode task. This ref will be released in
589 // DecodeTaskCompleted. 665 // DecodeTaskCompleted.
590 RefImageDecode(draw_image); 666 RefImageDecode(draw_image);
591 existing_task = make_scoped_refptr( 667 existing_task = make_scoped_refptr(
592 new ImageDecodeTaskImpl(this, draw_image, tracing_info)); 668 new ImageDecodeTaskImpl(this, draw_image, tracing_info));
593 } 669 }
594 return existing_task; 670 return existing_task;
595 } 671 }
596 672
597 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) { 673 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) {
598 lock_.AssertAcquired(); 674 lock_.AssertAcquired();
599 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 675 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
600 DCHECK(found != image_data_.end()); 676 DCHECK(found != in_use_cache_.end());
601 ++found->second->decode.ref_count; 677 ++found->second.ref_count;
602 RefCountChanged(found->second.get()); 678 ++found->second.image_data->decode.ref_count;
679 OwnershipChanged(found->second.image_data.get());
603 } 680 }
604 681
605 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) { 682 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) {
606 lock_.AssertAcquired(); 683 lock_.AssertAcquired();
607 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 684 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
608 DCHECK(found != image_data_.end()); 685 DCHECK(found != in_use_cache_.end());
609 DCHECK_GT(found->second->decode.ref_count, 0u); 686 DCHECK_GT(found->second.image_data->decode.ref_count, 0u);
610 --found->second->decode.ref_count; 687 DCHECK_GT(found->second.ref_count, 0u);
611 RefCountChanged(found->second.get()); 688 --found->second.ref_count;
689 --found->second.image_data->decode.ref_count;
690 OwnershipChanged(found->second.image_data.get());
691 if (found->second.ref_count == 0u) {
692 in_use_cache_.erase(found);
693 }
612 } 694 }
613 695
614 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) { 696 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) {
615 lock_.AssertAcquired(); 697 lock_.AssertAcquired();
616 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 698 InUseCacheKey key = GenerateInUseCacheKey(draw_image);
617 DCHECK(found != image_data_.end()); 699 auto found = in_use_cache_.find(key);
618 ++found->second->upload.ref_count; 700
619 RefCountChanged(found->second.get()); 701 // If no secondary cache entry was found for the given |draw_image|, then
702 // the draw_image only exists in the |persistent_cache_|. Create an in-use
703 // cache entry now.
704 if (found == in_use_cache_.end()) {
705 auto found_image = persistent_cache_.Peek(draw_image.image()->uniqueID());
706 DCHECK(found_image != persistent_cache_.end());
707 DCHECK(found_image->second->upload_scale_mip_level <=
708 CalculateUploadScaleMipLevel(draw_image));
709 found =
710 in_use_cache_.emplace(key, InUseCacheEntry(found_image->second)).first;
711 }
712
713 DCHECK(found != in_use_cache_.end());
714 ++found->second.ref_count;
715 ++found->second.image_data->upload.ref_count;
716 OwnershipChanged(found->second.image_data.get());
620 } 717 }
621 718
622 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) { 719 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) {
623 lock_.AssertAcquired(); 720 lock_.AssertAcquired();
624 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 721 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
625 DCHECK(found != image_data_.end()); 722 DCHECK(found != in_use_cache_.end());
626 DCHECK_GT(found->second->upload.ref_count, 0u); 723 DCHECK_GT(found->second.image_data->upload.ref_count, 0u);
627 --found->second->upload.ref_count; 724 DCHECK_GT(found->second.ref_count, 0u);
628 if (found->second->upload.ref_count == 0) 725 --found->second.ref_count;
629 found->second->upload.notify_ref_reached_zero(); 726 --found->second.image_data->upload.ref_count;
630 RefCountChanged(found->second.get()); 727 OwnershipChanged(found->second.image_data.get());
728 if (found->second.ref_count == 0u) {
729 in_use_cache_.erase(found);
730 }
631 } 731 }
632 732
633 // Called any time an image or decode ref count changes. Takes care of any 733 // Called any time an image or decode ref count changes. Takes care of any
634 // necessary memory budget book-keeping and cleanup. 734 // necessary memory budget book-keeping and cleanup.
635 void GpuImageDecodeController::RefCountChanged(ImageData* image_data) { 735 void GpuImageDecodeController::OwnershipChanged(ImageData* image_data) {
636 lock_.AssertAcquired(); 736 lock_.AssertAcquired();
637 737
638 bool has_any_refs = 738 bool has_any_refs =
639 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0; 739 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
640 740
741 // Don't keep around orphaned images.
742 if (image_data->is_orphaned && !has_any_refs) {
743 images_pending_deletion_.push_back(std::move(image_data->upload.image()));
744 image_data->upload.SetImage(nullptr);
745 }
746
641 // Don't keep CPU images if they are unused, these images can be recreated by 747 // Don't keep CPU images if they are unused, these images can be recreated by
642 // re-locking discardable (rather than requiring a full upload like GPU 748 // re-locking discardable (rather than requiring a full upload like GPU
643 // images). 749 // images).
644 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { 750 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) {
645 images_pending_deletion_.push_back(image_data->upload.image()); 751 images_pending_deletion_.push_back(image_data->upload.image());
646 image_data->upload.SetImage(nullptr); 752 image_data->upload.SetImage(nullptr);
647 } 753 }
648 754
649 if (image_data->is_at_raster && !has_any_refs) { 755 if (image_data->is_at_raster && !has_any_refs) {
650 // We have an at-raster image which has reached zero refs. If it won't fit 756 // We have an at-raster image which has reached zero refs. If it won't fit
(...skipping 65 matching lines...)
716 lock_.AssertAcquired(); 822 lock_.AssertAcquired();
717 823
718 if (CanFitSize(required_size) && !ExceedsPreferredCount()) 824 if (CanFitSize(required_size) && !ExceedsPreferredCount())
719 return true; 825 return true;
720 826
721 // While we are over memory or preferred item capacity, we iterate through 827 // While we are over memory or preferred item capacity, we iterate through
722 // our set of cached image data in LRU order. For each image, we can do two 828 // our set of cached image data in LRU order. For each image, we can do two
723 // things: 1) We can free the uploaded image, reducing the memory usage of 829 // things: 1) We can free the uploaded image, reducing the memory usage of
724 // the cache and 2) we can remove the entry entirely, reducing the count of 830 // the cache and 2) we can remove the entry entirely, reducing the count of
725 // elements in the cache. 831 // elements in the cache.
726 for (auto it = image_data_.rbegin(); it != image_data_.rend();) { 832 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) {
727 if (it->second->decode.ref_count != 0 || 833 if (it->second->decode.ref_count != 0 ||
728 it->second->upload.ref_count != 0) { 834 it->second->upload.ref_count != 0) {
729 ++it; 835 ++it;
730 continue; 836 continue;
731 } 837 }
732 838
733 // Current entry has no refs. Ensure it is not locked. 839 // Current entry has no refs. Ensure it is not locked.
734 DCHECK(!it->second->decode.is_locked()); 840 DCHECK(!it->second->decode.is_locked());
735 841
736 // If an image without refs is budgeted, it must have an associated image 842 // If an image without refs is budgeted, it must have an associated image
737 // upload. 843 // upload.
738 DCHECK(!it->second->upload.budgeted || it->second->upload.image()); 844 DCHECK(!it->second->upload.budgeted || it->second->upload.image());
739 845
740 // Free the uploaded image if possible. 846 // Free the uploaded image if possible.
741 if (it->second->upload.image()) { 847 if (it->second->upload.image()) {
742 DCHECK(it->second->upload.budgeted); 848 DCHECK(it->second->upload.budgeted);
743 DCHECK_GE(bytes_used_, it->second->size); 849 DCHECK_GE(bytes_used_, it->second->size);
744 bytes_used_ -= it->second->size; 850 bytes_used_ -= it->second->size;
745 images_pending_deletion_.push_back(it->second->upload.image()); 851 images_pending_deletion_.push_back(it->second->upload.image());
746 it->second->upload.SetImage(nullptr); 852 it->second->upload.SetImage(nullptr);
747 it->second->upload.budgeted = false; 853 it->second->upload.budgeted = false;
748 } 854 }
749 855
750 // Free the entire entry if necessary. 856 // Free the entire entry if necessary.
751 if (ExceedsPreferredCount()) { 857 if (ExceedsPreferredCount()) {
752 it = image_data_.Erase(it); 858 it = persistent_cache_.Erase(it);
753 } else { 859 } else {
754 ++it; 860 ++it;
755 } 861 }
756 862
757 if (CanFitSize(required_size) && !ExceedsPreferredCount()) 863 if (CanFitSize(required_size) && !ExceedsPreferredCount())
758 return true; 864 return true;
759 } 865 }
760 866
761 // Preferred count is only used as a guideline when trimming the cache. Allow 867 // Preferred count is only used as a guideline when trimming the cache. Allow
762 // new elements to be added as long as we are below our size limit. 868 // new elements to be added as long as we are below our size limit.
763 return CanFitSize(required_size); 869 return CanFitSize(required_size);
764 } 870 }
765 871
766 bool GpuImageDecodeController::CanFitSize(size_t size) const { 872 bool GpuImageDecodeController::CanFitSize(size_t size) const {
767 lock_.AssertAcquired(); 873 lock_.AssertAcquired();
768 874
769 base::CheckedNumeric<uint32_t> new_size(bytes_used_); 875 base::CheckedNumeric<uint32_t> new_size(bytes_used_);
770 new_size += size; 876 new_size += size;
771 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_; 877 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_;
772 } 878 }
773 879
774 bool GpuImageDecodeController::ExceedsPreferredCount() const { 880 bool GpuImageDecodeController::ExceedsPreferredCount() const {
775 lock_.AssertAcquired(); 881 lock_.AssertAcquired();
776 882
777 return image_data_.size() > cached_items_limit_; 883 return persistent_cache_.size() > cached_items_limit_;
778 } 884 }
779 885
780 void GpuImageDecodeController::DecodeImageIfNecessary( 886 void GpuImageDecodeController::DecodeImageIfNecessary(
781 const DrawImage& draw_image, 887 const DrawImage& draw_image,
782 ImageData* image_data) { 888 ImageData* image_data) {
783 lock_.AssertAcquired(); 889 lock_.AssertAcquired();
784 890
785 DCHECK_GT(image_data->decode.ref_count, 0u); 891 DCHECK_GT(image_data->decode.ref_count, 0u);
786 892
787 if (image_data->decode.decode_failure) { 893 if (image_data->decode.decode_failure) {
(...skipping 16 matching lines...)
804 910
805 image_data->decode.ResetData(); 911 image_data->decode.ResetData();
806 std::unique_ptr<base::DiscardableMemory> backing_memory; 912 std::unique_ptr<base::DiscardableMemory> backing_memory;
807 { 913 {
808 base::AutoUnlock unlock(lock_); 914 base::AutoUnlock unlock(lock_);
809 switch (image_data->mode) { 915 switch (image_data->mode) {
810 case DecodedDataMode::CPU: { 916 case DecodedDataMode::CPU: {
811 backing_memory = 917 backing_memory =
812 base::DiscardableMemoryAllocator::GetInstance() 918 base::DiscardableMemoryAllocator::GetInstance()
813 ->AllocateLockedDiscardableMemory(image_data->size); 919 ->AllocateLockedDiscardableMemory(image_data->size);
814 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); 920 SkImageInfo image_info = CreateImageInfoForDrawImage(
815 if (!draw_image.image()->readPixels(image_info, backing_memory->data(), 921 draw_image, image_data->upload_scale_mip_level);
816 image_info.minRowBytes(), 0, 0, 922 // In order to match GPU scaling quality (which uses mip-maps at high
817 SkImage::kDisallow_CachingHint)) { 923 // quality), we want to use at most medium filter quality for the
924 // scale.
925 SkFilterQuality scale_quality =
926 std::max(kMedium_SkFilterQuality, draw_image.filter_quality());
vmpstr 2016/06/22 21:33:34 Based on the comment, this should be std::min
ericrk 2016/06/23 18:16:55 definitely :D - Re-structured the code a bit to pu
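              // A minimal sketch of the fix the comment above points at (the
              // landed code was restructured, per the reply, so this is
              // illustrative only): capping the scale at medium quality, as
              // the comment intends, needs std::min rather than std::max:
              //   SkFilterQuality scale_quality =
              //       std::min(kMedium_SkFilterQuality, draw_image.filter_quality());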
927 SkPixmap image_pixmap(image_info, backing_memory->data(),
928 image_info.minRowBytes());
929 // Note that scalePixels falls back to readPixels if the sale is 1x, so
930 // no need to special case that as an optimization.
931 if (!draw_image.image()->scalePixels(image_pixmap, scale_quality,
932 SkImage::kDisallow_CachingHint)) {
818 backing_memory.reset(); 933 backing_memory.reset();
819 } 934 }
820 break; 935 break;
821 } 936 }
822 case DecodedDataMode::GPU: { 937 case DecodedDataMode::GPU: {
823 backing_memory = 938 backing_memory =
824 base::DiscardableMemoryAllocator::GetInstance() 939 base::DiscardableMemoryAllocator::GetInstance()
825 ->AllocateLockedDiscardableMemory(image_data->size); 940 ->AllocateLockedDiscardableMemory(image_data->size);
826 auto params = ParamsFromDrawImage(draw_image); 941 auto params =
942 ParamsFromDrawImage(draw_image, image_data->upload_scale_mip_level);
827 if (!draw_image.image()->getDeferredTextureImageData( 943 if (!draw_image.image()->getDeferredTextureImageData(
828 *context_threadsafe_proxy_.get(), &params, 1, 944 *context_threadsafe_proxy_.get(), &params, 1,
829 backing_memory->data())) { 945 backing_memory->data())) {
830 backing_memory.reset(); 946 backing_memory.reset();
831 } 947 }
832 break; 948 break;
833 } 949 }
834 } 950 }
835 } 951 }
836 952
(...skipping 35 matching lines...)
872 // We are about to upload a new image and are holding the context lock. 988 // We are about to upload a new image and are holding the context lock.
873 // Ensure that any images which have been marked for deletion are actually 989 // Ensure that any images which have been marked for deletion are actually
874 // cleaned up so we don't exceed our memory limit during this upload. 990 // cleaned up so we don't exceed our memory limit during this upload.
875 DeletePendingImages(); 991 DeletePendingImages();
876 992
877 sk_sp<SkImage> uploaded_image; 993 sk_sp<SkImage> uploaded_image;
878 { 994 {
879 base::AutoUnlock unlock(lock_); 995 base::AutoUnlock unlock(lock_);
880 switch (image_data->mode) { 996 switch (image_data->mode) {
881 case DecodedDataMode::CPU: { 997 case DecodedDataMode::CPU: {
882 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); 998 SkImageInfo image_info = CreateImageInfoForDrawImage(
999 draw_image, image_data->upload_scale_mip_level);
883 SkPixmap pixmap(image_info, image_data->decode.data()->data(), 1000 SkPixmap pixmap(image_info, image_data->decode.data()->data(),
884 image_info.minRowBytes()); 1001 image_info.minRowBytes());
885 uploaded_image = 1002 uploaded_image =
886 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr); 1003 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr);
887 break; 1004 break;
888 } 1005 }
889 case DecodedDataMode::GPU: { 1006 case DecodedDataMode::GPU: {
890 uploaded_image = SkImage::MakeFromDeferredTextureImageData( 1007 uploaded_image = SkImage::MakeFromDeferredTextureImageData(
891 context_->GrContext(), image_data->decode.data()->data(), 1008 context_->GrContext(), image_data->decode.data()->data(),
892 SkBudgeted::kNo); 1009 SkBudgeted::kNo);
893 break; 1010 break;
894 } 1011 }
895 } 1012 }
896 } 1013 }
897 image_data->decode.mark_used(); 1014 image_data->decode.mark_used();
898 DCHECK(uploaded_image); 1015 DCHECK(uploaded_image);
899 1016
900 // At-raster may have decoded this while we were unlocked. If so, ignore our 1017 // At-raster may have decoded this while we were unlocked. If so, ignore our
901 // result. 1018 // result.
902 if (!image_data->upload.image()) 1019 if (!image_data->upload.image())
903 image_data->upload.SetImage(std::move(uploaded_image)); 1020 image_data->upload.SetImage(std::move(uploaded_image));
904 } 1021 }
905 1022
906 std::unique_ptr<GpuImageDecodeController::ImageData> 1023 scoped_refptr<GpuImageDecodeController::ImageData>
907 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { 1024 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) {
908 lock_.AssertAcquired(); 1025 lock_.AssertAcquired();
909 1026
910 DecodedDataMode mode; 1027 DecodedDataMode mode;
911 SkImageInfo info = CreateImageInfoForDrawImage(draw_image); 1028 int upload_scale_mip_level = CalculateUploadScaleMipLevel(draw_image);
912 SkImage::DeferredTextureImageUsageParams params = 1029 SkImage::DeferredTextureImageUsageParams params =
913 ParamsFromDrawImage(draw_image); 1030 ParamsFromDrawImage(draw_image, upload_scale_mip_level);
914 size_t data_size = draw_image.image()->getDeferredTextureImageData( 1031 size_t data_size = draw_image.image()->getDeferredTextureImageData(
915 *context_threadsafe_proxy_.get(), &params, 1, nullptr); 1032 *context_threadsafe_proxy_.get(), &params, 1, nullptr);
916 1033
917 if (data_size == 0) { 1034 if (data_size == 0) {
918 // Can't upload image, too large or other failure. Try to use SW fallback. 1035 // Can't upload image, too large or other failure. Try to use SW fallback.
919 data_size = info.getSafeSize(info.minRowBytes()); 1036 SkImageInfo image_info =
1037 CreateImageInfoForDrawImage(draw_image, upload_scale_mip_level);
1038 data_size = image_info.getSafeSize(image_info.minRowBytes());
920 mode = DecodedDataMode::CPU; 1039 mode = DecodedDataMode::CPU;
921 } else { 1040 } else {
922 mode = DecodedDataMode::GPU; 1041 mode = DecodedDataMode::GPU;
923 } 1042 }
924 1043
925 return base::WrapUnique(new ImageData(mode, data_size)); 1044 return make_scoped_refptr(new ImageData(
1045 mode, data_size, upload_scale_mip_level, draw_image.filter_quality()));
926 } 1046 }
927 1047
928 void GpuImageDecodeController::DeletePendingImages() { 1048 void GpuImageDecodeController::DeletePendingImages() {
929 context_->GetLock()->AssertAcquired(); 1049 context_->GetLock()->AssertAcquired();
930 lock_.AssertAcquired(); 1050 lock_.AssertAcquired();
931 images_pending_deletion_.clear(); 1051 images_pending_deletion_.clear();
932 } 1052 }
933 1053
934 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( 1054 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage(
1055 const DrawImage& draw_image,
1056 int upload_scale_mip_level) const {
1057 DrawImage scaled_draw_image = draw_image.ApplyScale(
1058 CalculateScaleFactorForMipLevel(draw_image, upload_scale_mip_level));
1059 return SkImageInfo::Make(
1060 scaled_draw_image.image()->width() * scaled_draw_image.scale().width(),
1061 scaled_draw_image.image()->height() * scaled_draw_image.scale().height(),
1062 ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType);
1063 }
1064
1065 // Tries to find an ImageData that can be used to draw the provided
1066 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one
1067 // cannot be found, it looks for a compatible entry in our |persistent_cache_|.
1068 GpuImageDecodeController::ImageData*
1069 GpuImageDecodeController::GetImageDataForDrawImage(
1070 const DrawImage& draw_image) {
1071 lock_.AssertAcquired();
1072 auto found_in_use = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
1073 if (found_in_use != in_use_cache_.end())
1074 return found_in_use->second.image_data.get();
1075
1076 auto found_persistent = persistent_cache_.Get(draw_image.image()->uniqueID());
1077 if (found_persistent != persistent_cache_.end()) {
1078 ImageData* image_data = found_persistent->second.get();
1079 if (IsCompatibleWithDrawImage(image_data, draw_image)) {
1080 return image_data;
1081 } else {
1082 found_persistent->second->is_orphaned = true;
1083 // Call OwnershipChanged before erasing the orphaned task from the
1084 // persistent cache. This ensures that if the orphaned task has 0
1085 // references, it is cleaned up safely before it is deleted.
1086 OwnershipChanged(image_data);
1087 persistent_cache_.Erase(found_persistent);
1088 }
1089 }
1090
1091 return nullptr;
1092 }
1093
1094 // Determines if we can draw the provided |draw_image| using the provided
1095 // |image_data|. This is true if the |image_data| is not scaled, or if it
1096 // is scaled at an equal or larger scale and equal or larger quality to
1097 // the provided |draw_image|.
1098 bool GpuImageDecodeController::IsCompatibleWithDrawImage(
1099 const ImageData* image_data,
935 const DrawImage& draw_image) const { 1100 const DrawImage& draw_image) const {
936 return SkImageInfo::Make( 1101 bool is_scaled = image_data->upload_scale_mip_level != 0;
937 draw_image.image()->width(), draw_image.image()->height(), 1102 bool scale_is_compatible = CalculateUploadScaleMipLevel(draw_image) >=
938 ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType); 1103 image_data->upload_scale_mip_level;
1104 bool quality_is_compatible =
1105 draw_image.filter_quality() <= image_data->upload_scale_filter_quality;
1106 return !is_scaled || (scale_is_compatible && quality_is_compatible);
1107 }
1108
1109 size_t GpuImageDecodeController::GetDrawImageSizeForTesting(
1110 const DrawImage& image) {
1111 base::AutoLock lock(lock_);
1112 scoped_refptr<ImageData> data = CreateImageData(image);
1113 return data->size;
939 } 1114 }
940 1115
941 void GpuImageDecodeController::SetImageDecodingFailedForTesting( 1116 void GpuImageDecodeController::SetImageDecodingFailedForTesting(
942 const DrawImage& image) { 1117 const DrawImage& image) {
943 base::AutoLock lock(lock_); 1118 base::AutoLock lock(lock_);
944 auto found = image_data_.Peek(image.image()->uniqueID()); 1119 auto found = persistent_cache_.Peek(image.image()->uniqueID());
945 DCHECK(found != image_data_.end()); 1120 DCHECK(found != persistent_cache_.end());
946 ImageData* image_data = found->second.get(); 1121 ImageData* image_data = found->second.get();
947 image_data->decode.decode_failure = true; 1122 image_data->decode.decode_failure = true;
948 } 1123 }
949 1124
950 bool GpuImageDecodeController::DiscardableIsLockedForTesting( 1125 bool GpuImageDecodeController::DiscardableIsLockedForTesting(
951 const DrawImage& image) { 1126 const DrawImage& image) {
952 base::AutoLock lock(lock_); 1127 base::AutoLock lock(lock_);
953 auto found = image_data_.Peek(image.image()->uniqueID()); 1128 auto found = persistent_cache_.Peek(image.image()->uniqueID());
954 DCHECK(found != image_data_.end()); 1129 DCHECK(found != persistent_cache_.end());
955 ImageData* image_data = found->second.get(); 1130 ImageData* image_data = found->second.get();
956 return image_data->decode.is_locked(); 1131 return image_data->decode.is_locked();
957 } 1132 }
958 1133
959 } // namespace cc 1134 } // namespace cc