Chromium Code Reviews

Side by Side Diff: cc/tiles/gpu_image_decode_controller.cc

Issue 2042133002: Add display-resolution caching to GPU IDC (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@drt
Patch Set: Cleanup/feedback Created 4 years, 6 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/tiles/gpu_image_decode_controller.h" 5 #include "cc/tiles/gpu_image_decode_controller.h"
6 6
7 #include "base/memory/discardable_memory_allocator.h" 7 #include "base/memory/discardable_memory_allocator.h"
8 #include "base/memory/ptr_util.h" 8 #include "base/memory/ptr_util.h"
9 #include "base/metrics/histogram_macros.h" 9 #include "base/metrics/histogram_macros.h"
10 #include "base/numerics/safe_math.h" 10 #include "base/numerics/safe_math.h"
11 #include "base/strings/stringprintf.h" 11 #include "base/strings/stringprintf.h"
12 #include "base/threading/thread_task_runner_handle.h" 12 #include "base/threading/thread_task_runner_handle.h"
13 #include "cc/debug/devtools_instrumentation.h" 13 #include "cc/debug/devtools_instrumentation.h"
14 #include "cc/output/context_provider.h" 14 #include "cc/output/context_provider.h"
15 #include "cc/raster/tile_task.h" 15 #include "cc/raster/tile_task.h"
16 #include "cc/resources/resource_format_utils.h" 16 #include "cc/resources/resource_format_utils.h"
17 #include "cc/tiles/mipmap_util.h"
17 #include "gpu/command_buffer/client/context_support.h" 18 #include "gpu/command_buffer/client/context_support.h"
18 #include "gpu/command_buffer/client/gles2_interface.h" 19 #include "gpu/command_buffer/client/gles2_interface.h"
19 #include "gpu_image_decode_controller.h" 20 #include "gpu_image_decode_controller.h"
20 #include "skia/ext/texture_handle.h" 21 #include "skia/ext/texture_handle.h"
21 #include "third_party/skia/include/core/SkCanvas.h" 22 #include "third_party/skia/include/core/SkCanvas.h"
22 #include "third_party/skia/include/core/SkRefCnt.h" 23 #include "third_party/skia/include/core/SkRefCnt.h"
23 #include "third_party/skia/include/core/SkSurface.h" 24 #include "third_party/skia/include/core/SkSurface.h"
24 #include "third_party/skia/include/gpu/GrContext.h" 25 #include "third_party/skia/include/gpu/GrContext.h"
25 #include "third_party/skia/include/gpu/GrTexture.h" 26 #include "third_party/skia/include/gpu/GrTexture.h"
26 #include "ui/gfx/skia_util.h" 27 #include "ui/gfx/skia_util.h"
(...skipping 12 matching lines...)
39 if (std::abs(draw_image.scale().width()) < 40 if (std::abs(draw_image.scale().width()) <
40 std::numeric_limits<float>::epsilon() || 41 std::numeric_limits<float>::epsilon() ||
41 std::abs(draw_image.scale().height()) < 42 std::abs(draw_image.scale().height()) <
42 std::numeric_limits<float>::epsilon()) { 43 std::numeric_limits<float>::epsilon()) {
43 return true; 44 return true;
44 } 45 }
45 return false; 46 return false;
46 } 47 }
47 48
48 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage( 49 SkImage::DeferredTextureImageUsageParams ParamsFromDrawImage(
49 const DrawImage& draw_image) { 50 const DrawImage& draw_image,
51 int pre_scale_mip_level) {
50 SkImage::DeferredTextureImageUsageParams params; 52 SkImage::DeferredTextureImageUsageParams params;
51 params.fMatrix = draw_image.matrix(); 53 params.fMatrix = draw_image.matrix();
52 params.fQuality = draw_image.filter_quality(); 54 params.fQuality = draw_image.filter_quality();
55 params.fPreScaleMipLevel = pre_scale_mip_level;
53 56
54 return params; 57 return params;
55 } 58 }
56 59
60 // Calculate the mp level to pre-scale the image to before uploading. We use mip
vmpstr 2016/06/21 20:07:08 s/mp/mip/
ericrk 2016/06/22 18:56:38 Done.
61 // levels rather than exact scales to increase re-use of scaled images.
62 int CalculatePreScaleMipLevel(const DrawImage& draw_image) {
vmpstr 2016/06/21 20:07:08 Can we rename "Pre" to "Upload"
ericrk 2016/06/22 18:56:37 Done.
63 if (draw_image.src_rect() != draw_image.image()->bounds()) {
64 // Images which are being clipped will have color-bleeding if scaled.
vmpstr 2016/06/21 20:07:07 Move the comment before the if, please
ericrk 2016/06/22 18:56:38 Done.
65 // TODO(ericrk): Investigate uploading clipped images to handle this case
66 // and provide further optimization. crbug.com/620899
67 return 0;
68 }
69
70 gfx::Size base_size(draw_image.image()->width(),
71 draw_image.image()->height());
72 // Ceil our scaled size so that the mip map generated is guaranteed to be
73 // larger.
74 gfx::Size scaled_size = gfx::ScaleToCeiledSize(
75 base_size, draw_image.scale().width(), draw_image.scale().height());
76
77 return MipMapUtil::GetLevelForSize(base_size, scaled_size);
78 }
79
80 SkSize CalculatePreScaleFactor(const DrawImage& draw_image, int mip_level) {
vmpstr 2016/06/21 20:07:07 Put a comment before the function please
ericrk 2016/06/22 18:56:38 Done.
81 gfx::Size base_size(draw_image.image()->width(),
82 draw_image.image()->height());
83 return MipMapUtil::GetScaleAdjustmentForLevel(base_size, mip_level);
84 }
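
For context, MipMapUtil (added by this patch in cc/tiles/mipmap_util.h, included above) maps a base image size to a power-of-two mip level that still covers the requested draw size, plus the matching scale adjustment. A minimal self-contained sketch of the idea, not the actual MipMapUtil implementation; these helper names are hypothetical:

#include <cmath>

// Hypothetical stand-in for MipMapUtil::GetLevelForSize: returns the highest
// mip level whose dimensions (base halved per level, rounded up) still cover
// the target size in both dimensions. Level 0 is the unscaled image.
int SketchGetLevelForSize(int base_w, int base_h, int target_w, int target_h) {
  int level = 0;
  while ((base_w + 1) / 2 >= target_w && (base_h + 1) / 2 >= target_h) {
    base_w = (base_w + 1) / 2;
    base_h = (base_h + 1) / 2;
    ++level;
  }
  return level;
}

// Hypothetical stand-in for MipMapUtil::GetScaleAdjustmentForLevel: each level
// halves both dimensions, so the adjustment is roughly 2^-level.
float SketchGetScaleAdjustmentForLevel(int level) {
  return std::ldexp(1.0f, -level);  // 1 / 2^level
}

Roughly, draws above half the image size map to level 0, draws above a quarter to level 1, and so on, which is what lets differently-scaled draws of the same image share a single pre-scaled upload.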
85
86 // Generates a uint64_t which uniquely identifies a DrawImage for the purposes
87 // of the |in_use_cache_|. The key is generated as follows:
88 // ╔══════════════════════╤═══════════╤═══════════╗
89 // ║ image_id │ mip_level │ quality ║
90 // ╚════════32═bits═══════╧══16═bits══╧══16═bits══╝
91 uint64_t GenerateInUseCacheKey(const DrawImage& draw_image) {
92 static_assert(
93 kLast_SkFilterQuality <= std::numeric_limits<uint16_t>::max(),
94 "InUseCacheKey depends on SkFilterQuality fitting in a uint16_t.");
95
96 SkFilterQuality filter_quality = draw_image.filter_quality();
97 DCHECK_LE(filter_quality, kLast_SkFilterQuality);
98
99 // An image has at most log_2(max(width, height)) mip levels, so given our
100 // usage of 32-bit sizes for images, key.mip_level is at most 31.
101 int32_t mip_level = CalculatePreScaleMipLevel(draw_image);
102 DCHECK_LT(mip_level, 32);
103
104 return (static_cast<uint64_t>(draw_image.image()->uniqueID()) << 32) |
105 (mip_level << 16) | draw_image.filter_quality();
106 }
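
Since the key packs three fields into a single uint64_t, the inverse mapping can be handy when reading cache dumps. A hedged sketch; these helpers are illustrative and not part of the patch:

#include <cstdint>

// Undo the packing done in GenerateInUseCacheKey above: bits 63..32 hold the
// SkImage uniqueID, bits 31..16 the mip level, bits 15..0 the filter quality.
inline uint32_t InUseKeyImageId(uint64_t key) {
  return static_cast<uint32_t>(key >> 32);
}
inline uint16_t InUseKeyMipLevel(uint64_t key) {
  return static_cast<uint16_t>((key >> 16) & 0xFFFF);
}
inline uint16_t InUseKeyQuality(uint64_t key) {
  return static_cast<uint16_t>(key & 0xFFFF);
}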
107
57 } // namespace 108 } // namespace
58 109
110 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(
111 scoped_refptr<ImageData> image_data)
112 : image_data(std::move(image_data)) {}
113 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(
114 const InUseCacheEntry&) = default;
115 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) =
116 default;
117 GpuImageDecodeController::InUseCacheEntry::~InUseCacheEntry() = default;
118
59 // Task which decodes an image and stores the result in discardable memory. 119 // Task which decodes an image and stores the result in discardable memory.
60 // This task does not use GPU resources and can be run on any thread. 120 // This task does not use GPU resources and can be run on any thread.
61 class ImageDecodeTaskImpl : public TileTask { 121 class ImageDecodeTaskImpl : public TileTask {
62 public: 122 public:
63 ImageDecodeTaskImpl(GpuImageDecodeController* controller, 123 ImageDecodeTaskImpl(GpuImageDecodeController* controller,
64 const DrawImage& draw_image, 124 const DrawImage& draw_image,
65 const ImageDecodeController::TracingInfo& tracing_info) 125 const ImageDecodeController::TracingInfo& tracing_info)
66 : TileTask(true), 126 : TileTask(true),
67 controller_(controller), 127 controller_(controller),
68 image_(draw_image), 128 image_(draw_image),
(...skipping 99 matching lines...)
168 228
169 void GpuImageDecodeController::DecodedImageData::ResetData() { 229 void GpuImageDecodeController::DecodedImageData::ResetData() {
170 DCHECK(!is_locked_); 230 DCHECK(!is_locked_);
171 if (data_) 231 if (data_)
172 ReportUsageStats(); 232 ReportUsageStats();
173 data_ = nullptr; 233 data_ = nullptr;
174 usage_stats_ = UsageStats(); 234 usage_stats_ = UsageStats();
175 } 235 }
176 236
177 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const { 237 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const {
178 // lock_count | used | result state 238 // lock_count used result state
179 // ===========+=======+================== 239 // ═══════════╪═══════╪══════════════════
vmpstr 2016/06/21 20:07:08 nice!
ericrk 2016/06/22 18:56:38 Done.
180 // 1 | false | WASTED_ONCE 240 // 1 false WASTED_ONCE
181 // 1 | true | USED_ONCE 241 // 1 true USED_ONCE
182 // >1 | false | WASTED_RELOCKED 242 // >1 false WASTED_RELOCKED
183 // >1 | true | USED_RELOCKED 243 // >1 true USED_RELOCKED
184 // Note that it's important not to reorder the following enums, since the 244 // Note that it's important not to reorder the following enums, since the
185 // numerical values are used in the histogram code. 245 // numerical values are used in the histogram code.
186 enum State : int { 246 enum State : int {
187 DECODED_IMAGE_STATE_WASTED_ONCE, 247 DECODED_IMAGE_STATE_WASTED_ONCE,
188 DECODED_IMAGE_STATE_USED_ONCE, 248 DECODED_IMAGE_STATE_USED_ONCE,
189 DECODED_IMAGE_STATE_WASTED_RELOCKED, 249 DECODED_IMAGE_STATE_WASTED_RELOCKED,
190 DECODED_IMAGE_STATE_USED_RELOCKED, 250 DECODED_IMAGE_STATE_USED_RELOCKED,
191 DECODED_IMAGE_STATE_COUNT 251 DECODED_IMAGE_STATE_COUNT
192 } state = DECODED_IMAGE_STATE_WASTED_ONCE; 252 } state = DECODED_IMAGE_STATE_WASTED_ONCE;
193 253
(...skipping 30 matching lines...)
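
The elided lines just above turn the table into one of these enum values and report it. A minimal sketch of that mapping, assuming a |lock_count| field on |usage_stats_| alongside the |used| flag, and with the histogram name assumed rather than taken from the patch:

// Sketch only: pick the row of the table above from how many times the
// discardable data was locked and whether it was used while locked.
if (usage_stats_.lock_count == 1) {
  state = usage_stats_.used ? DECODED_IMAGE_STATE_USED_ONCE
                            : DECODED_IMAGE_STATE_WASTED_ONCE;
} else {
  state = usage_stats_.used ? DECODED_IMAGE_STATE_USED_RELOCKED
                            : DECODED_IMAGE_STATE_WASTED_RELOCKED;
}
UMA_HISTOGRAM_ENUMERATION("Renderer4.GpuImageDecodeState", state,
                          DECODED_IMAGE_STATE_COUNT);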
224 image_ = std::move(image); 284 image_ = std::move(image);
225 } 285 }
226 286
227 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const { 287 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const {
228 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used", 288 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used",
229 usage_stats_.used); 289 usage_stats_.used);
230 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted", 290 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted",
231 usage_stats_.first_ref_wasted); 291 usage_stats_.first_ref_wasted);
232 } 292 }
233 293
234 GpuImageDecodeController::ImageData::ImageData(DecodedDataMode mode, 294 GpuImageDecodeController::ImageData::ImageData(
235 size_t size) 295 DecodedDataMode mode,
236 : mode(mode), size(size) {} 296 size_t size,
297 int pre_scale_mip_level,
298 SkFilterQuality pre_scale_filter_quality)
299 : mode(mode),
300 size(size),
301 pre_scale_mip_level(pre_scale_mip_level),
302 pre_scale_filter_quality(pre_scale_filter_quality) {}
237 303
238 GpuImageDecodeController::ImageData::~ImageData() = default; 304 GpuImageDecodeController::ImageData::~ImageData() {
305 // We should never delete ImageData while it is in use or before it has been
306 // cleaned up.
307 DCHECK_EQ(0u, upload.ref_count);
308 DCHECK_EQ(0u, decode.ref_count);
309 DCHECK_EQ(false, decode.is_locked());
310 // This should always be cleaned up before deleting the image, as it needs to
311 // be freed with the GL context lock held.
312 DCHECK(!upload.image());
313 }
239 314
240 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, 315 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context,
241 ResourceFormat decode_format, 316 ResourceFormat decode_format,
242 size_t max_gpu_image_bytes) 317 size_t max_gpu_image_bytes)
243 : format_(decode_format), 318 : format_(decode_format),
244 context_(context), 319 context_(context),
245 image_data_(ImageDataMRUCache::NO_AUTO_EVICT), 320 persistent_cache_(ImageDataMRUCache::NO_AUTO_EVICT),
246 cached_items_limit_(kMaxDiscardableItems), 321 cached_items_limit_(kMaxDiscardableItems),
247 cached_bytes_limit_(max_gpu_image_bytes), 322 cached_bytes_limit_(max_gpu_image_bytes),
248 bytes_used_(0), 323 bytes_used_(0),
249 max_gpu_image_bytes_(max_gpu_image_bytes) { 324 max_gpu_image_bytes_(max_gpu_image_bytes) {
250 // Acquire the context_lock so that we can safely retrieve the 325 // Acquire the context_lock so that we can safely retrieve the
251 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. 326 // GrContextThreadSafeProxy. This proxy can then be used with no lock held.
252 { 327 {
253 ContextProvider::ScopedContextLock context_lock(context_); 328 ContextProvider::ScopedContextLock context_lock(context_);
254 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( 329 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>(
255 context->GrContext()->threadSafeProxy()); 330 context->GrContext()->threadSafeProxy());
(...skipping 22 matching lines...)
278 const DrawImage& draw_image, 353 const DrawImage& draw_image,
279 const TracingInfo& tracing_info, 354 const TracingInfo& tracing_info,
280 scoped_refptr<TileTask>* task) { 355 scoped_refptr<TileTask>* task) {
281 if (SkipImage(draw_image)) { 356 if (SkipImage(draw_image)) {
282 *task = nullptr; 357 *task = nullptr;
283 return false; 358 return false;
284 } 359 }
285 360
286 base::AutoLock lock(lock_); 361 base::AutoLock lock(lock_);
287 const auto image_id = draw_image.image()->uniqueID(); 362 const auto image_id = draw_image.image()->uniqueID();
288 363 ImageData* image_data = GetImageDataForDrawImage(draw_image);
289 auto found = image_data_.Get(image_id); 364 if (image_data) {
290 if (found != image_data_.end()) {
291 ImageData* image_data = found->second.get();
292 if (image_data->is_at_raster) { 365 if (image_data->is_at_raster) {
293 // Image is at-raster, just return, this usage will be at-raster as well. 366 // Image is at-raster, just return, this usage will be at-raster as well.
294 *task = nullptr; 367 *task = nullptr;
295 return false; 368 return false;
296 } 369 }
297 370
298 if (image_data->decode.decode_failure) { 371 if (image_data->decode.decode_failure) {
299 // We have already tried and failed to decode this image, so just return. 372 // We have already tried and failed to decode this image, so just return.
300 *task = nullptr; 373 *task = nullptr;
301 return false; 374 return false;
302 } 375 }
303 376
304 if (image_data->upload.image()) { 377 if (image_data->upload.image()) {
305 // The image is already uploaded, ref and return. 378 // The image is already uploaded, ref and return.
306 RefImage(draw_image); 379 RefImage(draw_image);
307 *task = nullptr; 380 *task = nullptr;
308 return true; 381 return true;
309 } 382 }
310 }
311 383
312 // We didn't have a pre-uploaded image, so we need an upload task. Try to find 384 // We didn't have a pre-uploaded image, so we need an upload task. Try to
313 // an existing one. 385 // find an existing one.
314 scoped_refptr<TileTask>& existing_task = 386 if (image_data->upload.task) {
315 pending_image_upload_tasks_[image_id]; 387 // We had an existing upload task, ref the image and return the task.
316 if (existing_task) { 388 RefImage(draw_image);
317 // We had an existing upload task, ref the image and return the task. 389 *task = image_data->upload.task;
318 RefImage(draw_image); 390 return true;
319 *task = existing_task; 391 } else {
320 return true; 392 }
321 } 393 }
322 394
323 // We will be creating a new upload task. If necessary, create a placeholder 395 // We will be creating a new upload task. If necessary, create a placeholder
324 // ImageData to hold the result. 396 // ImageData to hold the result.
325 std::unique_ptr<ImageData> new_data; 397 scoped_refptr<ImageData> new_data;
326 ImageData* data; 398 if (!image_data) {
327 if (found == image_data_.end()) {
328 new_data = CreateImageData(draw_image); 399 new_data = CreateImageData(draw_image);
329 data = new_data.get(); 400 image_data = new_data.get();
330 } else {
331 data = found->second.get();
332 } 401 }
333 402
334 // Ensure that the image we're about to decode/upload will fit in memory. 403 // Ensure that the image we're about to decode/upload will fit in memory.
335 if (!EnsureCapacity(data->size)) { 404 if (!EnsureCapacity(image_data->size)) {
336 // Image will not fit, do an at-raster decode. 405 // Image will not fit, do an at-raster decode.
337 *task = nullptr; 406 *task = nullptr;
338 return false; 407 return false;
339 } 408 }
340 409
341 // If we had to create new image data, add it to our map now that we know it 410 // If we had to create new image data, add it to our map now that we know it
342 // will fit. 411 // will fit.
343 if (new_data) 412 if (new_data)
344 found = image_data_.Put(image_id, std::move(new_data)); 413 persistent_cache_.Put(image_id, std::move(new_data));
345 414
346 // Ref image and create upload and decode tasks. We will release this ref 415
347 // in UploadTaskCompleted. 416 // in UploadTaskCompleted.
348 RefImage(draw_image); 417 RefImage(draw_image);
349 existing_task = make_scoped_refptr(new ImageUploadTaskImpl( 418 *task = make_scoped_refptr(new ImageUploadTaskImpl(
350 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info), 419 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info),
351 tracing_info)); 420 tracing_info));
421 image_data->upload.task = *task;
352 422
353 // Ref the image again - this ref is owned by the caller, and it is their 423 // Ref the image again - this ref is owned by the caller, and it is their
354 // responsibility to release it by calling UnrefImage. 424 // responsibility to release it by calling UnrefImage.
355 RefImage(draw_image); 425 RefImage(draw_image);
356 *task = existing_task;
357 return true; 426 return true;
358 } 427 }
359 428
360 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { 429 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) {
361 base::AutoLock lock(lock_); 430 base::AutoLock lock(lock_);
362 UnrefImageInternal(draw_image); 431 UnrefImageInternal(draw_image);
363 } 432 }
364 433
365 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( 434 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw(
366 const DrawImage& draw_image) { 435 const DrawImage& draw_image) {
367 // We are being called during raster. The context lock must already be 436 // We are being called during raster. The context lock must already be
368 // acquired by the caller. 437 // acquired by the caller.
369 context_->GetLock()->AssertAcquired(); 438 context_->GetLock()->AssertAcquired();
370 439
371 if (SkipImage(draw_image)) 440 if (SkipImage(draw_image))
372 return DecodedDrawImage(nullptr, draw_image.filter_quality()); 441 return DecodedDrawImage(nullptr, draw_image.filter_quality());
373 442
374 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); 443 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw");
375 444
376 base::AutoLock lock(lock_); 445 base::AutoLock lock(lock_);
377 const uint32_t unique_id = draw_image.image()->uniqueID(); 446 ImageData* image_data = GetImageDataForDrawImage(draw_image);
378 auto found = image_data_.Peek(unique_id); 447 if (!image_data) {
379 if (found == image_data_.end()) {
380 // We didn't find the image, create a new entry. 448 // We didn't find the image, create a new entry.
381 auto data = CreateImageData(draw_image); 449 auto data = CreateImageData(draw_image);
382 found = image_data_.Put(unique_id, std::move(data)); 450 image_data = data.get();
451 persistent_cache_.Put(draw_image.image()->uniqueID(), std::move(data));
383 } 452 }
384 453
385 ImageData* image_data = found->second.get();
386
387 if (!image_data->upload.budgeted) { 454 if (!image_data->upload.budgeted) {
388 // If image data is not budgeted by this point, it is at-raster. 455 // If image data is not budgeted by this point, it is at-raster.
389 image_data->is_at_raster = true; 456 image_data->is_at_raster = true;
390 } 457 }
391 458
392 // Ref the image and decode so that they stay alive while we are 459 // Ref the image and decode so that they stay alive while we are
393 // decoding/uploading. 460 // decoding/uploading.
394 RefImage(draw_image); 461 RefImage(draw_image);
395 RefImageDecode(draw_image); 462 RefImageDecode(draw_image);
396 463
397 // We may or may not need to decode and upload the image we've found, the 464 // We may or may not need to decode and upload the image we've found, the
398 // following functions early-out if we already decoded. 465
399 DecodeImageIfNecessary(draw_image, image_data); 466 DecodeImageIfNecessary(draw_image, image_data);
400 UploadImageIfNecessary(draw_image, image_data); 467 UploadImageIfNecessary(draw_image, image_data);
401 // Unref the image decode, but not the image. The image ref will be released 468 // Unref the image decode, but not the image. The image ref will be released
402 // in DrawWithImageFinished. 469 // in DrawWithImageFinished.
403 UnrefImageDecode(draw_image); 470 UnrefImageDecode(draw_image);
404 471
405 sk_sp<SkImage> image = image_data->upload.image(); 472 sk_sp<SkImage> image = image_data->upload.image();
406 image_data->upload.mark_used(); 473 image_data->upload.mark_used();
407 DCHECK(image || image_data->decode.decode_failure); 474 DCHECK(image || image_data->decode.decode_failure);
408 475
409 DecodedDrawImage decoded_draw_image(std::move(image), 476 SkSize scale_factor =
477 CalculatePreScaleFactor(draw_image, image_data->pre_scale_mip_level);
478 DecodedDrawImage decoded_draw_image(std::move(image), SkSize(), scale_factor,
410 draw_image.filter_quality()); 479 draw_image.filter_quality());
411 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster); 480 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster);
412 return decoded_draw_image; 481 return decoded_draw_image;
413 } 482 }
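
On the raster side this call is paired with DrawWithImageFinished() below, and the GL context lock must be held across the whole sequence. A hedged sketch of that pairing; the drawing itself is elided and the surrounding names are illustrative:

// Illustrative raster-side caller, not code from this patch.
DecodedDrawImage decoded = controller->GetDecodedImageForDraw(draw_image);
if (decoded.image()) {
  // Draw with decoded.image(), applying decoded.scale_adjustment() so the
  // pre-scaled (mipped) upload is drawn at the size the original DrawImage
  // requested.
}
controller->DrawWithImageFinished(draw_image, decoded);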
414 483
415 void GpuImageDecodeController::DrawWithImageFinished( 484 void GpuImageDecodeController::DrawWithImageFinished(
416 const DrawImage& draw_image, 485 const DrawImage& draw_image,
417 const DecodedDrawImage& decoded_draw_image) { 486 const DecodedDrawImage& decoded_draw_image) {
418 // We are being called during raster. The context lock must already be 487 // We are being called during raster. The context lock must already be
419 // acquired by the caller. 488 // acquired by the caller.
420 context_->GetLock()->AssertAcquired(); 489 context_->GetLock()->AssertAcquired();
421 490
422 if (SkipImage(draw_image)) 491 if (SkipImage(draw_image))
423 return; 492 return;
424 493
425 base::AutoLock lock(lock_); 494 base::AutoLock lock(lock_);
495 TRACE_EVENT0("cc", "GpuImageDecodeController::DrawWithImageFinished");
426 UnrefImageInternal(draw_image); 496 UnrefImageInternal(draw_image);
427 497
428 // We are mid-draw and holding the context lock, ensure we clean up any 498 // We are mid-draw and holding the context lock, ensure we clean up any
429 // textures (especially at-raster), which may have just been marked for 499 // textures (especially at-raster), which may have just been marked for
430 // deletion by UnrefImage. 500 // deletion by UnrefImage.
431 DeletePendingImages(); 501 DeletePendingImages();
432 } 502 }
433 503
434 void GpuImageDecodeController::ReduceCacheUsage() { 504 void GpuImageDecodeController::ReduceCacheUsage() {
435 base::AutoLock lock(lock_); 505 base::AutoLock lock(lock_);
(...skipping 15 matching lines...)
451 DeletePendingImages(); 521 DeletePendingImages();
452 } else { 522 } else {
453 base::AutoLock lock(lock_); 523 base::AutoLock lock(lock_);
454 cached_bytes_limit_ = max_gpu_image_bytes_; 524 cached_bytes_limit_ = max_gpu_image_bytes_;
455 } 525 }
456 } 526 }
457 527
458 bool GpuImageDecodeController::OnMemoryDump( 528 bool GpuImageDecodeController::OnMemoryDump(
459 const base::trace_event::MemoryDumpArgs& args, 529 const base::trace_event::MemoryDumpArgs& args,
460 base::trace_event::ProcessMemoryDump* pmd) { 530 base::trace_event::ProcessMemoryDump* pmd) {
461 for (const auto& image_pair : image_data_) { 531 for (const auto& image_pair : persistent_cache_) {
462 const ImageData* image_data = image_pair.second.get(); 532 const ImageData* image_data = image_pair.second.get();
463 const uint32_t image_id = image_pair.first; 533 const uint32_t image_id = image_pair.first;
464 534
465 // If we have discardable decoded data, dump this here. 535 // If we have discardable decoded data, dump this here.
466 if (image_data->decode.data()) { 536 if (image_data->decode.data()) {
467 std::string discardable_dump_name = base::StringPrintf( 537 std::string discardable_dump_name = base::StringPrintf(
468 "cc/image_memory/controller_%p/discardable/image_%d", this, image_id); 538 "cc/image_memory/controller_%p/discardable/image_%d", this, image_id);
469 base::trace_event::MemoryAllocatorDump* dump = 539 base::trace_event::MemoryAllocatorDump* dump =
470 image_data->decode.data()->CreateMemoryAllocatorDump( 540 image_data->decode.data()->CreateMemoryAllocatorDump(
471 discardable_dump_name.c_str(), pmd); 541 discardable_dump_name.c_str(), pmd);
472
473 // If our image is locked, dump the "locked_size" as an additional column. 542 // If our image is locked, dump the "locked_size" as an additional column.
474 // This lets us see the amount of discardable which is contributing to 543 // This lets us see the amount of discardable which is contributing to
475 // memory pressure. 544 // memory pressure.
476 if (image_data->decode.is_locked()) { 545 if (image_data->decode.is_locked()) {
477 dump->AddScalar("locked_size", 546 dump->AddScalar("locked_size",
478 base::trace_event::MemoryAllocatorDump::kUnitsBytes, 547 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
479 image_data->size); 548 image_data->size);
480 } 549 }
481 } 550 }
482 551
(...skipping 26 matching lines...)
509 pmd->CreateSharedGlobalAllocatorDump(guid); 578 pmd->CreateSharedGlobalAllocatorDump(guid);
510 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); 579 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
511 } 580 }
512 } 581 }
513 582
514 return true; 583 return true;
515 } 584 }
516 585
517 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { 586 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) {
518 base::AutoLock lock(lock_); 587 base::AutoLock lock(lock_);
519 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 588 ImageData* image_data = GetImageDataForDrawImage(draw_image);
520 DCHECK(found != image_data_.end()); 589 DCHECK(image_data);
521 DCHECK(!found->second->is_at_raster); 590 DCHECK(!image_data->is_at_raster);
522 DecodeImageIfNecessary(draw_image, found->second.get()); 591 DecodeImageIfNecessary(draw_image, image_data);
523 } 592 }
524 593
525 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { 594 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) {
526 ContextProvider::ScopedContextLock context_lock(context_); 595 ContextProvider::ScopedContextLock context_lock(context_);
527 base::AutoLock lock(lock_); 596 base::AutoLock lock(lock_);
528 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 597 ImageData* image_data = GetImageDataForDrawImage(draw_image);
529 DCHECK(found != image_data_.end()); 598 DCHECK(image_data);
530 DCHECK(!found->second->is_at_raster); 599 DCHECK(!image_data->is_at_raster);
531 UploadImageIfNecessary(draw_image, found->second.get()); 600 UploadImageIfNecessary(draw_image, image_data);
532 } 601 }
533 602
534 void GpuImageDecodeController::OnImageDecodeTaskCompleted( 603 void GpuImageDecodeController::OnImageDecodeTaskCompleted(
535 const DrawImage& draw_image) { 604 const DrawImage& draw_image) {
536 base::AutoLock lock(lock_); 605 base::AutoLock lock(lock_);
537 // Decode task is complete, remove it from our list of pending tasks. 606 // Decode task is complete, remove our reference to it.
538 pending_image_decode_tasks_.erase(draw_image.image()->uniqueID()); 607 ImageData* image_data = GetImageDataForDrawImage(draw_image);
608 DCHECK(image_data);
609 DCHECK(image_data->decode.task);
610 image_data->decode.task = nullptr;
539 611
540 // While the decode task is active, we keep a ref on the decoded data. 612 // While the decode task is active, we keep a ref on the decoded data.
541 // Release that ref now. 613 // Release that ref now.
542 UnrefImageDecode(draw_image); 614 UnrefImageDecode(draw_image);
543 } 615 }
544 616
545 void GpuImageDecodeController::OnImageUploadTaskCompleted( 617 void GpuImageDecodeController::OnImageUploadTaskCompleted(
546 const DrawImage& draw_image) { 618 const DrawImage& draw_image) {
547 base::AutoLock lock(lock_); 619 base::AutoLock lock(lock_);
548 // Upload task is complete, remove it from our list of pending tasks. 620 // Upload task is complete, remove our referene to it.
vmpstr 2016/06/21 20:07:08 s/ne/nce/
ericrk 2016/06/22 18:56:37 Done.
549 pending_image_upload_tasks_.erase(draw_image.image()->uniqueID()); 621 ImageData* image_data = GetImageDataForDrawImage(draw_image);
622 DCHECK(image_data);
623 DCHECK(image_data->upload.task);
624 image_data->upload.task = nullptr;
550 625
551 // While the upload task is active, we keep a ref on both the image it will be 626 // While the upload task is active, we keep a ref on both the image it will be
552 // populating, as well as the decode it needs to populate it. Release these 627 // populating, as well as the decode it needs to populate it. Release these
553 // refs now. 628 // refs now.
554 UnrefImageDecode(draw_image); 629 UnrefImageDecode(draw_image);
555 UnrefImageInternal(draw_image); 630 UnrefImageInternal(draw_image);
556 } 631 }
557 632
558 // Checks if an existing image decode exists. If not, returns a task to produce 633 // Checks if an existing image decode exists. If not, returns a task to produce
559 // the requested decode. 634 // the requested decode.
560 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef( 635 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef(
561 const DrawImage& draw_image, 636 const DrawImage& draw_image,
562 const TracingInfo& tracing_info) { 637 const TracingInfo& tracing_info) {
563 lock_.AssertAcquired(); 638 lock_.AssertAcquired();
564 639
565 const uint32_t image_id = draw_image.image()->uniqueID();
566
567 // This ref is kept alive while an upload task may need this decode. We 640 // This ref is kept alive while an upload task may need this decode. We
568 // release this ref in UploadTaskCompleted. 641 // release this ref in UploadTaskCompleted.
569 RefImageDecode(draw_image); 642 RefImageDecode(draw_image);
570 643
571 auto found = image_data_.Peek(image_id); 644 ImageData* image_data = GetImageDataForDrawImage(draw_image);
572 if (found != image_data_.end() && found->second->decode.is_locked()) { 645 DCHECK(image_data);
646 if (image_data->decode.is_locked()) {
573 // We should never be creating a decode task for an at raster image. 647 // We should never be creating a decode task for an at raster image.
574 DCHECK(!found->second->is_at_raster); 648 DCHECK(!image_data->is_at_raster);
575 // We should never be creating a decode for an already-uploaded image. 649 // We should never be creating a decode for an already-uploaded image.
576 DCHECK(!found->second->upload.image()); 650 DCHECK(!image_data->upload.image());
577 return nullptr; 651 return nullptr;
578 } 652 }
579 653
580 // We didn't have an existing locked image, create a task to lock or decode. 654 // We didn't have an existing locked image, create a task to lock or decode.
581 scoped_refptr<TileTask>& existing_task = 655 scoped_refptr<TileTask>& existing_task = image_data->decode.task;
582 pending_image_decode_tasks_[image_id];
583 if (!existing_task) { 656 if (!existing_task) {
584 // Ref image decode and create a decode task. This ref will be released in 657 // Ref image decode and create a decode task. This ref will be released in
585 // DecodeTaskCompleted. 658 // DecodeTaskCompleted.
586 RefImageDecode(draw_image); 659 RefImageDecode(draw_image);
587 existing_task = make_scoped_refptr( 660 existing_task = make_scoped_refptr(
588 new ImageDecodeTaskImpl(this, draw_image, tracing_info)); 661 new ImageDecodeTaskImpl(this, draw_image, tracing_info));
589 } 662 }
590 return existing_task; 663 return existing_task;
591 } 664 }
592 665
593 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) { 666 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) {
594 lock_.AssertAcquired(); 667 lock_.AssertAcquired();
595 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 668 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
596 DCHECK(found != image_data_.end()); 669 DCHECK(found != in_use_cache_.end());
597 ++found->second->decode.ref_count; 670 ++found->second.ref_count;
598 RefCountChanged(found->second.get()); 671 ++found->second.image_data->decode.ref_count;
672 RefCountChanged(found->second.image_data.get());
599 } 673 }
600 674
601 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) { 675 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) {
602 lock_.AssertAcquired(); 676 lock_.AssertAcquired();
603 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 677 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
604 DCHECK(found != image_data_.end()); 678 DCHECK(found != in_use_cache_.end());
605 DCHECK_GT(found->second->decode.ref_count, 0u); 679 DCHECK_GT(found->second.image_data->decode.ref_count, 0u);
606 --found->second->decode.ref_count; 680 DCHECK_GT(found->second.ref_count, 0u);
607 RefCountChanged(found->second.get()); 681 --found->second.ref_count;
682 --found->second.image_data->decode.ref_count;
683 RefCountChanged(found->second.image_data.get());
684 if (found->second.ref_count == 0u) {
685 in_use_cache_.erase(found);
686 }
608 } 687 }
609 688
610 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) { 689 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) {
611 lock_.AssertAcquired(); 690 lock_.AssertAcquired();
612 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 691 InUseCacheKey key = GenerateInUseCacheKey(draw_image);
613 DCHECK(found != image_data_.end()); 692 auto found = in_use_cache_.find(key);
614 ++found->second->upload.ref_count; 693
615 RefCountChanged(found->second.get()); 694 // If no secondary cache entry was found for the given |draw_image|, then
695 // the draw_image only exists in the |persistent_cache_|. Create an in-use
696 // cache entry now.
697 if (found == in_use_cache_.end()) {
698 auto found_image = persistent_cache_.Peek(draw_image.image()->uniqueID());
699 DCHECK(found_image != persistent_cache_.end());
700 DCHECK(found_image->second->pre_scale_mip_level <=
701 CalculatePreScaleMipLevel(draw_image));
702 found =
703 in_use_cache_
704 .insert(std::make_pair(key, InUseCacheEntry(found_image->second)))
vmpstr 2016/06/21 20:07:08 make_pair here will make a std::pair<InUseCacheKey
ericrk 2016/06/22 18:56:37 Emplace works on windows - will update this again
705 .first;
706 }
707
708 DCHECK(found != in_use_cache_.end());
709 ++found->second.ref_count;
710 ++found->second.image_data->upload.ref_count;
711 RefCountChanged(found->second.image_data.get());
616 } 712 }
617 713
618 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) { 714 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) {
619 lock_.AssertAcquired(); 715 lock_.AssertAcquired();
620 auto found = image_data_.Peek(draw_image.image()->uniqueID()); 716 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
621 DCHECK(found != image_data_.end()); 717 DCHECK(found != in_use_cache_.end());
622 DCHECK_GT(found->second->upload.ref_count, 0u); 718 DCHECK_GT(found->second.image_data->upload.ref_count, 0u);
623 --found->second->upload.ref_count; 719 DCHECK_GT(found->second.ref_count, 0u);
624 if (found->second->upload.ref_count == 0) 720 --found->second.ref_count;
625 found->second->upload.notify_ref_reached_zero(); 721 --found->second.image_data->upload.ref_count;
626 RefCountChanged(found->second.get()); 722 RefCountChanged(found->second.image_data.get());
723 if (found->second.ref_count == 0u) {
724 in_use_cache_.erase(found);
725 }
627 } 726 }
628 727
629 // Called any time an image or decode ref count changes. Takes care of any 728 // Called any time an image or decode ref count changes. Takes care of any
630 // necessary memory budget book-keeping and cleanup. 729 // necessary memory budget book-keeping and cleanup.
631 void GpuImageDecodeController::RefCountChanged(ImageData* image_data) { 730 void GpuImageDecodeController::RefCountChanged(ImageData* image_data) {
632 lock_.AssertAcquired(); 731 lock_.AssertAcquired();
633 732
634 bool has_any_refs = 733 bool has_any_refs =
635 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0; 734 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
636 735
736 // Don't keep around orphaned images.
737 if (image_data->is_orphaned && !has_any_refs) {
738 images_pending_deletion_.push_back(std::move(image_data->upload.image()));
739 image_data->upload.SetImage(nullptr);
740 }
741
637 // Don't keep CPU images if they are unused, these images can be recreated by 742 // Don't keep CPU images if they are unused, these images can be recreated by
638 // re-locking discardable (rather than requiring a full upload like GPU 743 // re-locking discardable (rather than requiring a full upload like GPU
639 // images). 744 // images).
640 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { 745 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) {
641 images_pending_deletion_.push_back(image_data->upload.image()); 746 images_pending_deletion_.push_back(image_data->upload.image());
642 image_data->upload.SetImage(nullptr); 747 image_data->upload.SetImage(nullptr);
643 } 748 }
644 749
645 if (image_data->is_at_raster && !has_any_refs) { 750 if (image_data->is_at_raster && !has_any_refs) {
646 // We have an at-raster image which has reached zero refs. If it won't fit 751 // We have an at-raster image which has reached zero refs. If it won't fit
(...skipping 65 matching lines...)
712 lock_.AssertAcquired(); 817 lock_.AssertAcquired();
713 818
714 if (CanFitSize(required_size) && !ExceedsPreferredCount()) 819 if (CanFitSize(required_size) && !ExceedsPreferredCount())
715 return true; 820 return true;
716 821
717 // While we are over memory or preferred item capacity, we iterate through 822 // While we are over memory or preferred item capacity, we iterate through
718 // our set of cached image data in LRU order. For each image, we can do two 823 // our set of cached image data in LRU order. For each image, we can do two
719 // things: 1) We can free the uploaded image, reducing the memory usage of 824 // things: 1) We can free the uploaded image, reducing the memory usage of
720 // the cache and 2) we can remove the entry entirely, reducing the count of 825 // the cache and 2) we can remove the entry entirely, reducing the count of
721 // elements in the cache. 826 // elements in the cache.
722 for (auto it = image_data_.rbegin(); it != image_data_.rend();) { 827 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) {
723 if (it->second->decode.ref_count != 0 || 828 if (it->second->decode.ref_count != 0 ||
724 it->second->upload.ref_count != 0) { 829 it->second->upload.ref_count != 0) {
725 ++it; 830 ++it;
726 continue; 831 continue;
727 } 832 }
728 833
729 // Current entry has no refs. Ensure it is not locked. 834 // Current entry has no refs. Ensure it is not locked.
730 DCHECK(!it->second->decode.is_locked()); 835 DCHECK(!it->second->decode.is_locked());
731 836
732 // If an image without refs is budgeted, it must have an associated image 837 // If an image without refs is budgeted, it must have an associated image
733 // upload. 838 // upload.
734 DCHECK(!it->second->upload.budgeted || it->second->upload.image()); 839 DCHECK(!it->second->upload.budgeted || it->second->upload.image());
735 840
736 // Free the uploaded image if possible. 841 // Free the uploaded image if possible.
737 if (it->second->upload.image()) { 842 if (it->second->upload.image()) {
738 DCHECK(it->second->upload.budgeted); 843 DCHECK(it->second->upload.budgeted);
739 DCHECK_GE(bytes_used_, it->second->size); 844 DCHECK_GE(bytes_used_, it->second->size);
740 bytes_used_ -= it->second->size; 845 bytes_used_ -= it->second->size;
741 images_pending_deletion_.push_back(it->second->upload.image()); 846 images_pending_deletion_.push_back(it->second->upload.image());
742 it->second->upload.SetImage(nullptr); 847 it->second->upload.SetImage(nullptr);
743 it->second->upload.budgeted = false; 848 it->second->upload.budgeted = false;
744 } 849 }
745 850
746 // Free the entire entry if necessary. 851 // Free the entire entry if necessary.
747 if (ExceedsPreferredCount()) { 852 if (ExceedsPreferredCount()) {
748 it = image_data_.Erase(it); 853 it = persistent_cache_.Erase(it);
749 } else { 854 } else {
750 ++it; 855 ++it;
751 } 856 }
752 857
753 if (CanFitSize(required_size) && !ExceedsPreferredCount()) 858 if (CanFitSize(required_size) && !ExceedsPreferredCount())
754 return true; 859 return true;
755 } 860 }
756 861
757 // Preferred count is only used as a guideline when trimming the cache. Allow 862
758 // new elements to be added as long as we are below our size limit. 863 // new elements to be added as long as we are below our size limit.
759 return CanFitSize(required_size); 864 return CanFitSize(required_size);
760 } 865 }
761 866
762 bool GpuImageDecodeController::CanFitSize(size_t size) const { 867 bool GpuImageDecodeController::CanFitSize(size_t size) const {
763 lock_.AssertAcquired(); 868 lock_.AssertAcquired();
764 869
765 base::CheckedNumeric<uint32_t> new_size(bytes_used_); 870 base::CheckedNumeric<uint32_t> new_size(bytes_used_);
766 new_size += size; 871 new_size += size;
767 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_; 872 return new_size.IsValid() && new_size.ValueOrDie() <= cached_bytes_limit_;
768 } 873 }
769 874
770 bool GpuImageDecodeController::ExceedsPreferredCount() const { 875 bool GpuImageDecodeController::ExceedsPreferredCount() const {
771 lock_.AssertAcquired(); 876 lock_.AssertAcquired();
772 877
773 return image_data_.size() > cached_items_limit_; 878 return persistent_cache_.size() > cached_items_limit_;
774 } 879 }
775 880
776 void GpuImageDecodeController::DecodeImageIfNecessary( 881 void GpuImageDecodeController::DecodeImageIfNecessary(
777 const DrawImage& draw_image, 882 const DrawImage& draw_image,
778 ImageData* image_data) { 883 ImageData* image_data) {
779 lock_.AssertAcquired(); 884 lock_.AssertAcquired();
780 885
781 DCHECK_GT(image_data->decode.ref_count, 0u); 886 DCHECK_GT(image_data->decode.ref_count, 0u);
782 887
783 if (image_data->decode.decode_failure) { 888 if (image_data->decode.decode_failure) {
(...skipping 16 matching lines...)
800 905
801 image_data->decode.ResetData(); 906 image_data->decode.ResetData();
802 std::unique_ptr<base::DiscardableMemory> backing_memory; 907 std::unique_ptr<base::DiscardableMemory> backing_memory;
803 { 908 {
804 base::AutoUnlock unlock(lock_); 909 base::AutoUnlock unlock(lock_);
805 switch (image_data->mode) { 910 switch (image_data->mode) {
806 case DecodedDataMode::CPU: { 911 case DecodedDataMode::CPU: {
807 backing_memory = 912 backing_memory =
808 base::DiscardableMemoryAllocator::GetInstance() 913 base::DiscardableMemoryAllocator::GetInstance()
809 ->AllocateLockedDiscardableMemory(image_data->size); 914 ->AllocateLockedDiscardableMemory(image_data->size);
810 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); 915 SkImageInfo image_info = CreateImageInfoForDrawImage(
811 if (!draw_image.image()->readPixels(image_info, backing_memory->data(), 916 draw_image, image_data->pre_scale_mip_level);
812 image_info.minRowBytes(), 0, 0, 917 // In order to match GPU scaling quality (which uses mip-maps at high
813 SkImage::kDisallow_CachingHint)) { 918 // quality), we want to use at most medium filter quality for the
919 // scale.
920 SkFilterQuality scale_quality =
921 std::max(kMedium_SkFilterQuality, draw_image.filter_quality());
922 SkPixmap image_pixmap(image_info, backing_memory->data(),
923 image_info.minRowBytes());
924 // Note that scalePixels falls back to readPixels if the scale is 1x, so
925 // no need to special case that as an optimization.
926 if (!draw_image.image()->scalePixels(image_pixmap, scale_quality,
927 SkImage::kDisallow_CachingHint)) {
814 backing_memory.reset(); 928 backing_memory.reset();
815 } 929 }
816 break; 930 break;
817 } 931 }
818 case DecodedDataMode::GPU: { 932 case DecodedDataMode::GPU: {
819 backing_memory = 933 backing_memory =
820 base::DiscardableMemoryAllocator::GetInstance() 934 base::DiscardableMemoryAllocator::GetInstance()
821 ->AllocateLockedDiscardableMemory(image_data->size); 935 ->AllocateLockedDiscardableMemory(image_data->size);
822 auto params = ParamsFromDrawImage(draw_image); 936 auto params =
937 ParamsFromDrawImage(draw_image, image_data->pre_scale_mip_level);
823 if (!draw_image.image()->getDeferredTextureImageData( 938 if (!draw_image.image()->getDeferredTextureImageData(
824 *context_threadsafe_proxy_.get(), &params, 1, 939 *context_threadsafe_proxy_.get(), &params, 1,
825 backing_memory->data())) { 940 backing_memory->data())) {
826 backing_memory.reset(); 941 backing_memory.reset();
827 } 942 }
828 break; 943 break;
829 } 944 }
830 } 945 }
831 } 946 }
832 947
(...skipping 35 matching lines...)
868 // We are about to upload a new image and are holding the context lock. 983 // We are about to upload a new image and are holding the context lock.
869 // Ensure that any images which have been marked for deletion are actually 984 // Ensure that any images which have been marked for deletion are actually
870 // cleaned up so we don't exceed our memory limit during this upload. 985 // cleaned up so we don't exceed our memory limit during this upload.
871 DeletePendingImages(); 986 DeletePendingImages();
872 987
873 sk_sp<SkImage> uploaded_image; 988 sk_sp<SkImage> uploaded_image;
874 { 989 {
875 base::AutoUnlock unlock(lock_); 990 base::AutoUnlock unlock(lock_);
876 switch (image_data->mode) { 991 switch (image_data->mode) {
877 case DecodedDataMode::CPU: { 992 case DecodedDataMode::CPU: {
878 SkImageInfo image_info = CreateImageInfoForDrawImage(draw_image); 993 SkImageInfo image_info = CreateImageInfoForDrawImage(
994 draw_image, image_data->pre_scale_mip_level);
879 SkPixmap pixmap(image_info, image_data->decode.data()->data(), 995 SkPixmap pixmap(image_info, image_data->decode.data()->data(),
880 image_info.minRowBytes()); 996 image_info.minRowBytes());
881 uploaded_image = 997 uploaded_image =
882 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr); 998 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr);
883 break; 999 break;
884 } 1000 }
885 case DecodedDataMode::GPU: { 1001 case DecodedDataMode::GPU: {
886 uploaded_image = SkImage::MakeFromDeferredTextureImageData( 1002 uploaded_image = SkImage::MakeFromDeferredTextureImageData(
887 context_->GrContext(), image_data->decode.data()->data(), 1003 context_->GrContext(), image_data->decode.data()->data(),
888 SkBudgeted::kNo); 1004 SkBudgeted::kNo);
889 break; 1005 break;
890 } 1006 }
891 } 1007 }
892 } 1008 }
893 image_data->decode.mark_used(); 1009 image_data->decode.mark_used();
894 DCHECK(uploaded_image); 1010 DCHECK(uploaded_image);
895 1011
896 // At-raster may have decoded this while we were unlocked. If so, ignore our 1012 // At-raster may have decoded this while we were unlocked. If so, ignore our
897 // result. 1013 // result.
898 if (!image_data->upload.image()) 1014 if (!image_data->upload.image())
899 image_data->upload.SetImage(std::move(uploaded_image)); 1015 image_data->upload.SetImage(std::move(uploaded_image));
900 } 1016 }
901 1017
902 std::unique_ptr<GpuImageDecodeController::ImageData> 1018 scoped_refptr<GpuImageDecodeController::ImageData>
903 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { 1019 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) {
904 lock_.AssertAcquired(); 1020 lock_.AssertAcquired();
905 1021
906 DecodedDataMode mode; 1022 DecodedDataMode mode;
907 SkImageInfo info = CreateImageInfoForDrawImage(draw_image); 1023 int pre_scale_mip_level = CalculatePreScaleMipLevel(draw_image);
908 SkImage::DeferredTextureImageUsageParams params = 1024 SkImage::DeferredTextureImageUsageParams params =
909 ParamsFromDrawImage(draw_image); 1025 ParamsFromDrawImage(draw_image, pre_scale_mip_level);
910 size_t data_size = draw_image.image()->getDeferredTextureImageData( 1026 size_t data_size = draw_image.image()->getDeferredTextureImageData(
911 *context_threadsafe_proxy_.get(), &params, 1, nullptr); 1027 *context_threadsafe_proxy_.get(), &params, 1, nullptr);
912 1028
913 if (data_size == 0) { 1029 if (data_size == 0) {
914 // Can't upload image, too large or other failure. Try to use SW fallback. 1030 // Can't upload image, too large or other failure. Try to use SW fallback.
915 data_size = info.getSafeSize(info.minRowBytes()); 1031 SkImageInfo image_info =
1032 CreateImageInfoForDrawImage(draw_image, pre_scale_mip_level);
1033 data_size = image_info.getSafeSize(image_info.minRowBytes());
916 mode = DecodedDataMode::CPU; 1034 mode = DecodedDataMode::CPU;
917 } else { 1035 } else {
918 mode = DecodedDataMode::GPU; 1036 mode = DecodedDataMode::GPU;
919 } 1037 }
920 1038
921 return base::WrapUnique(new ImageData(mode, data_size)); 1039 return make_scoped_refptr(new ImageData(mode, data_size, pre_scale_mip_level,
1040 draw_image.filter_quality()));
922 } 1041 }
923 1042
924 void GpuImageDecodeController::DeletePendingImages() { 1043 void GpuImageDecodeController::DeletePendingImages() {
925 context_->GetLock()->AssertAcquired(); 1044 context_->GetLock()->AssertAcquired();
926 lock_.AssertAcquired(); 1045 lock_.AssertAcquired();
927 images_pending_deletion_.clear(); 1046 images_pending_deletion_.clear();
928 } 1047 }
929 1048
930 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( 1049 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage(
1050 const DrawImage& draw_image,
1051 int pre_scale_mip_level) const {
1052 DrawImage scaled_draw_image = draw_image.ApplyScale(
1053 CalculatePreScaleFactor(draw_image, pre_scale_mip_level));
1054 return SkImageInfo::Make(
1055 scaled_draw_image.image()->width() * scaled_draw_image.scale().width(),
1056 scaled_draw_image.image()->height() * scaled_draw_image.scale().height(),
1057 ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType);
1058 }
1059
1060 // Tries to find an ImageData that can be used to draw the provided
1061 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one
1062 // cannot be found, it looks for a compatible entry in our |persistent_cache_|.
1063 GpuImageDecodeController::ImageData*
1064 GpuImageDecodeController::GetImageDataForDrawImage(
1065 const DrawImage& draw_image) {
1066 lock_.AssertAcquired();
1067 {
1068 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
vmpstr 2016/06/21 20:07:08 I prefer if you just name a variable a different t
ericrk 2016/06/22 18:56:37 Done.
1069 if (found != in_use_cache_.end()) {
vmpstr 2016/06/21 20:07:08 no braces
ericrk 2016/06/22 18:56:38 Done.
1070 return found->second.image_data.get();
1071 }
1072 }
1073 {
1074 auto found = persistent_cache_.Get(draw_image.image()->uniqueID());
1075 if (found != persistent_cache_.end()) {
1076 if (IsCompatibleWithDrawImage(found->second.get(), draw_image)) {
1077 return found->second.get();
1078 } else {
1079 found->second->is_orphaned = true;
1080 // Call RefCountChanged before erasing the orphaned task to ensure
1081 // that we clean up any SkImage safely.
1082 RefCountChanged(found->second.get());
vmpstr 2016/06/21 20:07:07 Where did we change the ref count? Can this be cur
ericrk 2016/06/22 18:56:37 RefCountChanged really now means "ownership change
1083 persistent_cache_.Erase(found);
1084 }
1085 }
1086 }
1087
1088 return nullptr;
1089 }
1090
1091 // Determines if we can draw the provided |draw_image| using the provided
1092 // |image_data|. This is true if the |image_data| is not scaled, or if it
1093 // is scaled at an equal or larger scale and equal or larger quality to
1094 // the provided |draw_image|.
1095 bool GpuImageDecodeController::IsCompatibleWithDrawImage(
1096 const ImageData* image_data,
931 const DrawImage& draw_image) const { 1097 const DrawImage& draw_image) const {
932 return SkImageInfo::Make( 1098 bool not_scaled = image_data->pre_scale_mip_level == 0;
vmpstr 2016/06/21 20:07:08 can you name bools in the positive? ie "is_scaled"
ericrk 2016/06/22 18:56:38 Done.
933 draw_image.image()->width(), draw_image.image()->height(), 1099 bool scale_is_compatible =
934 ResourceFormatToClosestSkColorType(format_), kPremul_SkAlphaType); 1100 CalculatePreScaleMipLevel(draw_image) >= image_data->pre_scale_mip_level;
1101 bool quality_is_compatible =
1102 draw_image.filter_quality() <= image_data->pre_scale_filter_quality;
1103 return not_scaled || (scale_is_compatible && quality_is_compatible);
1104 }
1105
1106 size_t GpuImageDecodeController::GetDrawImageSizeForTesting(
1107 const DrawImage& image) {
1108 base::AutoLock lock(lock_);
1109 scoped_refptr<ImageData> data = CreateImageData(image);
1110 return data->size;
935 } 1111 }
936 1112
937 void GpuImageDecodeController::SetImageDecodingFailedForTesting( 1113 void GpuImageDecodeController::SetImageDecodingFailedForTesting(
938 const DrawImage& image) { 1114 const DrawImage& image) {
939 base::AutoLock lock(lock_); 1115 base::AutoLock lock(lock_);
940 auto found = image_data_.Peek(image.image()->uniqueID()); 1116 auto found = persistent_cache_.Peek(image.image()->uniqueID());
941 DCHECK(found != image_data_.end()); 1117 DCHECK(found != persistent_cache_.end());
942 ImageData* image_data = found->second.get(); 1118 ImageData* image_data = found->second.get();
943 image_data->decode.decode_failure = true; 1119 image_data->decode.decode_failure = true;
944 } 1120 }
945 1121
946 bool GpuImageDecodeController::DiscardableIsLockedForTesting( 1122 bool GpuImageDecodeController::DiscardableIsLockedForTesting(
947 const DrawImage& image) { 1123 const DrawImage& image) {
948 base::AutoLock lock(lock_); 1124 base::AutoLock lock(lock_);
949 auto found = image_data_.Peek(image.image()->uniqueID()); 1125 auto found = persistent_cache_.Peek(image.image()->uniqueID());
950 DCHECK(found != image_data_.end()); 1126 DCHECK(found != persistent_cache_.end());
951 ImageData* image_data = found->second.get(); 1127 ImageData* image_data = found->second.get();
952 return image_data->decode.is_locked(); 1128 return image_data->decode.is_locked();
953 } 1129 }
954 1130
955 } // namespace cc 1131 } // namespace cc