Chromium Code Reviews

Side by Side Diff: cc/tiles/gpu_image_decode_controller.cc

Issue 2541183002: cc: Rename ImageDecodeController to ImageDecodeCache. (Closed)
Patch Set: rename: update (Created 4 years ago)
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "cc/tiles/gpu_image_decode_controller.h"
6
7 #include <inttypes.h>
8
9 #include "base/debug/alias.h"
10 #include "base/memory/discardable_memory_allocator.h"
11 #include "base/memory/memory_coordinator_client_registry.h"
12 #include "base/memory/ptr_util.h"
13 #include "base/metrics/histogram_macros.h"
14 #include "base/numerics/safe_math.h"
15 #include "base/strings/stringprintf.h"
16 #include "base/threading/thread_task_runner_handle.h"
17 #include "base/trace_event/memory_dump_manager.h"
18 #include "cc/debug/devtools_instrumentation.h"
19 #include "cc/output/context_provider.h"
20 #include "cc/raster/tile_task.h"
21 #include "cc/resources/resource_format_utils.h"
22 #include "cc/tiles/mipmap_util.h"
23 #include "gpu/command_buffer/client/context_support.h"
24 #include "gpu/command_buffer/client/gles2_interface.h"
25 #include "gpu_image_decode_controller.h"
26 #include "skia/ext/texture_handle.h"
27 #include "third_party/skia/include/core/SkCanvas.h"
28 #include "third_party/skia/include/core/SkRefCnt.h"
29 #include "third_party/skia/include/core/SkSurface.h"
30 #include "third_party/skia/include/gpu/GrContext.h"
31 #include "third_party/skia/include/gpu/GrTexture.h"
32 #include "ui/gfx/skia_util.h"
33 #include "ui/gl/trace_util.h"
34
35 namespace cc {
36 namespace {
37
38 // The number of entries to keep in the cache, depending on the memory state of
39 // the system. This limit can be breached by in-use cache items, which cannot
40 // be deleted.
41 static const int kNormalMaxItemsInCache = 2000;
42 static const int kThrottledMaxItemsInCache = 100;
43 static const int kSuspendedMaxItemsInCache = 0;
44
45 // The factor by which to reduce the GPU memory size of the cache when in the
46 // THROTTLED memory state.
47 static const int kThrottledCacheSizeReductionFactor = 2;
48
49 // The maximum size in bytes of GPU memory in the cache while SUSPENDED or not
50 // visible. This limit can be breached by in-use cache items, which cannot be
51 // deleted.
52 static const int kSuspendedOrInvisibleMaxGpuImageBytes = 0;
53
54 // Returns true if an image would not be drawn and should therefore be
55 // skipped rather than decoded.
56 bool SkipImage(const DrawImage& draw_image) {
57 if (!SkIRect::Intersects(draw_image.src_rect(), draw_image.image()->bounds()))
58 return true;
59 if (std::abs(draw_image.scale().width()) <
60 std::numeric_limits<float>::epsilon() ||
61 std::abs(draw_image.scale().height()) <
62 std::numeric_limits<float>::epsilon()) {
63 return true;
64 }
65 return false;
66 }
67
68 // Returns the filter quality to use for scaling the image to upload scale. For
69 // GPU raster, medium and high filter quality are identical for downscales.
70 // Upload scaling is always a downscale, so cap our filter quality to medium.
71 SkFilterQuality CalculateUploadScaleFilterQuality(const DrawImage& draw_image) {
72 return std::min(kMedium_SkFilterQuality, draw_image.filter_quality());
73 }
74
75 // Calculate the mip level to upload-scale the image to before uploading. We use
76 // mip levels rather than exact scales to increase re-use of scaled images.
77 int CalculateUploadScaleMipLevel(const DrawImage& draw_image) {
78 // Images which are being clipped will have color-bleeding if scaled.
79 // TODO(ericrk): Investigate uploading clipped images to handle this case and
80 // provide further optimization. crbug.com/620899
81 if (draw_image.src_rect() != draw_image.image()->bounds())
82 return 0;
83
84 gfx::Size base_size(draw_image.image()->width(),
85 draw_image.image()->height());
86 // Ceil our scaled size so that the mip map generated is guaranteed to be
87 // larger. Take the abs of the scale, as mipmap functions don't handle
88 // (and aren't impacted by) negative image dimensions.
89 gfx::Size scaled_size =
90 gfx::ScaleToCeiledSize(base_size, std::abs(draw_image.scale().width()),
91 std::abs(draw_image.scale().height()));
92
93 return MipMapUtil::GetLevelForSize(base_size, scaled_size);
94 }
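
For intuition, the following standalone sketch models the level selection above under the assumption that MipMapUtil::GetLevelForSize returns the lowest mip level whose dimensions still cover the (ceiled) target size. The halving rule and helper name below are illustrative assumptions, not the actual MipMapUtil implementation.

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for MipMapUtil::GetLevelForSize: walk down the mip
// chain (halving each level, rounding up) while the next level still covers
// the target size in both dimensions.
int GetLevelForSizeSketch(int base_w, int base_h, int target_w, int target_h) {
  int level = 0;
  while ((base_w + 1) / 2 >= target_w && (base_h + 1) / 2 >= target_h &&
         (base_w > 1 || base_h > 1)) {
    base_w = std::max(1, (base_w + 1) / 2);
    base_h = std::max(1, (base_h + 1) / 2);
    ++level;
  }
  return level;
}

int main() {
  // A 1000x800 image drawn at ~0.3x needs a 300x240 target. Level 1
  // (500x400) still covers it; level 2 (250x200) would not, so level 1 wins.
  std::printf("%d\n", GetLevelForSizeSketch(1000, 800, 300, 240));  // Prints 1.
}
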
95
96 // Calculates the scale factor which can be used to scale an image to a given
97 // mip level.
98 SkSize CalculateScaleFactorForMipLevel(const DrawImage& draw_image,
99 int mip_level) {
100 gfx::Size base_size(draw_image.image()->width(),
101 draw_image.image()->height());
102 return MipMapUtil::GetScaleAdjustmentForLevel(base_size, mip_level);
103 }
104
105 // Calculates the size of a given mip level.
106 gfx::Size CalculateSizeForMipLevel(const DrawImage& draw_image, int mip_level) {
107 gfx::Size base_size(draw_image.image()->width(),
108 draw_image.image()->height());
109 return MipMapUtil::GetSizeForLevel(base_size, mip_level);
110 }
111
112 // Generates a uint64_t which uniquely identifies a DrawImage for the purposes
113 // of the |in_use_cache_|. The key is generated as follows:
114 // ╔══════════════════════╤═══════════╤═══════════╗
115 // ║ image_id │ mip_level │ quality ║
116 // ╚════════32═bits═══════╧══16═bits══╧══16═bits══╝
117 uint64_t GenerateInUseCacheKey(const DrawImage& draw_image) {
118 static_assert(
119 kLast_SkFilterQuality <= std::numeric_limits<uint16_t>::max(),
120 "InUseCacheKey depends on SkFilterQuality fitting in a uint16_t.");
121
122 SkFilterQuality filter_quality =
123 CalculateUploadScaleFilterQuality(draw_image);
124 DCHECK_LE(filter_quality, kLast_SkFilterQuality);
125
126 // An image has at most log_2(max(width, height)) mip levels, so given our
127 // usage of 32-bit sizes for images, key.mip_level is at most 31.
128 int32_t mip_level = CalculateUploadScaleMipLevel(draw_image);
129 DCHECK_LT(mip_level, 32);
130
131 return (static_cast<uint64_t>(draw_image.image()->uniqueID()) << 32) |
132 (mip_level << 16) | filter_quality;
133 }
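
The bit layout above can be checked with a compilable standalone illustration; the values below are invented for demonstration.

#include <cstdint>
#include <cstdio>

// Packs the three key components exactly as GenerateInUseCacheKey() does:
// image ID in bits 32-63, mip level in bits 16-31, filter quality in 0-15.
uint64_t PackInUseCacheKey(uint32_t image_id, uint16_t mip_level,
                           uint16_t quality) {
  return (static_cast<uint64_t>(image_id) << 32) |
         (static_cast<uint64_t>(mip_level) << 16) | quality;
}

int main() {
  // Invented values: image_id 7, mip level 2, quality 1.
  uint64_t key = PackInUseCacheKey(7u, 2u, 1u);
  // Prints 0x700020001: 0x7 in the high word, 0x0002 and 0x0001 below it.
  std::printf("%#llx\n", static_cast<unsigned long long>(key));
}
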
134
135 } // namespace
136
137 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(
138 scoped_refptr<ImageData> image_data)
139 : image_data(std::move(image_data)) {}
140 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(
141 const InUseCacheEntry&) = default;
142 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) =
143 default;
144 GpuImageDecodeController::InUseCacheEntry::~InUseCacheEntry() = default;
145
146 // Task which decodes an image and stores the result in discardable memory.
147 // This task does not use GPU resources and can be run on any thread.
148 class ImageDecodeTaskImpl : public TileTask {
149 public:
150 ImageDecodeTaskImpl(GpuImageDecodeController* controller,
151 const DrawImage& draw_image,
152 const ImageDecodeController::TracingInfo& tracing_info)
153 : TileTask(true),
154 controller_(controller),
155 image_(draw_image),
156 tracing_info_(tracing_info) {
157 DCHECK(!SkipImage(draw_image));
158 }
159
160 // Overridden from Task:
161 void RunOnWorkerThread() override {
162 TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu",
163 "source_prepare_tiles_id", tracing_info_.prepare_tiles_id);
164 devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
165 image_.image().get(),
166 devtools_instrumentation::ScopedImageDecodeTask::GPU);
167 controller_->DecodeImage(image_);
168 }
169
170 // Overridden from TileTask:
171 void OnTaskCompleted() override {
172 controller_->OnImageDecodeTaskCompleted(image_);
173 }
174
175 protected:
176 ~ImageDecodeTaskImpl() override {}
177
178 private:
179 GpuImageDecodeController* controller_;
180 DrawImage image_;
181 const ImageDecodeController::TracingInfo tracing_info_;
182
183 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
184 };
185
186 // Task which creates an image from decoded data. Typically this involves
187 // uploading data to the GPU, which requires this task be run on the non-
188 // concurrent thread.
189 class ImageUploadTaskImpl : public TileTask {
190 public:
191 ImageUploadTaskImpl(GpuImageDecodeController* controller,
192 const DrawImage& draw_image,
193 scoped_refptr<TileTask> decode_dependency,
194 const ImageDecodeController::TracingInfo& tracing_info)
195 : TileTask(false),
196 controller_(controller),
197 image_(draw_image),
198 tracing_info_(tracing_info) {
199 DCHECK(!SkipImage(draw_image));
200 // If an image is already decoded and locked, we will not generate a
201 // decode task.
202 if (decode_dependency)
203 dependencies_.push_back(std::move(decode_dependency));
204 }
205
206 // Overridden from Task:
207 void RunOnWorkerThread() override {
208 TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu",
209 "source_prepare_tiles_id", tracing_info_.prepare_tiles_id);
210 controller_->UploadImage(image_);
211 }
212
213 // Overridden from TileTask:
214 void OnTaskCompleted() override {
215 controller_->OnImageUploadTaskCompleted(image_);
216 }
217
218 protected:
219 ~ImageUploadTaskImpl() override {}
220
221 private:
222 GpuImageDecodeController* controller_;
223 DrawImage image_;
224 const ImageDecodeController::TracingInfo tracing_info_;
225
226 DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl);
227 };
228
229 GpuImageDecodeController::DecodedImageData::DecodedImageData() = default;
230 GpuImageDecodeController::DecodedImageData::~DecodedImageData() {
231 ResetData();
232 }
233
234 bool GpuImageDecodeController::DecodedImageData::Lock() {
235 DCHECK(!is_locked_);
236 is_locked_ = data_->Lock();
237 if (is_locked_)
238 ++usage_stats_.lock_count;
239 return is_locked_;
240 }
241
242 void GpuImageDecodeController::DecodedImageData::Unlock() {
243 DCHECK(is_locked_);
244 data_->Unlock();
245 if (usage_stats_.lock_count == 1)
246 usage_stats_.first_lock_wasted = !usage_stats_.used;
247 is_locked_ = false;
248 }
249
250 void GpuImageDecodeController::DecodedImageData::SetLockedData(
251 std::unique_ptr<base::DiscardableMemory> data) {
252 DCHECK(!is_locked_);
253 DCHECK(data);
254 DCHECK(!data_);
255 data_ = std::move(data);
256 is_locked_ = true;
257 }
258
259 void GpuImageDecodeController::DecodedImageData::ResetData() {
260 DCHECK(!is_locked_);
261 if (data_)
262 ReportUsageStats();
263 data_ = nullptr;
264 usage_stats_ = UsageStats();
265 }
266
267 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const {
268 // lock_count │ used │ result state
269 // ═══════════╪═══════╪══════════════════
270 // 1 │ false │ WASTED_ONCE
271 // 1 │ true │ USED_ONCE
272 // >1 │ false │ WASTED_RELOCKED
273 // >1 │ true │ USED_RELOCKED
274 // Note that it's important not to reorder the following enum values, since
275 // the numerical values are used in the histogram code.
276 enum State : int {
277 DECODED_IMAGE_STATE_WASTED_ONCE,
278 DECODED_IMAGE_STATE_USED_ONCE,
279 DECODED_IMAGE_STATE_WASTED_RELOCKED,
280 DECODED_IMAGE_STATE_USED_RELOCKED,
281 DECODED_IMAGE_STATE_COUNT
282 } state = DECODED_IMAGE_STATE_WASTED_ONCE;
283
284 if (usage_stats_.lock_count == 1) {
285 if (usage_stats_.used)
286 state = DECODED_IMAGE_STATE_USED_ONCE;
287 else
288 state = DECODED_IMAGE_STATE_WASTED_ONCE;
289 } else {
290 if (usage_stats_.used)
291 state = DECODED_IMAGE_STATE_USED_RELOCKED;
292 else
293 state = DECODED_IMAGE_STATE_WASTED_RELOCKED;
294 }
295
296 UMA_HISTOGRAM_ENUMERATION("Renderer4.GpuImageDecodeState", state,
297 DECODED_IMAGE_STATE_COUNT);
298 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageDecodeState.FirstLockWasted",
299 usage_stats_.first_lock_wasted);
300 }
301
302 GpuImageDecodeController::UploadedImageData::UploadedImageData() = default;
303 GpuImageDecodeController::UploadedImageData::~UploadedImageData() {
304 SetImage(nullptr);
305 }
306
307 void GpuImageDecodeController::UploadedImageData::SetImage(
308 sk_sp<SkImage> image) {
309 DCHECK(!image_ || !image);
310 if (image_) {
311 ReportUsageStats();
312 usage_stats_ = UsageStats();
313 }
314 image_ = std::move(image);
315 }
316
317 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const {
318 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used",
319 usage_stats_.used);
320 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted",
321 usage_stats_.first_ref_wasted);
322 }
323
324 GpuImageDecodeController::ImageData::ImageData(
325 DecodedDataMode mode,
326 size_t size,
327 const SkImage::DeferredTextureImageUsageParams& upload_params)
328 : mode(mode), size(size), upload_params(upload_params) {}
329
330 GpuImageDecodeController::ImageData::~ImageData() {
331 // We should never delete ImageData while it is in use or before it has been
332 // cleaned up.
333 DCHECK_EQ(0u, upload.ref_count);
334 DCHECK_EQ(0u, decode.ref_count);
335 DCHECK(!decode.is_locked());
336 // This should always be cleaned up before deleting the image, as it needs to
337 // be freed with the GL context lock held.
338 DCHECK(!upload.image());
339 }
340
341 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context,
342 ResourceFormat decode_format,
343 size_t max_gpu_image_bytes)
344 : format_(decode_format),
345 context_(context),
346 persistent_cache_(PersistentCache::NO_AUTO_EVICT),
347 normal_max_gpu_image_bytes_(max_gpu_image_bytes) {
348 // Acquire the context_lock so that we can safely retrieve the
349 // GrContextThreadSafeProxy. This proxy can then be used with no lock held.
350 {
351 ContextProvider::ScopedContextLock context_lock(context_);
352 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>(
353 context->GrContext()->threadSafeProxy());
354 }
355
356 // In certain cases, ThreadTaskRunnerHandle isn't set (Android WebView).
357 // Don't register a dump provider in these cases.
358 if (base::ThreadTaskRunnerHandle::IsSet()) {
359 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
360 this, "cc::GpuImageDecodeController",
361 base::ThreadTaskRunnerHandle::Get());
362 }
363 // Register this component with base::MemoryCoordinatorClientRegistry.
364 base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
365 }
366
367 GpuImageDecodeController::~GpuImageDecodeController() {
368 // SetShouldAggressivelyFreeResources will zero our limits and free all
369 // outstanding image memory.
370 SetShouldAggressivelyFreeResources(true);
371
372 // It is safe to unregister, even if we didn't register in the constructor.
373 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
374 this);
375 // Unregister this component from base::MemoryCoordinatorClientRegistry.
376 base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
377 }
378
379 bool GpuImageDecodeController::GetTaskForImageAndRef(
380 const DrawImage& draw_image,
381 const TracingInfo& tracing_info,
382 scoped_refptr<TileTask>* task) {
383 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
384 "GpuImageDecodeController::GetTaskForImageAndRef");
385 if (SkipImage(draw_image)) {
386 *task = nullptr;
387 return false;
388 }
389
390 base::AutoLock lock(lock_);
391 const auto image_id = draw_image.image()->uniqueID();
392 ImageData* image_data = GetImageDataForDrawImage(draw_image);
393 scoped_refptr<ImageData> new_data;
394 if (!image_data) {
395 // We need an ImageData, create one now.
396 new_data = CreateImageData(draw_image);
397 image_data = new_data.get();
398 } else if (image_data->is_at_raster) {
399 // Image is at-raster; just return, as this usage will be at-raster as well.
400 *task = nullptr;
401 return false;
402 } else if (image_data->decode.decode_failure) {
403 // We have already tried and failed to decode this image, so just return.
404 *task = nullptr;
405 return false;
406 } else if (image_data->upload.image()) {
407 // The image is already uploaded, ref and return.
408 RefImage(draw_image);
409 *task = nullptr;
410 return true;
411 } else if (image_data->upload.task) {
412 // We had an existing upload task, ref the image and return the task.
413 RefImage(draw_image);
414 *task = image_data->upload.task;
415 return true;
416 }
417
418 // Ensure that the image we're about to decode/upload will fit in memory.
419 if (!EnsureCapacity(image_data->size)) {
420 // Image will not fit, do an at-raster decode.
421 *task = nullptr;
422 return false;
423 }
424
425 // If we had to create new image data, add it to our map now that we know it
426 // will fit.
427 if (new_data)
428 persistent_cache_.Put(image_id, std::move(new_data));
429
430 // Ref the image and create upload and decode tasks. We will release this
431 // ref in OnImageUploadTaskCompleted.
432 RefImage(draw_image);
433 *task = make_scoped_refptr(new ImageUploadTaskImpl(
434 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info),
435 tracing_info));
436 image_data->upload.task = *task;
437
438 // Ref the image again - this ref is owned by the caller, and it is their
439 // responsibility to release it by calling UnrefImage.
440 RefImage(draw_image);
441 return true;
442 }
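
To make the ref-counting contract above concrete, here is a self-contained toy model. All types are invented stand-ins for the real cc API; it only demonstrates that the two refs taken on the task-creating success path (one for the upload task, one owned by the caller) must be balanced by OnImageUploadTaskCompleted and UnrefImage respectively.

#include <cassert>

// ToyController stands in for GpuImageDecodeController; only the upload
// ref-count bookkeeping of the success path is modeled.
struct ToyController {
  int upload_refs = 0;

  // Models the task-creating success path: one ref for the upload task
  // (released in OnImageUploadTaskCompleted) plus one owned by the caller.
  bool GetTaskForImageAndRef(bool* has_task) {
    upload_refs += 2;
    *has_task = true;
    return true;
  }
  void OnImageUploadTaskCompleted() { --upload_refs; }  // The task's ref.
  void UnrefImage() { --upload_refs; }                  // The caller's ref.
};

int main() {
  ToyController controller;
  bool has_task = false;
  if (controller.GetTaskForImageAndRef(&has_task)) {
    if (has_task)
      controller.OnImageUploadTaskCompleted();  // Simulate the task finishing.
    controller.UnrefImage();                    // Caller balances its ref.
  }
  assert(controller.upload_refs == 0);          // Every ref was released.
}
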
443
444 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) {
445 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
446 "GpuImageDecodeController::UnrefImage");
447 base::AutoLock lock(lock_);
448 UnrefImageInternal(draw_image);
449 }
450
451 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw(
452 const DrawImage& draw_image) {
453 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw");
454
455 // We are being called during raster. The context lock must already be
456 // acquired by the caller.
457 context_->GetLock()->AssertAcquired();
458
459 if (SkipImage(draw_image))
460 return DecodedDrawImage(nullptr, draw_image.filter_quality());
461
462 base::AutoLock lock(lock_);
463 ImageData* image_data = GetImageDataForDrawImage(draw_image);
464 if (!image_data) {
465 // We didn't find the image, create a new entry.
466 auto data = CreateImageData(draw_image);
467 image_data = data.get();
468 persistent_cache_.Put(draw_image.image()->uniqueID(), std::move(data));
469 }
470
471 if (!image_data->upload.budgeted) {
472 // If image data is not budgeted by this point, it is at-raster.
473 image_data->is_at_raster = true;
474 }
475
476 // Ref the image and decode so that they stay alive while we are
477 // decoding/uploading.
478 RefImage(draw_image);
479 RefImageDecode(draw_image);
480
481 // We may or may not need to decode and upload the image we've found; the
482 // following functions early-out if we have already done so.
483 DecodeImageIfNecessary(draw_image, image_data);
484 UploadImageIfNecessary(draw_image, image_data);
485 // Unref the image decode, but not the image. The image ref will be released
486 // in DrawWithImageFinished.
487 UnrefImageDecode(draw_image);
488
489 sk_sp<SkImage> image = image_data->upload.image();
490 image_data->upload.mark_used();
491 DCHECK(image || image_data->decode.decode_failure);
492
493 SkSize scale_factor = CalculateScaleFactorForMipLevel(
494 draw_image, image_data->upload_params.fPreScaleMipLevel);
495 DecodedDrawImage decoded_draw_image(std::move(image), SkSize(), scale_factor,
496 draw_image.filter_quality());
497 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster);
498 return decoded_draw_image;
499 }
500
501 void GpuImageDecodeController::DrawWithImageFinished(
502 const DrawImage& draw_image,
503 const DecodedDrawImage& decoded_draw_image) {
504 TRACE_EVENT0("cc", "GpuImageDecodeController::DrawWithImageFinished");
505
506 // We are being called during raster. The context lock must already be
507 // acquired by the caller.
508 context_->GetLock()->AssertAcquired();
509
510 if (SkipImage(draw_image))
511 return;
512
513 base::AutoLock lock(lock_);
514 UnrefImageInternal(draw_image);
515
516 // We are mid-draw and holding the context lock, ensure we clean up any
517 // textures (especially at-raster), which may have just been marked for
518 // deletion by UnrefImage.
519 DeletePendingImages();
520 }
521
522 void GpuImageDecodeController::ReduceCacheUsage() {
523 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
524 "GpuImageDecodeController::ReduceCacheUsage");
525 base::AutoLock lock(lock_);
526 EnsureCapacity(0);
527 }
528
529 void GpuImageDecodeController::SetShouldAggressivelyFreeResources(
530 bool aggressively_free_resources) {
531 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
532 "GpuImageDecodeController::SetShouldAggressivelyFreeResources",
533 "agressive_free_resources", aggressively_free_resources);
534 if (aggressively_free_resources) {
535 ContextProvider::ScopedContextLock context_lock(context_);
536 base::AutoLock lock(lock_);
537 // We want to keep as little in our cache as possible. Set our memory limit
538 // to zero and EnsureCapacity to clean up memory.
539 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes;
540 EnsureCapacity(0);
541
542 // We are holding the context lock, so finish cleaning up deleted images
543 // now.
544 DeletePendingImages();
545 } else {
546 base::AutoLock lock(lock_);
547 cached_bytes_limit_ = normal_max_gpu_image_bytes_;
548 }
549 }
550
551 bool GpuImageDecodeController::OnMemoryDump(
552 const base::trace_event::MemoryDumpArgs& args,
553 base::trace_event::ProcessMemoryDump* pmd) {
554 using base::trace_event::MemoryAllocatorDump;
555 using base::trace_event::MemoryAllocatorDumpGuid;
556 using base::trace_event::MemoryDumpLevelOfDetail;
557
558 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
559 "GpuImageDecodeController::OnMemoryDump");
560
561 if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) {
562 std::string dump_name =
563 base::StringPrintf("cc/image_memory/controller_0x%" PRIXPTR,
564 reinterpret_cast<uintptr_t>(this));
565 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
566 dump->AddScalar(MemoryAllocatorDump::kNameSize,
567 MemoryAllocatorDump::kUnitsBytes, bytes_used_);
568
569 // Early out, no need for more detail in a BACKGROUND dump.
570 return true;
571 }
572
573 for (const auto& image_pair : persistent_cache_) {
574 const ImageData* image_data = image_pair.second.get();
575 const uint32_t image_id = image_pair.first;
576
577 // If we have discardable decoded data, dump this here.
578 if (image_data->decode.data()) {
579 std::string discardable_dump_name = base::StringPrintf(
580 "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d",
581 reinterpret_cast<uintptr_t>(this), image_id);
582 MemoryAllocatorDump* dump =
583 image_data->decode.data()->CreateMemoryAllocatorDump(
584 discardable_dump_name.c_str(), pmd);
585 // If our image is locked, dump the "locked_size" as an additional
586 // column.
587 // This lets us see the amount of discardable memory which is
588 // contributing to memory pressure.
589 if (image_data->decode.is_locked()) {
590 dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
591 image_data->size);
592 }
593 }
594
595 // If we have an uploaded image (that is actually on the GPU, not just a
596 // CPU wrapper), dump it here.
598 if (image_data->upload.image() &&
599 image_data->mode == DecodedDataMode::GPU) {
600 std::string gpu_dump_name = base::StringPrintf(
601 "cc/image_memory/controller_0x%" PRIXPTR "/gpu/image_%d",
602 reinterpret_cast<uintptr_t>(this), image_id);
603 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(gpu_dump_name);
604 dump->AddScalar(MemoryAllocatorDump::kNameSize,
605 MemoryAllocatorDump::kUnitsBytes, image_data->size);
606
607 // Create a global shared GUID to associate this data with its GPU
608 // process counterpart.
610 GLuint gl_id = skia::GrBackendObjectToGrGLTextureInfo(
611 image_data->upload.image()->getTextureHandle(
612 false /* flushPendingGrContextIO */))
613 ->fID;
614 MemoryAllocatorDumpGuid guid = gl::GetGLTextureClientGUIDForTracing(
615 context_->ContextSupport()->ShareGroupTracingGUID(), gl_id);
616
617 // kImportance is somewhat arbitrary - we chose 3 to be higher than the
618 // value used in the GPU process (1), and Skia (2), causing us to appear
619 // as the owner in memory traces.
620 const int kImportance = 3;
621 pmd->CreateSharedGlobalAllocatorDump(guid);
622 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
623 }
624 }
625
626 return true;
627 }
628
629 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) {
630 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
631 "GpuImageDecodeController::DecodeImage");
632 base::AutoLock lock(lock_);
633 ImageData* image_data = GetImageDataForDrawImage(draw_image);
634 DCHECK(image_data);
635 DCHECK(!image_data->is_at_raster);
636 DecodeImageIfNecessary(draw_image, image_data);
637 }
638
639 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) {
640 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
641 "GpuImageDecodeController::UploadImage");
642 ContextProvider::ScopedContextLock context_lock(context_);
643 base::AutoLock lock(lock_);
644 ImageData* image_data = GetImageDataForDrawImage(draw_image);
645 DCHECK(image_data);
646 DCHECK(!image_data->is_at_raster);
647 UploadImageIfNecessary(draw_image, image_data);
648 }
649
650 void GpuImageDecodeController::OnImageDecodeTaskCompleted(
651 const DrawImage& draw_image) {
652 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
653 "GpuImageDecodeController::OnImageDecodeTaskCompleted");
654 base::AutoLock lock(lock_);
655 // Decode task is complete, remove our reference to it.
656 ImageData* image_data = GetImageDataForDrawImage(draw_image);
657 DCHECK(image_data);
658 DCHECK(image_data->decode.task);
659 image_data->decode.task = nullptr;
660
661 // While the decode task is active, we keep a ref on the decoded data.
662 // Release that ref now.
663 UnrefImageDecode(draw_image);
664 }
665
666 void GpuImageDecodeController::OnImageUploadTaskCompleted(
667 const DrawImage& draw_image) {
668 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
669 "GpuImageDecodeController::OnImageUploadTaskCompleted");
670 base::AutoLock lock(lock_);
671 // Upload task is complete, remove our reference to it.
672 ImageData* image_data = GetImageDataForDrawImage(draw_image);
673 DCHECK(image_data);
674 DCHECK(image_data->upload.task);
675 image_data->upload.task = nullptr;
676
677 // While the upload task is active, we keep a ref on both the image it will
678 // be populating and the decode it needs to populate it. Release these
679 // refs now.
680 UnrefImageDecode(draw_image);
681 UnrefImageInternal(draw_image);
682 }
683
684 // Checks whether a cached image decode already exists. If not, returns a
685 // task to produce the requested decode.
686 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef(
687 const DrawImage& draw_image,
688 const TracingInfo& tracing_info) {
689 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
690 "GpuImageDecodeController::GetImageDecodeTaskAndRef");
691 lock_.AssertAcquired();
692
693 // This ref is kept alive while an upload task may need this decode. We
694 // release this ref in OnImageUploadTaskCompleted.
695 RefImageDecode(draw_image);
696
697 ImageData* image_data = GetImageDataForDrawImage(draw_image);
698 DCHECK(image_data);
699 if (image_data->decode.is_locked()) {
700 // We should never be creating a decode task for an at-raster image.
701 DCHECK(!image_data->is_at_raster);
702 // We should never be creating a decode for an already-uploaded image.
703 DCHECK(!image_data->upload.image());
704 return nullptr;
705 }
706
707 // We didn't have an existing locked image, create a task to lock or decode.
708 scoped_refptr<TileTask>& existing_task = image_data->decode.task;
709 if (!existing_task) {
710 // Ref image decode and create a decode task. This ref will be released
711 // in OnImageDecodeTaskCompleted.
712 RefImageDecode(draw_image);
713 existing_task = make_scoped_refptr(
714 new ImageDecodeTaskImpl(this, draw_image, tracing_info));
715 }
716 return existing_task;
717 }
718
719 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) {
720 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
721 "GpuImageDecodeController::RefImageDecode");
722 lock_.AssertAcquired();
723 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
724 DCHECK(found != in_use_cache_.end());
725 ++found->second.ref_count;
726 ++found->second.image_data->decode.ref_count;
727 OwnershipChanged(draw_image, found->second.image_data.get());
728 }
729
730 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) {
731 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
732 "GpuImageDecodeController::UnrefImageDecode");
733 lock_.AssertAcquired();
734 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
735 DCHECK(found != in_use_cache_.end());
736 DCHECK_GT(found->second.image_data->decode.ref_count, 0u);
737 DCHECK_GT(found->second.ref_count, 0u);
738 --found->second.ref_count;
739 --found->second.image_data->decode.ref_count;
740 OwnershipChanged(draw_image, found->second.image_data.get());
741 if (found->second.ref_count == 0u) {
742 in_use_cache_.erase(found);
743 }
744 }
745
746 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) {
747 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
748 "GpuImageDecodeController::RefImage");
749 lock_.AssertAcquired();
750 InUseCacheKey key = GenerateInUseCacheKey(draw_image);
751 auto found = in_use_cache_.find(key);
752
753 // If no in-use cache entry was found for the given |draw_image|, then
754 // the image only exists in the |persistent_cache_|. Create an in-use
755 // cache entry now.
756 if (found == in_use_cache_.end()) {
757 auto found_image = persistent_cache_.Peek(draw_image.image()->uniqueID());
758 DCHECK(found_image != persistent_cache_.end());
759 DCHECK(found_image->second->upload_params.fPreScaleMipLevel <=
760 CalculateUploadScaleMipLevel(draw_image));
761 found = in_use_cache_
762 .insert(InUseCache::value_type(
763 key, InUseCacheEntry(found_image->second)))
764 .first;
765 }
766
767 DCHECK(found != in_use_cache_.end());
768 ++found->second.ref_count;
769 ++found->second.image_data->upload.ref_count;
770 OwnershipChanged(draw_image, found->second.image_data.get());
771 }
772
773 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) {
774 lock_.AssertAcquired();
775 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
776 DCHECK(found != in_use_cache_.end());
777 DCHECK_GT(found->second.image_data->upload.ref_count, 0u);
778 DCHECK_GT(found->second.ref_count, 0u);
779 --found->second.ref_count;
780 --found->second.image_data->upload.ref_count;
781 OwnershipChanged(draw_image, found->second.image_data.get());
782 if (found->second.ref_count == 0u) {
783 in_use_cache_.erase(found);
784 }
785 }
786
787 // Called any time an image or decode ref count changes. Takes care of any
788 // necessary memory budget book-keeping and cleanup.
789 void GpuImageDecodeController::OwnershipChanged(const DrawImage& draw_image,
790 ImageData* image_data) {
791 lock_.AssertAcquired();
792
793 bool has_any_refs =
794 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
795
796 // Don't keep around completely empty images. This can happen if an image's
797 // decode/upload tasks were both cancelled before completing.
798 if (!has_any_refs && !image_data->upload.image() &&
799 !image_data->decode.data()) {
800 auto found_persistent =
801 persistent_cache_.Peek(draw_image.image()->uniqueID());
802 if (found_persistent != persistent_cache_.end())
803 persistent_cache_.Erase(found_persistent);
804 }
805
806 // Don't keep around orphaned images.
807 if (image_data->is_orphaned && !has_any_refs) {
808 images_pending_deletion_.push_back(image_data->upload.image());
809 image_data->upload.SetImage(nullptr);
810 }
811
812 // Don't keep CPU images if they are unused; these images can be recreated
813 // by re-locking discardable memory (rather than requiring a full upload
814 // like GPU images).
815 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) {
816 images_pending_deletion_.push_back(image_data->upload.image());
817 image_data->upload.SetImage(nullptr);
818 }
819
820 if (image_data->is_at_raster && !has_any_refs) {
821 // We have an at-raster image which has reached zero refs. If it won't fit
822 // in our cache, delete the image to allow it to fit.
823 if (image_data->upload.image() && !CanFitSize(image_data->size)) {
824 images_pending_deletion_.push_back(image_data->upload.image());
825 image_data->upload.SetImage(nullptr);
826 }
827
828 // We now have an at-raster image which will fit in our cache. Convert it
829 // to not-at-raster.
830 image_data->is_at_raster = false;
831 if (image_data->upload.image()) {
832 bytes_used_ += image_data->size;
833 image_data->upload.budgeted = true;
834 }
835 }
836
837 // If we have image refs on a non-at-raster image, it must be budgeted, as it
838 // is either uploaded or pending upload.
839 if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted &&
840 !image_data->is_at_raster) {
841 // We should only be taking non-at-raster refs on images that fit in cache.
842 DCHECK(CanFitSize(image_data->size));
843
844 bytes_used_ += image_data->size;
845 image_data->upload.budgeted = true;
846 }
847
848 // If we have no image refs on an image, it should only be budgeted if it has
849 // an uploaded image. If no image exists (upload was cancelled), we should
850 // un-budget the image.
851 if (image_data->upload.ref_count == 0 && image_data->upload.budgeted &&
852 !image_data->upload.image()) {
853 DCHECK_GE(bytes_used_, image_data->size);
854 bytes_used_ -= image_data->size;
855 image_data->upload.budgeted = false;
856 }
857
858 // We should unlock the discardable memory for the image in two cases:
859 // 1) The image is no longer being used (no decode or upload refs).
860 // 2) This is a GPU backed image that has already been uploaded (no decode
861 // refs).
862 bool should_unlock_discardable =
863 !has_any_refs || (image_data->mode == DecodedDataMode::GPU &&
864 !image_data->decode.ref_count);
865
866 if (should_unlock_discardable && image_data->decode.is_locked()) {
867 DCHECK(image_data->decode.data());
868 image_data->decode.Unlock();
869 }
870
871 #if DCHECK_IS_ON()
872 // Sanity check the above logic.
873 if (image_data->upload.image()) {
874 DCHECK(image_data->is_at_raster || image_data->upload.budgeted);
875 if (image_data->mode == DecodedDataMode::CPU)
876 DCHECK(image_data->decode.is_locked());
877 } else {
878 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0);
879 }
880 #endif
881 }
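
The discardable-unlock conditions above reduce to a small standalone predicate, restated here for clarity; the names are invented.

// Stand-alone restatement of the two unlock conditions.
bool ShouldUnlockDiscardableSketch(bool is_gpu_mode,
                                   int decode_ref_count,
                                   int upload_ref_count) {
  bool has_any_refs = decode_ref_count > 0 || upload_ref_count > 0;
  // 1) Nothing references the entry at all, or 2) a GPU-backed entry has no
  // decode refs left (its pixels already live in a texture).
  return !has_any_refs || (is_gpu_mode && decode_ref_count == 0);
}
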
882
883 // Ensures that we can fit a new image of size |required_size| in our cache.
884 // In doing so, this function will free unreferenced image data as necessary
885 // to create room.
886 bool GpuImageDecodeController::EnsureCapacity(size_t required_size) {
887 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
888 "GpuImageDecodeController::EnsureCapacity");
889 lock_.AssertAcquired();
890
891 if (CanFitSize(required_size) && !ExceedsPreferredCount())
892 return true;
893
894 // While we are over memory or preferred item capacity, we iterate through
895 // our set of cached image data in LRU order. For each image, we can do two
896 // things: 1) We can free the uploaded image, reducing the memory usage of
897 // the cache and 2) we can remove the entry entirely, reducing the count of
898 // elements in the cache.
899 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) {
900 if (it->second->decode.ref_count != 0 ||
901 it->second->upload.ref_count != 0) {
902 ++it;
903 continue;
904 }
905
906 // Current entry has no refs. Ensure it is not locked.
907 DCHECK(!it->second->decode.is_locked());
908
909 // If an image without refs is budgeted, it must have an associated image
910 // upload.
911 DCHECK(!it->second->upload.budgeted || it->second->upload.image());
912
913 // Free the uploaded image if possible.
914 if (it->second->upload.image()) {
915 DCHECK(it->second->upload.budgeted);
916 DCHECK_GE(bytes_used_, it->second->size);
917 bytes_used_ -= it->second->size;
918 images_pending_deletion_.push_back(it->second->upload.image());
919 it->second->upload.SetImage(nullptr);
920 it->second->upload.budgeted = false;
921 }
922
923 // Free the entire entry if necessary.
924 if (ExceedsPreferredCount()) {
925 it = persistent_cache_.Erase(it);
926 } else {
927 ++it;
928 }
929
930 if (CanFitSize(required_size) && !ExceedsPreferredCount())
931 return true;
932 }
933
934 // Preferred count is only used as a guideline when trimming the cache.
935 // Allow new elements to be added as long as we are below our size limit.
936 return CanFitSize(required_size);
937 }
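
A minimal self-contained model of this eviction walk, with invented types and the persistent cache reduced to a list ordered least-recently-used first, looks like the sketch below. It mirrors the two freeing steps above: drop uploaded bytes from any unreferenced entry, and erase entries outright only while the count limit is exceeded.

#include <cstddef>
#include <list>

struct ToyEntry {
  size_t size = 0;
  int ref_count = 0;
  bool uploaded = false;
};

bool EnsureCapacitySketch(std::list<ToyEntry>& lru_old_first,
                          size_t& bytes_used, size_t bytes_limit,
                          size_t max_items, size_t required_size) {
  auto fits = [&] { return bytes_used + required_size <= bytes_limit; };
  auto over_count = [&] { return lru_old_first.size() > max_items; };
  if (fits() && !over_count())
    return true;
  for (auto it = lru_old_first.begin(); it != lru_old_first.end();) {
    if (it->ref_count != 0) {  // In-use entries cannot be freed.
      ++it;
      continue;
    }
    if (it->uploaded) {        // 1) Free the uploaded image's bytes.
      bytes_used -= it->size;
      it->uploaded = false;
    }
    if (over_count())          // 2) Drop the entry itself if over count.
      it = lru_old_first.erase(it);
    else
      ++it;
    if (fits() && !over_count())
      return true;
  }
  return fits();  // The count limit is a guideline; the byte limit is hard.
}
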
938
939 bool GpuImageDecodeController::CanFitSize(size_t size) const {
940 lock_.AssertAcquired();
941
942 size_t bytes_limit;
943 if (memory_state_ == base::MemoryState::NORMAL) {
944 bytes_limit = cached_bytes_limit_;
945 } else if (memory_state_ == base::MemoryState::THROTTLED) {
946 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor;
947 } else {
948 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_);
949 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes;
950 }
951
952 base::CheckedNumeric<uint32_t> new_size(bytes_used_);
953 new_size += size;
954 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit;
955 }
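
The CheckedNumeric guard above is equivalent to a manual overflow check; a standalone restatement, assuming the same uint32_t accounting, is:

#include <cstdint>
#include <limits>

// Manual equivalent of the base::CheckedNumeric addition above: reject on
// overflow, then compare the true sum against the active limit.
bool CanFitSizeSketch(uint32_t bytes_used, uint32_t size, uint32_t limit) {
  if (size > std::numeric_limits<uint32_t>::max() - bytes_used)
    return false;  // bytes_used + size would wrap around.
  return bytes_used + size <= limit;
}
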
956
957 bool GpuImageDecodeController::ExceedsPreferredCount() const {
958 lock_.AssertAcquired();
959
960 size_t items_limit;
961 if (memory_state_ == base::MemoryState::NORMAL) {
962 items_limit = kNormalMaxItemsInCache;
963 } else if (memory_state_ == base::MemoryState::THROTTLED) {
964 items_limit = kThrottledMaxItemsInCache;
965 } else {
966 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_);
967 items_limit = kSuspendedMaxItemsInCache;
968 }
969
970 return persistent_cache_.size() > items_limit;
971 }
972
973 void GpuImageDecodeController::DecodeImageIfNecessary(
974 const DrawImage& draw_image,
975 ImageData* image_data) {
976 lock_.AssertAcquired();
977
978 DCHECK_GT(image_data->decode.ref_count, 0u);
979
980 if (image_data->decode.decode_failure) {
981 // We have already tried and failed to decode this image. Don't try again.
982 return;
983 }
984
985 if (image_data->upload.image()) {
986 // We already have an uploaded image, no reason to decode.
987 return;
988 }
989
990 if (image_data->decode.data() &&
991 (image_data->decode.is_locked() || image_data->decode.Lock())) {
992 // We already decoded this, or we just needed to lock; early out.
993 return;
994 }
995
996 TRACE_EVENT0("cc", "GpuImageDecodeController::DecodeImage");
997
998 image_data->decode.ResetData();
999 std::unique_ptr<base::DiscardableMemory> backing_memory;
1000 {
1001 base::AutoUnlock unlock(lock_);
1002
1003 backing_memory = base::DiscardableMemoryAllocator::GetInstance()
1004 ->AllocateLockedDiscardableMemory(image_data->size);
1005
1006 switch (image_data->mode) {
1007 case DecodedDataMode::CPU: {
1008 SkImageInfo image_info = CreateImageInfoForDrawImage(
1009 draw_image, image_data->upload_params.fPreScaleMipLevel);
1010 // In order to match GPU scaling quality (which uses mip-maps at high
1011 // quality), we want to use at most medium filter quality for the
1012 // scale.
1013 SkPixmap image_pixmap(image_info, backing_memory->data(),
1014 image_info.minRowBytes());
1015 // Note that scalePixels falls back to readPixels if the scale is 1x, so
1016 // no need to special-case that as an optimization.
1017 if (!draw_image.image()->scalePixels(
1018 image_pixmap, CalculateUploadScaleFilterQuality(draw_image),
1019 SkImage::kDisallow_CachingHint)) {
1020 backing_memory->Unlock();
1021 backing_memory.reset();
1022 }
1023 break;
1024 }
1025 case DecodedDataMode::GPU: {
1026 // TODO(crbug.com/649167): Params should not have changed since initial
1027 // sizing. Somehow this still happens. We should investigate and re-add
1028 // DCHECKs here to enforce this.
1029
1030 if (!draw_image.image()->getDeferredTextureImageData(
1031 *context_threadsafe_proxy_.get(), &image_data->upload_params, 1,
1032 backing_memory->data())) {
1033 backing_memory->Unlock();
1034 backing_memory.reset();
1035 }
1036 break;
1037 }
1038 }
1039 }
1040
1041 if (image_data->decode.data()) {
1042 // An at-raster task decoded this before us. Ignore our decode.
1043 return;
1044 }
1045
1046 if (!backing_memory) {
1047 // If |backing_memory| was not populated, we had a non-decodable image.
1048 image_data->decode.decode_failure = true;
1049 return;
1050 }
1051
1052 image_data->decode.SetLockedData(std::move(backing_memory));
1053 }
1054
1055 void GpuImageDecodeController::UploadImageIfNecessary(
1056 const DrawImage& draw_image,
1057 ImageData* image_data) {
1058 context_->GetLock()->AssertAcquired();
1059 lock_.AssertAcquired();
1060
1061 if (image_data->decode.decode_failure) {
1062 // We were unable to decode this image. Don't try to upload.
1063 return;
1064 }
1065
1066 if (image_data->upload.image()) {
1067 // Someone has uploaded this image before us (at raster).
1068 return;
1069 }
1070
1071 TRACE_EVENT0("cc", "GpuImageDecodeController::UploadImage");
1072 DCHECK(image_data->decode.is_locked());
1073 DCHECK_GT(image_data->decode.ref_count, 0u);
1074 DCHECK_GT(image_data->upload.ref_count, 0u);
1075
1076 // We are about to upload a new image and are holding the context lock.
1077 // Ensure that any images which have been marked for deletion are actually
1078 // cleaned up so we don't exceed our memory limit during this upload.
1079 DeletePendingImages();
1080
1081 sk_sp<SkImage> uploaded_image;
1082 {
1083 base::AutoUnlock unlock(lock_);
1084 switch (image_data->mode) {
1085 case DecodedDataMode::CPU: {
1086 SkImageInfo image_info = CreateImageInfoForDrawImage(
1087 draw_image, image_data->upload_params.fPreScaleMipLevel);
1088 SkPixmap pixmap(image_info, image_data->decode.data()->data(),
1089 image_info.minRowBytes());
1090 uploaded_image =
1091 SkImage::MakeFromRaster(pixmap, [](const void*, void*) {}, nullptr);
1092 break;
1093 }
1094 case DecodedDataMode::GPU: {
1095 uploaded_image = SkImage::MakeFromDeferredTextureImageData(
1096 context_->GrContext(), image_data->decode.data()->data(),
1097 SkBudgeted::kNo);
1098 break;
1099 }
1100 }
1101 }
1102 image_data->decode.mark_used();
1103 DCHECK(uploaded_image);
1104
1105 // An at-raster task may have uploaded this image while we were unlocked.
1106 // If so, ignore our result.
1107 if (!image_data->upload.image())
1108 image_data->upload.SetImage(std::move(uploaded_image));
1109 }
1110
1111 scoped_refptr<GpuImageDecodeController::ImageData>
1112 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) {
1113 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1114 "GpuImageDecodeController::CreateImageData");
1115 lock_.AssertAcquired();
1116
1117 DecodedDataMode mode;
1118 int upload_scale_mip_level = CalculateUploadScaleMipLevel(draw_image);
1119 auto params = SkImage::DeferredTextureImageUsageParams(
1120 draw_image.matrix(), CalculateUploadScaleFilterQuality(draw_image),
1121 upload_scale_mip_level);
1122 size_t data_size = draw_image.image()->getDeferredTextureImageData(
1123 *context_threadsafe_proxy_.get(), &params, 1, nullptr);
1124
1125 if (data_size == 0) {
1126 // Can't upload the image (too large or other failure); try the SW fallback.
1127 SkImageInfo image_info =
1128 CreateImageInfoForDrawImage(draw_image, upload_scale_mip_level);
1129 data_size = image_info.getSafeSize(image_info.minRowBytes());
1130 mode = DecodedDataMode::CPU;
1131 } else {
1132 mode = DecodedDataMode::GPU;
1133 }
1134
1135 return make_scoped_refptr(new ImageData(mode, data_size, params));
1136 }
1137
1138 void GpuImageDecodeController::DeletePendingImages() {
1139 context_->GetLock()->AssertAcquired();
1140 lock_.AssertAcquired();
1141 images_pending_deletion_.clear();
1142 }
1143
1144 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage(
1145 const DrawImage& draw_image,
1146 int upload_scale_mip_level) const {
1147 gfx::Size mip_size =
1148 CalculateSizeForMipLevel(draw_image, upload_scale_mip_level);
1149 return SkImageInfo::Make(mip_size.width(), mip_size.height(),
1150 ResourceFormatToClosestSkColorType(format_),
1151 kPremul_SkAlphaType);
1152 }
1153
1154 // Tries to find an ImageData that can be used to draw the provided
1155 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one
1156 // cannot be found, it looks for a compatible entry in our |persistent_cache_|.
1157 GpuImageDecodeController::ImageData*
1158 GpuImageDecodeController::GetImageDataForDrawImage(
1159 const DrawImage& draw_image) {
1160 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1161 "GpuImageDecodeController::GetImageDataForDrawImage");
1162 lock_.AssertAcquired();
1163 auto found_in_use = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
1164 if (found_in_use != in_use_cache_.end())
1165 return found_in_use->second.image_data.get();
1166
1167 auto found_persistent = persistent_cache_.Get(draw_image.image()->uniqueID());
1168 if (found_persistent != persistent_cache_.end()) {
1169 ImageData* image_data = found_persistent->second.get();
1170 if (IsCompatible(image_data, draw_image)) {
1171 return image_data;
1172 } else {
1173 found_persistent->second->is_orphaned = true;
1174 // Call OwnershipChanged before erasing the orphaned entry from the
1175 // persistent cache. This ensures that if the orphaned entry has 0
1176 // references, it is cleaned up safely before it is deleted.
1177 OwnershipChanged(draw_image, image_data);
1178 persistent_cache_.Erase(found_persistent);
1179 }
1180 }
1181
1182 return nullptr;
1183 }
1184
1185 // Determines if we can draw the provided |draw_image| using the provided
1186 // |image_data|. This is true if the |image_data| is not scaled, or if it
1187 // is scaled at an equal or larger scale and equal or larger quality to
1188 // the provided |draw_image|.
1189 bool GpuImageDecodeController::IsCompatible(const ImageData* image_data,
1190 const DrawImage& draw_image) const {
1191 bool is_scaled = image_data->upload_params.fPreScaleMipLevel != 0;
1192 bool scale_is_compatible = CalculateUploadScaleMipLevel(draw_image) >=
1193 image_data->upload_params.fPreScaleMipLevel;
1194 bool quality_is_compatible = CalculateUploadScaleFilterQuality(draw_image) <=
1195 image_data->upload_params.fQuality;
1196 return !is_scaled || (scale_is_compatible && quality_is_compatible);
1197 }
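
A standalone restatement of this predicate, with a few invented example values (recall that a numerically higher mip level means a smaller image):

#include <cstdio>

// Invented stand-in mirroring IsCompatible(): mip levels and qualities are
// plain ints here rather than upload params and SkFilterQuality.
bool IsCompatibleSketch(int entry_mip, int entry_quality,
                        int draw_mip, int draw_quality) {
  bool is_scaled = entry_mip != 0;
  bool scale_is_compatible = draw_mip >= entry_mip;  // Entry is at least as large.
  bool quality_is_compatible = draw_quality <= entry_quality;
  return !is_scaled || (scale_is_compatible && quality_is_compatible);
}

int main() {
  std::printf("%d\n", IsCompatibleSketch(0, 1, 3, 3));  // 1: unscaled serves all.
  std::printf("%d\n", IsCompatibleSketch(2, 2, 3, 1));  // 1: smaller and lower quality.
  std::printf("%d\n", IsCompatibleSketch(2, 2, 1, 1));  // 0: draw needs a larger image.
}
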
1198
1199 size_t GpuImageDecodeController::GetDrawImageSizeForTesting(
1200 const DrawImage& image) {
1201 base::AutoLock lock(lock_);
1202 scoped_refptr<ImageData> data = CreateImageData(image);
1203 return data->size;
1204 }
1205
1206 void GpuImageDecodeController::SetImageDecodingFailedForTesting(
1207 const DrawImage& image) {
1208 base::AutoLock lock(lock_);
1209 auto found = persistent_cache_.Peek(image.image()->uniqueID());
1210 DCHECK(found != persistent_cache_.end());
1211 ImageData* image_data = found->second.get();
1212 image_data->decode.decode_failure = true;
1213 }
1214
1215 bool GpuImageDecodeController::DiscardableIsLockedForTesting(
1216 const DrawImage& image) {
1217 base::AutoLock lock(lock_);
1218 auto found = persistent_cache_.Peek(image.image()->uniqueID());
1219 DCHECK(found != persistent_cache_.end());
1220 ImageData* image_data = found->second.get();
1221 return image_data->decode.is_locked();
1222 }
1223
1224 void GpuImageDecodeController::OnMemoryStateChange(base::MemoryState state) {
1225 switch (state) {
1226 case base::MemoryState::NORMAL:
1227 memory_state_ = state;
1228 break;
1229 case base::MemoryState::THROTTLED:
1230 case base::MemoryState::SUSPENDED: {
1231 memory_state_ = state;
1232
1233 // We've just changed our memory state to a (potentially) more
1234 // restrictive one. Re-enforce cache limits.
1235 base::AutoLock lock(lock_);
1236 EnsureCapacity(0);
1237 break;
1238 }
1239 case base::MemoryState::UNKNOWN:
1240 // NOT_REACHED.
1241 break;
1242 }
1243 }
1244
1245 } // namespace cc