Chromium Code Reviews

Side by Side Diff: cc/tiles/gpu_image_decode_cache.cc

Issue 2541183002: cc: Rename ImageDecodeController to ImageDecodeCache. (Closed)
Patch Set: rename: update (Created 4 years ago)
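
Since the patch is a mechanical rename, here is a minimal sketch of how a caller-side construction changes under it. The helper name MakeCache, the parameter names, and the surrounding includes are illustrative assumptions for this sketch, not code from this patch; the constructor arguments mirror the signature shown in the diff below.

    #include <memory>

    #include "base/memory/ptr_util.h"
    #include "cc/tiles/gpu_image_decode_cache.h"

    // Hypothetical call site, for illustration only. Before this CL the type
    // was cc::GpuImageDecodeController; after it, only the name changes.
    std::unique_ptr<cc::GpuImageDecodeCache> MakeCache(
        cc::ContextProvider* context_provider,
        cc::ResourceFormat decode_format,
        size_t max_gpu_image_bytes) {
      return base::MakeUnique<cc::GpuImageDecodeCache>(
          context_provider, decode_format, max_gpu_image_bytes);
    }
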
OLD | NEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/tiles/gpu_image_decode_controller.h" 5 #include "cc/tiles/gpu_image_decode_cache.h"
6 6
7 #include <inttypes.h> 7 #include <inttypes.h>
8 8
9 #include "base/debug/alias.h" 9 #include "base/debug/alias.h"
10 #include "base/memory/discardable_memory_allocator.h" 10 #include "base/memory/discardable_memory_allocator.h"
11 #include "base/memory/memory_coordinator_client_registry.h" 11 #include "base/memory/memory_coordinator_client_registry.h"
12 #include "base/memory/ptr_util.h" 12 #include "base/memory/ptr_util.h"
13 #include "base/metrics/histogram_macros.h" 13 #include "base/metrics/histogram_macros.h"
14 #include "base/numerics/safe_math.h" 14 #include "base/numerics/safe_math.h"
15 #include "base/strings/stringprintf.h" 15 #include "base/strings/stringprintf.h"
16 #include "base/threading/thread_task_runner_handle.h" 16 #include "base/threading/thread_task_runner_handle.h"
17 #include "base/trace_event/memory_dump_manager.h" 17 #include "base/trace_event/memory_dump_manager.h"
18 #include "cc/debug/devtools_instrumentation.h" 18 #include "cc/debug/devtools_instrumentation.h"
19 #include "cc/output/context_provider.h" 19 #include "cc/output/context_provider.h"
20 #include "cc/raster/tile_task.h" 20 #include "cc/raster/tile_task.h"
21 #include "cc/resources/resource_format_utils.h" 21 #include "cc/resources/resource_format_utils.h"
22 #include "cc/tiles/mipmap_util.h" 22 #include "cc/tiles/mipmap_util.h"
23 #include "gpu/command_buffer/client/context_support.h" 23 #include "gpu/command_buffer/client/context_support.h"
24 #include "gpu/command_buffer/client/gles2_interface.h" 24 #include "gpu/command_buffer/client/gles2_interface.h"
25 #include "gpu_image_decode_controller.h" 25 #include "gpu_image_decode_cache.h"
26 #include "skia/ext/texture_handle.h" 26 #include "skia/ext/texture_handle.h"
27 #include "third_party/skia/include/core/SkCanvas.h" 27 #include "third_party/skia/include/core/SkCanvas.h"
28 #include "third_party/skia/include/core/SkRefCnt.h" 28 #include "third_party/skia/include/core/SkRefCnt.h"
29 #include "third_party/skia/include/core/SkSurface.h" 29 #include "third_party/skia/include/core/SkSurface.h"
30 #include "third_party/skia/include/gpu/GrContext.h" 30 #include "third_party/skia/include/gpu/GrContext.h"
31 #include "third_party/skia/include/gpu/GrTexture.h" 31 #include "third_party/skia/include/gpu/GrTexture.h"
32 #include "ui/gfx/skia_util.h" 32 #include "ui/gfx/skia_util.h"
33 #include "ui/gl/trace_util.h" 33 #include "ui/gl/trace_util.h"
34 34
35 namespace cc { 35 namespace cc {
(...skipping 91 matching lines...)
127 // usage of 32-bit sizes for images, key.mip_level is at most 31. 127 // usage of 32-bit sizes for images, key.mip_level is at most 31.
128 int32_t mip_level = CalculateUploadScaleMipLevel(draw_image); 128 int32_t mip_level = CalculateUploadScaleMipLevel(draw_image);
129 DCHECK_LT(mip_level, 32); 129 DCHECK_LT(mip_level, 32);
130 130
131 return (static_cast<uint64_t>(draw_image.image()->uniqueID()) << 32) | 131 return (static_cast<uint64_t>(draw_image.image()->uniqueID()) << 32) |
132 (mip_level << 16) | filter_quality; 132 (mip_level << 16) | filter_quality;
133 } 133 }
134 134
135 } // namespace 135 } // namespace
136 136
137 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry( 137 GpuImageDecodeCache::InUseCacheEntry::InUseCacheEntry(
138 scoped_refptr<ImageData> image_data) 138 scoped_refptr<ImageData> image_data)
139 : image_data(std::move(image_data)) {} 139 : image_data(std::move(image_data)) {}
140 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry( 140 GpuImageDecodeCache::InUseCacheEntry::InUseCacheEntry(const InUseCacheEntry&) =
141 const InUseCacheEntry&) = default;
142 GpuImageDecodeController::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) =
143 default; 141 default;
144 GpuImageDecodeController::InUseCacheEntry::~InUseCacheEntry() = default; 142 GpuImageDecodeCache::InUseCacheEntry::InUseCacheEntry(InUseCacheEntry&&) =
143 default;
144 GpuImageDecodeCache::InUseCacheEntry::~InUseCacheEntry() = default;
145 145
146 // Task which decodes an image and stores the result in discardable memory. 146 // Task which decodes an image and stores the result in discardable memory.
147 // This task does not use GPU resources and can be run on any thread. 147 // This task does not use GPU resources and can be run on any thread.
148 class ImageDecodeTaskImpl : public TileTask { 148 class ImageDecodeTaskImpl : public TileTask {
149 public: 149 public:
150 ImageDecodeTaskImpl(GpuImageDecodeController* controller, 150 ImageDecodeTaskImpl(GpuImageDecodeCache* cache,
151 const DrawImage& draw_image, 151 const DrawImage& draw_image,
152 const ImageDecodeController::TracingInfo& tracing_info) 152 const ImageDecodeCache::TracingInfo& tracing_info)
153 : TileTask(true), 153 : TileTask(true),
154 controller_(controller), 154 cache_(cache),
155 image_(draw_image), 155 image_(draw_image),
156 tracing_info_(tracing_info) { 156 tracing_info_(tracing_info) {
157 DCHECK(!SkipImage(draw_image)); 157 DCHECK(!SkipImage(draw_image));
158 } 158 }
159 159
160 // Overridden from Task: 160 // Overridden from Task:
161 void RunOnWorkerThread() override { 161 void RunOnWorkerThread() override {
162 TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu", 162 TRACE_EVENT2("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", "mode", "gpu",
163 "source_prepare_tiles_id", tracing_info_.prepare_tiles_id); 163 "source_prepare_tiles_id", tracing_info_.prepare_tiles_id);
164 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( 164 devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
165 image_.image().get(), 165 image_.image().get(),
166 devtools_instrumentation::ScopedImageDecodeTask::GPU); 166 devtools_instrumentation::ScopedImageDecodeTask::GPU);
167 controller_->DecodeImage(image_); 167 cache_->DecodeImage(image_);
168 } 168 }
169 169
170 // Overridden from TileTask: 170 // Overridden from TileTask:
171 void OnTaskCompleted() override { 171 void OnTaskCompleted() override {
172 controller_->OnImageDecodeTaskCompleted(image_); 172 cache_->OnImageDecodeTaskCompleted(image_);
173 } 173 }
174 174
175 protected: 175 protected:
176 ~ImageDecodeTaskImpl() override {} 176 ~ImageDecodeTaskImpl() override {}
177 177
178 private: 178 private:
179 GpuImageDecodeController* controller_; 179 GpuImageDecodeCache* cache_;
180 DrawImage image_; 180 DrawImage image_;
181 const ImageDecodeController::TracingInfo tracing_info_; 181 const ImageDecodeCache::TracingInfo tracing_info_;
182 182
183 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); 183 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
184 }; 184 };
185 185
186 // Task which creates an image from decoded data. Typically this involves 186 // Task which creates an image from decoded data. Typically this involves
187 // uploading data to the GPU, which requires this task be run on the non- 187 // uploading data to the GPU, which requires this task be run on the non-
188 // concurrent thread. 188 // concurrent thread.
189 class ImageUploadTaskImpl : public TileTask { 189 class ImageUploadTaskImpl : public TileTask {
190 public: 190 public:
191 ImageUploadTaskImpl(GpuImageDecodeController* controller, 191 ImageUploadTaskImpl(GpuImageDecodeCache* cache,
192 const DrawImage& draw_image, 192 const DrawImage& draw_image,
193 scoped_refptr<TileTask> decode_dependency, 193 scoped_refptr<TileTask> decode_dependency,
194 const ImageDecodeController::TracingInfo& tracing_info) 194 const ImageDecodeCache::TracingInfo& tracing_info)
195 : TileTask(false), 195 : TileTask(false),
196 controller_(controller), 196 cache_(cache),
197 image_(draw_image), 197 image_(draw_image),
198 tracing_info_(tracing_info) { 198 tracing_info_(tracing_info) {
199 DCHECK(!SkipImage(draw_image)); 199 DCHECK(!SkipImage(draw_image));
200 // If an image is already decoded and locked, we will not generate a 200 // If an image is already decoded and locked, we will not generate a
201 // decode task. 201 // decode task.
202 if (decode_dependency) 202 if (decode_dependency)
203 dependencies_.push_back(std::move(decode_dependency)); 203 dependencies_.push_back(std::move(decode_dependency));
204 } 204 }
205 205
206 // Override from Task: 206 // Override from Task:
207 void RunOnWorkerThread() override { 207 void RunOnWorkerThread() override {
208 TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu", 208 TRACE_EVENT2("cc", "ImageUploadTaskImpl::RunOnWorkerThread", "mode", "gpu",
209 "source_prepare_tiles_id", tracing_info_.prepare_tiles_id); 209 "source_prepare_tiles_id", tracing_info_.prepare_tiles_id);
210 controller_->UploadImage(image_); 210 cache_->UploadImage(image_);
211 } 211 }
212 212
213 // Overridden from TileTask: 213 // Overridden from TileTask:
214 void OnTaskCompleted() override { 214 void OnTaskCompleted() override {
215 controller_->OnImageUploadTaskCompleted(image_); 215 cache_->OnImageUploadTaskCompleted(image_);
216 } 216 }
217 217
218 protected: 218 protected:
219 ~ImageUploadTaskImpl() override {} 219 ~ImageUploadTaskImpl() override {}
220 220
221 private: 221 private:
222 GpuImageDecodeController* controller_; 222 GpuImageDecodeCache* cache_;
223 DrawImage image_; 223 DrawImage image_;
224 const ImageDecodeController::TracingInfo tracing_info_; 224 const ImageDecodeCache::TracingInfo tracing_info_;
225 225
226 DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl); 226 DISALLOW_COPY_AND_ASSIGN(ImageUploadTaskImpl);
227 }; 227 };
228 228
229 GpuImageDecodeController::DecodedImageData::DecodedImageData() = default; 229 GpuImageDecodeCache::DecodedImageData::DecodedImageData() = default;
230 GpuImageDecodeController::DecodedImageData::~DecodedImageData() { 230 GpuImageDecodeCache::DecodedImageData::~DecodedImageData() {
231 ResetData(); 231 ResetData();
232 } 232 }
233 233
234 bool GpuImageDecodeController::DecodedImageData::Lock() { 234 bool GpuImageDecodeCache::DecodedImageData::Lock() {
235 DCHECK(!is_locked_); 235 DCHECK(!is_locked_);
236 is_locked_ = data_->Lock(); 236 is_locked_ = data_->Lock();
237 if (is_locked_) 237 if (is_locked_)
238 ++usage_stats_.lock_count; 238 ++usage_stats_.lock_count;
239 return is_locked_; 239 return is_locked_;
240 } 240 }
241 241
242 void GpuImageDecodeController::DecodedImageData::Unlock() { 242 void GpuImageDecodeCache::DecodedImageData::Unlock() {
243 DCHECK(is_locked_); 243 DCHECK(is_locked_);
244 data_->Unlock(); 244 data_->Unlock();
245 if (usage_stats_.lock_count == 1) 245 if (usage_stats_.lock_count == 1)
246 usage_stats_.first_lock_wasted = !usage_stats_.used; 246 usage_stats_.first_lock_wasted = !usage_stats_.used;
247 is_locked_ = false; 247 is_locked_ = false;
248 } 248 }
249 249
250 void GpuImageDecodeController::DecodedImageData::SetLockedData( 250 void GpuImageDecodeCache::DecodedImageData::SetLockedData(
251 std::unique_ptr<base::DiscardableMemory> data) { 251 std::unique_ptr<base::DiscardableMemory> data) {
252 DCHECK(!is_locked_); 252 DCHECK(!is_locked_);
253 DCHECK(data); 253 DCHECK(data);
254 DCHECK(!data_); 254 DCHECK(!data_);
255 data_ = std::move(data); 255 data_ = std::move(data);
256 is_locked_ = true; 256 is_locked_ = true;
257 } 257 }
258 258
259 void GpuImageDecodeController::DecodedImageData::ResetData() { 259 void GpuImageDecodeCache::DecodedImageData::ResetData() {
260 DCHECK(!is_locked_); 260 DCHECK(!is_locked_);
261 if (data_) 261 if (data_)
262 ReportUsageStats(); 262 ReportUsageStats();
263 data_ = nullptr; 263 data_ = nullptr;
264 usage_stats_ = UsageStats(); 264 usage_stats_ = UsageStats();
265 } 265 }
266 266
267 void GpuImageDecodeController::DecodedImageData::ReportUsageStats() const { 267 void GpuImageDecodeCache::DecodedImageData::ReportUsageStats() const {
268 // lock_count │ used │ result state 268 // lock_count │ used │ result state
269 // ═══════════╪═══════╪══════════════════ 269 // ═══════════╪═══════╪══════════════════
270 // 1 │ false │ WASTED_ONCE 270 // 1 │ false │ WASTED_ONCE
271 // 1 │ true │ USED_ONCE 271 // 1 │ true │ USED_ONCE
272 // >1 │ false │ WASTED_RELOCKED 272 // >1 │ false │ WASTED_RELOCKED
273 // >1 │ true │ USED_RELOCKED 273 // >1 │ true │ USED_RELOCKED
274 // Note that it's important not to reorder the following enums, since the 274 // Note that it's important not to reorder the following enums, since the
275 // numerical values are used in the histogram code. 275 // numerical values are used in the histogram code.
276 enum State : int { 276 enum State : int {
277 DECODED_IMAGE_STATE_WASTED_ONCE, 277 DECODED_IMAGE_STATE_WASTED_ONCE,
(...skipping 14 matching lines...)
292 else 292 else
293 state = DECODED_IMAGE_STATE_WASTED_RELOCKED; 293 state = DECODED_IMAGE_STATE_WASTED_RELOCKED;
294 } 294 }
295 295
296 UMA_HISTOGRAM_ENUMERATION("Renderer4.GpuImageDecodeState", state, 296 UMA_HISTOGRAM_ENUMERATION("Renderer4.GpuImageDecodeState", state,
297 DECODED_IMAGE_STATE_COUNT); 297 DECODED_IMAGE_STATE_COUNT);
298 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageDecodeState.FirstLockWasted", 298 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageDecodeState.FirstLockWasted",
299 usage_stats_.first_lock_wasted); 299 usage_stats_.first_lock_wasted);
300 } 300 }
301 301
302 GpuImageDecodeController::UploadedImageData::UploadedImageData() = default; 302 GpuImageDecodeCache::UploadedImageData::UploadedImageData() = default;
303 GpuImageDecodeController::UploadedImageData::~UploadedImageData() { 303 GpuImageDecodeCache::UploadedImageData::~UploadedImageData() {
304 SetImage(nullptr); 304 SetImage(nullptr);
305 } 305 }
306 306
307 void GpuImageDecodeController::UploadedImageData::SetImage( 307 void GpuImageDecodeCache::UploadedImageData::SetImage(sk_sp<SkImage> image) {
308 sk_sp<SkImage> image) {
309 DCHECK(!image_ || !image); 308 DCHECK(!image_ || !image);
310 if (image_) { 309 if (image_) {
311 ReportUsageStats(); 310 ReportUsageStats();
312 usage_stats_ = UsageStats(); 311 usage_stats_ = UsageStats();
313 } 312 }
314 image_ = std::move(image); 313 image_ = std::move(image);
315 } 314 }
316 315
317 void GpuImageDecodeController::UploadedImageData::ReportUsageStats() const { 316 void GpuImageDecodeCache::UploadedImageData::ReportUsageStats() const {
318 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used", 317 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.Used",
319 usage_stats_.used); 318 usage_stats_.used);
320 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted", 319 UMA_HISTOGRAM_BOOLEAN("Renderer4.GpuImageUploadState.FirstRefWasted",
321 usage_stats_.first_ref_wasted); 320 usage_stats_.first_ref_wasted);
322 } 321 }
323 322
324 GpuImageDecodeController::ImageData::ImageData( 323 GpuImageDecodeCache::ImageData::ImageData(
325 DecodedDataMode mode, 324 DecodedDataMode mode,
326 size_t size, 325 size_t size,
327 const SkImage::DeferredTextureImageUsageParams& upload_params) 326 const SkImage::DeferredTextureImageUsageParams& upload_params)
328 : mode(mode), size(size), upload_params(upload_params) {} 327 : mode(mode), size(size), upload_params(upload_params) {}
329 328
330 GpuImageDecodeController::ImageData::~ImageData() { 329 GpuImageDecodeCache::ImageData::~ImageData() {
331 // We should never delete ImageData while it is in use or before it has been 330 // We should never delete ImageData while it is in use or before it has been
332 // cleaned up. 331 // cleaned up.
333 DCHECK_EQ(0u, upload.ref_count); 332 DCHECK_EQ(0u, upload.ref_count);
334 DCHECK_EQ(0u, decode.ref_count); 333 DCHECK_EQ(0u, decode.ref_count);
335 DCHECK_EQ(false, decode.is_locked()); 334 DCHECK_EQ(false, decode.is_locked());
336 // This should always be cleaned up before deleting the image, as it needs to 335 // This should always be cleaned up before deleting the image, as it needs to
337 // be freed with the GL context lock held. 336 // be freed with the GL context lock held.
338 DCHECK(!upload.image()); 337 DCHECK(!upload.image());
339 } 338 }
340 339
341 GpuImageDecodeController::GpuImageDecodeController(ContextProvider* context, 340 GpuImageDecodeCache::GpuImageDecodeCache(ContextProvider* context,
342 ResourceFormat decode_format, 341 ResourceFormat decode_format,
343 size_t max_gpu_image_bytes) 342 size_t max_gpu_image_bytes)
344 : format_(decode_format), 343 : format_(decode_format),
345 context_(context), 344 context_(context),
346 persistent_cache_(PersistentCache::NO_AUTO_EVICT), 345 persistent_cache_(PersistentCache::NO_AUTO_EVICT),
347 normal_max_gpu_image_bytes_(max_gpu_image_bytes) { 346 normal_max_gpu_image_bytes_(max_gpu_image_bytes) {
348 // Acquire the context_lock so that we can safely retrieve the 347 // Acquire the context_lock so that we can safely retrieve the
349 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. 348 // GrContextThreadSafeProxy. This proxy can then be used with no lock held.
350 { 349 {
351 ContextProvider::ScopedContextLock context_lock(context_); 350 ContextProvider::ScopedContextLock context_lock(context_);
352 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( 351 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>(
353 context->GrContext()->threadSafeProxy()); 352 context->GrContext()->threadSafeProxy());
354 } 353 }
355 354
356 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). 355 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
357 // Don't register a dump provider in these cases. 356 // Don't register a dump provider in these cases.
358 if (base::ThreadTaskRunnerHandle::IsSet()) { 357 if (base::ThreadTaskRunnerHandle::IsSet()) {
359 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( 358 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
360 this, "cc::GpuImageDecodeController", 359 this, "cc::GpuImageDecodeCache", base::ThreadTaskRunnerHandle::Get());
361 base::ThreadTaskRunnerHandle::Get());
362 } 360 }
363 // Register this component with base::MemoryCoordinatorClientRegistry. 361 // Register this component with base::MemoryCoordinatorClientRegistry.
364 base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this); 362 base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
365 } 363 }
366 364
367 GpuImageDecodeController::~GpuImageDecodeController() { 365 GpuImageDecodeCache::~GpuImageDecodeCache() {
368 // SetShouldAggressivelyFreeResources will zero our limits and free all 366 // SetShouldAggressivelyFreeResources will zero our limits and free all
369 // outstanding image memory. 367 // outstanding image memory.
370 SetShouldAggressivelyFreeResources(true); 368 SetShouldAggressivelyFreeResources(true);
371 369
372 // It is safe to unregister, even if we didn't register in the constructor. 370 // It is safe to unregister, even if we didn't register in the constructor.
373 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( 371 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
374 this); 372 this);
375 // Unregister this component with memory_coordinator::ClientRegistry. 373 // Unregister this component with memory_coordinator::ClientRegistry.
376 base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this); 374 base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
377 } 375 }
378 376
379 bool GpuImageDecodeController::GetTaskForImageAndRef( 377 bool GpuImageDecodeCache::GetTaskForImageAndRef(const DrawImage& draw_image,
380 const DrawImage& draw_image, 378 const TracingInfo& tracing_info,
381 const TracingInfo& tracing_info, 379 scoped_refptr<TileTask>* task) {
382 scoped_refptr<TileTask>* task) {
383 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 380 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
384 "GpuImageDecodeController::GetTaskForImageAndRef"); 381 "GpuImageDecodeCache::GetTaskForImageAndRef");
385 if (SkipImage(draw_image)) { 382 if (SkipImage(draw_image)) {
386 *task = nullptr; 383 *task = nullptr;
387 return false; 384 return false;
388 } 385 }
389 386
390 base::AutoLock lock(lock_); 387 base::AutoLock lock(lock_);
391 const auto image_id = draw_image.image()->uniqueID(); 388 const auto image_id = draw_image.image()->uniqueID();
392 ImageData* image_data = GetImageDataForDrawImage(draw_image); 389 ImageData* image_data = GetImageDataForDrawImage(draw_image);
393 scoped_refptr<ImageData> new_data; 390 scoped_refptr<ImageData> new_data;
394 if (!image_data) { 391 if (!image_data) {
(...skipping 39 matching lines...)
434 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info), 431 this, draw_image, GetImageDecodeTaskAndRef(draw_image, tracing_info),
435 tracing_info)); 432 tracing_info));
436 image_data->upload.task = *task; 433 image_data->upload.task = *task;
437 434
438 // Ref the image again - this ref is owned by the caller, and it is their 435 // Ref the image again - this ref is owned by the caller, and it is their
439 // responsibility to release it by calling UnrefImage. 436 // responsibility to release it by calling UnrefImage.
440 RefImage(draw_image); 437 RefImage(draw_image);
441 return true; 438 return true;
442 } 439 }
443 440
444 void GpuImageDecodeController::UnrefImage(const DrawImage& draw_image) { 441 void GpuImageDecodeCache::UnrefImage(const DrawImage& draw_image) {
445 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 442 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
446 "GpuImageDecodeController::UnrefImage"); 443 "GpuImageDecodeCache::UnrefImage");
447 base::AutoLock lock(lock_); 444 base::AutoLock lock(lock_);
448 UnrefImageInternal(draw_image); 445 UnrefImageInternal(draw_image);
449 } 446 }
450 447
451 DecodedDrawImage GpuImageDecodeController::GetDecodedImageForDraw( 448 DecodedDrawImage GpuImageDecodeCache::GetDecodedImageForDraw(
452 const DrawImage& draw_image) { 449 const DrawImage& draw_image) {
453 TRACE_EVENT0("cc", "GpuImageDecodeController::GetDecodedImageForDraw"); 450 TRACE_EVENT0("cc", "GpuImageDecodeCache::GetDecodedImageForDraw");
454 451
455 // We are being called during raster. The context lock must already be 452 // We are being called during raster. The context lock must already be
456 // acquired by the caller. 453 // acquired by the caller.
457 context_->GetLock()->AssertAcquired(); 454 context_->GetLock()->AssertAcquired();
458 455
459 if (SkipImage(draw_image)) 456 if (SkipImage(draw_image))
460 return DecodedDrawImage(nullptr, draw_image.filter_quality()); 457 return DecodedDrawImage(nullptr, draw_image.filter_quality());
461 458
462 base::AutoLock lock(lock_); 459 base::AutoLock lock(lock_);
463 ImageData* image_data = GetImageDataForDrawImage(draw_image); 460 ImageData* image_data = GetImageDataForDrawImage(draw_image);
(...skipping 27 matching lines...)
491 DCHECK(image || image_data->decode.decode_failure); 488 DCHECK(image || image_data->decode.decode_failure);
492 489
493 SkSize scale_factor = CalculateScaleFactorForMipLevel( 490 SkSize scale_factor = CalculateScaleFactorForMipLevel(
494 draw_image, image_data->upload_params.fPreScaleMipLevel); 491 draw_image, image_data->upload_params.fPreScaleMipLevel);
495 DecodedDrawImage decoded_draw_image(std::move(image), SkSize(), scale_factor, 492 DecodedDrawImage decoded_draw_image(std::move(image), SkSize(), scale_factor,
496 draw_image.filter_quality()); 493 draw_image.filter_quality());
497 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster); 494 decoded_draw_image.set_at_raster_decode(image_data->is_at_raster);
498 return decoded_draw_image; 495 return decoded_draw_image;
499 } 496 }
500 497
501 void GpuImageDecodeController::DrawWithImageFinished( 498 void GpuImageDecodeCache::DrawWithImageFinished(
502 const DrawImage& draw_image, 499 const DrawImage& draw_image,
503 const DecodedDrawImage& decoded_draw_image) { 500 const DecodedDrawImage& decoded_draw_image) {
504 TRACE_EVENT0("cc", "GpuImageDecodeController::DrawWithImageFinished"); 501 TRACE_EVENT0("cc", "GpuImageDecodeCache::DrawWithImageFinished");
505 502
506 // We are being called during raster. The context lock must already be 503 // We are being called during raster. The context lock must already be
507 // acquired by the caller. 504 // acquired by the caller.
508 context_->GetLock()->AssertAcquired(); 505 context_->GetLock()->AssertAcquired();
509 506
510 if (SkipImage(draw_image)) 507 if (SkipImage(draw_image))
511 return; 508 return;
512 509
513 base::AutoLock lock(lock_); 510 base::AutoLock lock(lock_);
514 UnrefImageInternal(draw_image); 511 UnrefImageInternal(draw_image);
515 512
516 // We are mid-draw and holding the context lock; ensure we clean up any 513 // We are mid-draw and holding the context lock; ensure we clean up any
517 // textures (especially at-raster), which may have just been marked for 514 // textures (especially at-raster), which may have just been marked for
518 // deletion by UnrefImage. 515 // deletion by UnrefImage.
519 DeletePendingImages(); 516 DeletePendingImages();
520 } 517 }
521 518
522 void GpuImageDecodeController::ReduceCacheUsage() { 519 void GpuImageDecodeCache::ReduceCacheUsage() {
523 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 520 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
524 "GpuImageDecodeController::ReduceCacheUsage"); 521 "GpuImageDecodeCache::ReduceCacheUsage");
525 base::AutoLock lock(lock_); 522 base::AutoLock lock(lock_);
526 EnsureCapacity(0); 523 EnsureCapacity(0);
527 } 524 }
528 525
529 void GpuImageDecodeController::SetShouldAggressivelyFreeResources( 526 void GpuImageDecodeCache::SetShouldAggressivelyFreeResources(
530 bool aggressively_free_resources) { 527 bool aggressively_free_resources) {
531 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 528 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
532 "GpuImageDecodeController::SetShouldAggressivelyFreeResources", 529 "GpuImageDecodeCache::SetShouldAggressivelyFreeResources",
533 "agressive_free_resources", aggressively_free_resources); 530 "agressive_free_resources", aggressively_free_resources);
534 if (aggressively_free_resources) { 531 if (aggressively_free_resources) {
535 ContextProvider::ScopedContextLock context_lock(context_); 532 ContextProvider::ScopedContextLock context_lock(context_);
536 base::AutoLock lock(lock_); 533 base::AutoLock lock(lock_);
537 // We want to keep as little in our cache as possible. Set our memory limit 534 // We want to keep as little in our cache as possible. Set our memory limit
538 // to zero and EnsureCapacity to clean up memory. 535 // to zero and EnsureCapacity to clean up memory.
539 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes; 536 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes;
540 EnsureCapacity(0); 537 EnsureCapacity(0);
541 538
542 // We are holding the context lock, so finish cleaning up deleted images 539 // We are holding the context lock, so finish cleaning up deleted images
543 // now. 540 // now.
544 DeletePendingImages(); 541 DeletePendingImages();
545 } else { 542 } else {
546 base::AutoLock lock(lock_); 543 base::AutoLock lock(lock_);
547 cached_bytes_limit_ = normal_max_gpu_image_bytes_; 544 cached_bytes_limit_ = normal_max_gpu_image_bytes_;
548 } 545 }
549 } 546 }
550 547
551 bool GpuImageDecodeController::OnMemoryDump( 548 bool GpuImageDecodeCache::OnMemoryDump(
552 const base::trace_event::MemoryDumpArgs& args, 549 const base::trace_event::MemoryDumpArgs& args,
553 base::trace_event::ProcessMemoryDump* pmd) { 550 base::trace_event::ProcessMemoryDump* pmd) {
554 using base::trace_event::MemoryAllocatorDump; 551 using base::trace_event::MemoryAllocatorDump;
555 using base::trace_event::MemoryAllocatorDumpGuid; 552 using base::trace_event::MemoryAllocatorDumpGuid;
556 using base::trace_event::MemoryDumpLevelOfDetail; 553 using base::trace_event::MemoryDumpLevelOfDetail;
557 554
558 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 555 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
559 "GpuImageDecodeController::OnMemoryDump"); 556 "GpuImageDecodeCache::OnMemoryDump");
560 557
561 if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) { 558 if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) {
562 std::string dump_name = 559 std::string dump_name = base::StringPrintf(
563 base::StringPrintf("cc/image_memory/controller_0x%" PRIXPTR, 560 "cc/image_memory/cache_0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this));
564 reinterpret_cast<uintptr_t>(this));
565 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name); 561 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
566 dump->AddScalar(MemoryAllocatorDump::kNameSize, 562 dump->AddScalar(MemoryAllocatorDump::kNameSize,
567 MemoryAllocatorDump::kUnitsBytes, bytes_used_); 563 MemoryAllocatorDump::kUnitsBytes, bytes_used_);
568 564
569 // Early out, no need for more detail in a BACKGROUND dump. 565 // Early out, no need for more detail in a BACKGROUND dump.
570 return true; 566 return true;
571 } 567 }
572 568
573 for (const auto& image_pair : persistent_cache_) { 569 for (const auto& image_pair : persistent_cache_) {
574 const ImageData* image_data = image_pair.second.get(); 570 const ImageData* image_data = image_pair.second.get();
575 const uint32_t image_id = image_pair.first; 571 const uint32_t image_id = image_pair.first;
576 572
577 // If we have discardable decoded data, dump this here. 573 // If we have discardable decoded data, dump this here.
578 if (image_data->decode.data()) { 574 if (image_data->decode.data()) {
579 std::string discardable_dump_name = base::StringPrintf( 575 std::string discardable_dump_name = base::StringPrintf(
580 "cc/image_memory/controller_0x%" PRIXPTR "/discardable/image_%d", 576 "cc/image_memory/cache_0x%" PRIXPTR "/discardable/image_%d",
581 reinterpret_cast<uintptr_t>(this), image_id); 577 reinterpret_cast<uintptr_t>(this), image_id);
582 MemoryAllocatorDump* dump = 578 MemoryAllocatorDump* dump =
583 image_data->decode.data()->CreateMemoryAllocatorDump( 579 image_data->decode.data()->CreateMemoryAllocatorDump(
584 discardable_dump_name.c_str(), pmd); 580 discardable_dump_name.c_str(), pmd);
585 // If our image is locked, dump the "locked_size" as an additional 581 // If our image is locked, dump the "locked_size" as an additional
586 // column. 582 // column.
587 // This lets us see the amount of discardable which is contributing to 583 // This lets us see the amount of discardable which is contributing to
588 // memory pressure. 584 // memory pressure.
589 if (image_data->decode.is_locked()) { 585 if (image_data->decode.is_locked()) {
590 dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes, 586 dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
591 image_data->size); 587 image_data->size);
592 } 588 }
593 } 589 }
594 590
595 // If we have an uploaded image (that is actually on the GPU, not just a 591 // If we have an uploaded image (that is actually on the GPU, not just a
596 // CPU 592 // CPU
597 // wrapper), dump it here. 593 // wrapper), dump it here.
598 if (image_data->upload.image() && 594 if (image_data->upload.image() &&
599 image_data->mode == DecodedDataMode::GPU) { 595 image_data->mode == DecodedDataMode::GPU) {
600 std::string gpu_dump_name = base::StringPrintf( 596 std::string gpu_dump_name = base::StringPrintf(
601 "cc/image_memory/controller_0x%" PRIXPTR "/gpu/image_%d", 597 "cc/image_memory/cache_0x%" PRIXPTR "/gpu/image_%d",
602 reinterpret_cast<uintptr_t>(this), image_id); 598 reinterpret_cast<uintptr_t>(this), image_id);
603 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(gpu_dump_name); 599 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(gpu_dump_name);
604 dump->AddScalar(MemoryAllocatorDump::kNameSize, 600 dump->AddScalar(MemoryAllocatorDump::kNameSize,
605 MemoryAllocatorDump::kUnitsBytes, image_data->size); 601 MemoryAllocatorDump::kUnitsBytes, image_data->size);
606 602
607 // Create a global shared GUID to associate this data with its GPU 603 // Create a global shared GUID to associate this data with its GPU
608 // process 604 // process
609 // counterpart. 605 // counterpart.
610 GLuint gl_id = skia::GrBackendObjectToGrGLTextureInfo( 606 GLuint gl_id = skia::GrBackendObjectToGrGLTextureInfo(
611 image_data->upload.image()->getTextureHandle( 607 image_data->upload.image()->getTextureHandle(
612 false /* flushPendingGrContextIO */)) 608 false /* flushPendingGrContextIO */))
613 ->fID; 609 ->fID;
614 MemoryAllocatorDumpGuid guid = gl::GetGLTextureClientGUIDForTracing( 610 MemoryAllocatorDumpGuid guid = gl::GetGLTextureClientGUIDForTracing(
615 context_->ContextSupport()->ShareGroupTracingGUID(), gl_id); 611 context_->ContextSupport()->ShareGroupTracingGUID(), gl_id);
616 612
617 // kImportance is somewhat arbitrary - we chose 3 to be higher than the 613 // kImportance is somewhat arbitrary - we chose 3 to be higher than the
618 // value used in the GPU process (1), and Skia (2), causing us to appear 614 // value used in the GPU process (1), and Skia (2), causing us to appear
619 // as the owner in memory traces. 615 // as the owner in memory traces.
620 const int kImportance = 3; 616 const int kImportance = 3;
621 pmd->CreateSharedGlobalAllocatorDump(guid); 617 pmd->CreateSharedGlobalAllocatorDump(guid);
622 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); 618 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
623 } 619 }
624 } 620 }
625 621
626 return true; 622 return true;
627 } 623 }
628 624
629 void GpuImageDecodeController::DecodeImage(const DrawImage& draw_image) { 625 void GpuImageDecodeCache::DecodeImage(const DrawImage& draw_image) {
630 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 626 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
631 "GpuImageDecodeController::DecodeImage"); 627 "GpuImageDecodeCache::DecodeImage");
632 base::AutoLock lock(lock_); 628 base::AutoLock lock(lock_);
633 ImageData* image_data = GetImageDataForDrawImage(draw_image); 629 ImageData* image_data = GetImageDataForDrawImage(draw_image);
634 DCHECK(image_data); 630 DCHECK(image_data);
635 DCHECK(!image_data->is_at_raster); 631 DCHECK(!image_data->is_at_raster);
636 DecodeImageIfNecessary(draw_image, image_data); 632 DecodeImageIfNecessary(draw_image, image_data);
637 } 633 }
638 634
639 void GpuImageDecodeController::UploadImage(const DrawImage& draw_image) { 635 void GpuImageDecodeCache::UploadImage(const DrawImage& draw_image) {
640 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 636 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
641 "GpuImageDecodeController::UploadImage"); 637 "GpuImageDecodeCache::UploadImage");
642 ContextProvider::ScopedContextLock context_lock(context_); 638 ContextProvider::ScopedContextLock context_lock(context_);
643 base::AutoLock lock(lock_); 639 base::AutoLock lock(lock_);
644 ImageData* image_data = GetImageDataForDrawImage(draw_image); 640 ImageData* image_data = GetImageDataForDrawImage(draw_image);
645 DCHECK(image_data); 641 DCHECK(image_data);
646 DCHECK(!image_data->is_at_raster); 642 DCHECK(!image_data->is_at_raster);
647 UploadImageIfNecessary(draw_image, image_data); 643 UploadImageIfNecessary(draw_image, image_data);
648 } 644 }
649 645
650 void GpuImageDecodeController::OnImageDecodeTaskCompleted( 646 void GpuImageDecodeCache::OnImageDecodeTaskCompleted(
651 const DrawImage& draw_image) { 647 const DrawImage& draw_image) {
652 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 648 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
653 "GpuImageDecodeController::OnImageDecodeTaskCompleted"); 649 "GpuImageDecodeCache::OnImageDecodeTaskCompleted");
654 base::AutoLock lock(lock_); 650 base::AutoLock lock(lock_);
655 // Decode task is complete, remove our reference to it. 651 // Decode task is complete, remove our reference to it.
656 ImageData* image_data = GetImageDataForDrawImage(draw_image); 652 ImageData* image_data = GetImageDataForDrawImage(draw_image);
657 DCHECK(image_data); 653 DCHECK(image_data);
658 DCHECK(image_data->decode.task); 654 DCHECK(image_data->decode.task);
659 image_data->decode.task = nullptr; 655 image_data->decode.task = nullptr;
660 656
661 // While the decode task is active, we keep a ref on the decoded data. 657 // While the decode task is active, we keep a ref on the decoded data.
662 // Release that ref now. 658 // Release that ref now.
663 UnrefImageDecode(draw_image); 659 UnrefImageDecode(draw_image);
664 } 660 }
665 661
666 void GpuImageDecodeController::OnImageUploadTaskCompleted( 662 void GpuImageDecodeCache::OnImageUploadTaskCompleted(
667 const DrawImage& draw_image) { 663 const DrawImage& draw_image) {
668 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 664 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
669 "GpuImageDecodeController::OnImageUploadTaskCompleted"); 665 "GpuImageDecodeCache::OnImageUploadTaskCompleted");
670 base::AutoLock lock(lock_); 666 base::AutoLock lock(lock_);
671 // Upload task is complete, remove our reference to it. 667 // Upload task is complete, remove our reference to it.
672 ImageData* image_data = GetImageDataForDrawImage(draw_image); 668 ImageData* image_data = GetImageDataForDrawImage(draw_image);
673 DCHECK(image_data); 669 DCHECK(image_data);
674 DCHECK(image_data->upload.task); 670 DCHECK(image_data->upload.task);
675 image_data->upload.task = nullptr; 671 image_data->upload.task = nullptr;
676 672
677 // While the upload task is active, we keep a ref on both the image it will be 673 // While the upload task is active, we keep a ref on both the image it will be
678 // populating and the decode it needs to populate it. Release these 674 // populating and the decode it needs to populate it. Release these
679 // refs now. 675 // refs now.
680 UnrefImageDecode(draw_image); 676 UnrefImageDecode(draw_image);
681 UnrefImageInternal(draw_image); 677 UnrefImageInternal(draw_image);
682 } 678 }
683 679
684 // Checks if an existing image decode exists. If not, returns a task to produce 680 // Checks if an existing image decode exists. If not, returns a task to produce
685 // the requested decode. 681 // the requested decode.
686 scoped_refptr<TileTask> GpuImageDecodeController::GetImageDecodeTaskAndRef( 682 scoped_refptr<TileTask> GpuImageDecodeCache::GetImageDecodeTaskAndRef(
687 const DrawImage& draw_image, 683 const DrawImage& draw_image,
688 const TracingInfo& tracing_info) { 684 const TracingInfo& tracing_info) {
689 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 685 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
690 "GpuImageDecodeController::GetImageDecodeTaskAndRef"); 686 "GpuImageDecodeCache::GetImageDecodeTaskAndRef");
691 lock_.AssertAcquired(); 687 lock_.AssertAcquired();
692 688
693 // This ref is kept alive while an upload task may need this decode. We 689 // This ref is kept alive while an upload task may need this decode. We
694 // release this ref in UploadTaskCompleted. 690 // release this ref in UploadTaskCompleted.
695 RefImageDecode(draw_image); 691 RefImageDecode(draw_image);
696 692
697 ImageData* image_data = GetImageDataForDrawImage(draw_image); 693 ImageData* image_data = GetImageDataForDrawImage(draw_image);
698 DCHECK(image_data); 694 DCHECK(image_data);
699 if (image_data->decode.is_locked()) { 695 if (image_data->decode.is_locked()) {
700 // We should never be creating a decode task for an at raster image. 696 // We should never be creating a decode task for an at raster image.
701 DCHECK(!image_data->is_at_raster); 697 DCHECK(!image_data->is_at_raster);
702 // We should never be creating a decode for an already-uploaded image. 698 // We should never be creating a decode for an already-uploaded image.
703 DCHECK(!image_data->upload.image()); 699 DCHECK(!image_data->upload.image());
704 return nullptr; 700 return nullptr;
705 } 701 }
706 702
707 // We didn't have an existing locked image, create a task to lock or decode. 703 // We didn't have an existing locked image, create a task to lock or decode.
708 scoped_refptr<TileTask>& existing_task = image_data->decode.task; 704 scoped_refptr<TileTask>& existing_task = image_data->decode.task;
709 if (!existing_task) { 705 if (!existing_task) {
710 // Ref image decode and create a decode task. This ref will be released in 706 // Ref image decode and create a decode task. This ref will be released in
711 // DecodeTaskCompleted. 707 // DecodeTaskCompleted.
712 RefImageDecode(draw_image); 708 RefImageDecode(draw_image);
713 existing_task = make_scoped_refptr( 709 existing_task = make_scoped_refptr(
714 new ImageDecodeTaskImpl(this, draw_image, tracing_info)); 710 new ImageDecodeTaskImpl(this, draw_image, tracing_info));
715 } 711 }
716 return existing_task; 712 return existing_task;
717 } 713 }
718 714
719 void GpuImageDecodeController::RefImageDecode(const DrawImage& draw_image) { 715 void GpuImageDecodeCache::RefImageDecode(const DrawImage& draw_image) {
720 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 716 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
721 "GpuImageDecodeController::RefImageDecode"); 717 "GpuImageDecodeCache::RefImageDecode");
722 lock_.AssertAcquired(); 718 lock_.AssertAcquired();
723 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); 719 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
724 DCHECK(found != in_use_cache_.end()); 720 DCHECK(found != in_use_cache_.end());
725 ++found->second.ref_count; 721 ++found->second.ref_count;
726 ++found->second.image_data->decode.ref_count; 722 ++found->second.image_data->decode.ref_count;
727 OwnershipChanged(draw_image, found->second.image_data.get()); 723 OwnershipChanged(draw_image, found->second.image_data.get());
728 } 724 }
729 725
730 void GpuImageDecodeController::UnrefImageDecode(const DrawImage& draw_image) { 726 void GpuImageDecodeCache::UnrefImageDecode(const DrawImage& draw_image) {
731 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 727 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
732 "GpuImageDecodeController::UnrefImageDecode"); 728 "GpuImageDecodeCache::UnrefImageDecode");
733 lock_.AssertAcquired(); 729 lock_.AssertAcquired();
734 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); 730 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
735 DCHECK(found != in_use_cache_.end()); 731 DCHECK(found != in_use_cache_.end());
736 DCHECK_GT(found->second.image_data->decode.ref_count, 0u); 732 DCHECK_GT(found->second.image_data->decode.ref_count, 0u);
737 DCHECK_GT(found->second.ref_count, 0u); 733 DCHECK_GT(found->second.ref_count, 0u);
738 --found->second.ref_count; 734 --found->second.ref_count;
739 --found->second.image_data->decode.ref_count; 735 --found->second.image_data->decode.ref_count;
740 OwnershipChanged(draw_image, found->second.image_data.get()); 736 OwnershipChanged(draw_image, found->second.image_data.get());
741 if (found->second.ref_count == 0u) { 737 if (found->second.ref_count == 0u) {
742 in_use_cache_.erase(found); 738 in_use_cache_.erase(found);
743 } 739 }
744 } 740 }
745 741
746 void GpuImageDecodeController::RefImage(const DrawImage& draw_image) { 742 void GpuImageDecodeCache::RefImage(const DrawImage& draw_image) {
747 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 743 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
748 "GpuImageDecodeController::RefImage"); 744 "GpuImageDecodeCache::RefImage");
749 lock_.AssertAcquired(); 745 lock_.AssertAcquired();
750 InUseCacheKey key = GenerateInUseCacheKey(draw_image); 746 InUseCacheKey key = GenerateInUseCacheKey(draw_image);
751 auto found = in_use_cache_.find(key); 747 auto found = in_use_cache_.find(key);
752 748
753 // If no secondary cache entry was found for the given |draw_image|, then 749 // If no secondary cache entry was found for the given |draw_image|, then
754 // the draw_image only exists in the |persistent_cache_|. Create an in-use 750 // the draw_image only exists in the |persistent_cache_|. Create an in-use
755 // cache entry now. 751 // cache entry now.
756 if (found == in_use_cache_.end()) { 752 if (found == in_use_cache_.end()) {
757 auto found_image = persistent_cache_.Peek(draw_image.image()->uniqueID()); 753 auto found_image = persistent_cache_.Peek(draw_image.image()->uniqueID());
758 DCHECK(found_image != persistent_cache_.end()); 754 DCHECK(found_image != persistent_cache_.end());
759 DCHECK(found_image->second->upload_params.fPreScaleMipLevel <= 755 DCHECK(found_image->second->upload_params.fPreScaleMipLevel <=
760 CalculateUploadScaleMipLevel(draw_image)); 756 CalculateUploadScaleMipLevel(draw_image));
761 found = in_use_cache_ 757 found = in_use_cache_
762 .insert(InUseCache::value_type( 758 .insert(InUseCache::value_type(
763 key, InUseCacheEntry(found_image->second))) 759 key, InUseCacheEntry(found_image->second)))
764 .first; 760 .first;
765 } 761 }
766 762
767 DCHECK(found != in_use_cache_.end()); 763 DCHECK(found != in_use_cache_.end());
768 ++found->second.ref_count; 764 ++found->second.ref_count;
769 ++found->second.image_data->upload.ref_count; 765 ++found->second.image_data->upload.ref_count;
770 OwnershipChanged(draw_image, found->second.image_data.get()); 766 OwnershipChanged(draw_image, found->second.image_data.get());
771 } 767 }
772 768
773 void GpuImageDecodeController::UnrefImageInternal(const DrawImage& draw_image) { 769 void GpuImageDecodeCache::UnrefImageInternal(const DrawImage& draw_image) {
774 lock_.AssertAcquired(); 770 lock_.AssertAcquired();
775 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); 771 auto found = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
776 DCHECK(found != in_use_cache_.end()); 772 DCHECK(found != in_use_cache_.end());
777 DCHECK_GT(found->second.image_data->upload.ref_count, 0u); 773 DCHECK_GT(found->second.image_data->upload.ref_count, 0u);
778 DCHECK_GT(found->second.ref_count, 0u); 774 DCHECK_GT(found->second.ref_count, 0u);
779 --found->second.ref_count; 775 --found->second.ref_count;
780 --found->second.image_data->upload.ref_count; 776 --found->second.image_data->upload.ref_count;
781 OwnershipChanged(draw_image, found->second.image_data.get()); 777 OwnershipChanged(draw_image, found->second.image_data.get());
782 if (found->second.ref_count == 0u) { 778 if (found->second.ref_count == 0u) {
783 in_use_cache_.erase(found); 779 in_use_cache_.erase(found);
784 } 780 }
785 } 781 }
786 782
787 // Called any time an image or decode ref count changes. Takes care of any 783 // Called any time an image or decode ref count changes. Takes care of any
788 // necessary memory budget book-keeping and cleanup. 784 // necessary memory budget book-keeping and cleanup.
789 void GpuImageDecodeController::OwnershipChanged(const DrawImage& draw_image, 785 void GpuImageDecodeCache::OwnershipChanged(const DrawImage& draw_image,
790 ImageData* image_data) { 786 ImageData* image_data) {
791 lock_.AssertAcquired(); 787 lock_.AssertAcquired();
792 788
793 bool has_any_refs = 789 bool has_any_refs =
794 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0; 790 image_data->upload.ref_count > 0 || image_data->decode.ref_count > 0;
795 791
796 // Don't keep around completely empty images. This can happen if an image's 792 // Don't keep around completely empty images. This can happen if an image's
797 // decode/upload tasks were both cancelled before completing. 793 // decode/upload tasks were both cancelled before completing.
798 if (!has_any_refs && !image_data->upload.image() && 794 if (!has_any_refs && !image_data->upload.image() &&
799 !image_data->decode.data()) { 795 !image_data->decode.data()) {
800 auto found_persistent = 796 auto found_persistent =
(...skipping 75 matching lines...)
876 DCHECK(image_data->decode.is_locked()); 872 DCHECK(image_data->decode.is_locked());
877 } else { 873 } else {
878 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0); 874 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0);
879 } 875 }
880 #endif 876 #endif
881 } 877 }
882 878
883 // Ensures that we can fit a new image of size |required_size| in our cache. In 879 // Ensures that we can fit a new image of size |required_size| in our cache. In
884 // doing so, this function will free unreferenced image data as necessary to 880 // doing so, this function will free unreferenced image data as necessary to
885 // create room. 881 // create room.
886 bool GpuImageDecodeController::EnsureCapacity(size_t required_size) { 882 bool GpuImageDecodeCache::EnsureCapacity(size_t required_size) {
887 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 883 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
888 "GpuImageDecodeController::EnsureCapacity"); 884 "GpuImageDecodeCache::EnsureCapacity");
889 lock_.AssertAcquired(); 885 lock_.AssertAcquired();
890 886
891 if (CanFitSize(required_size) && !ExceedsPreferredCount()) 887 if (CanFitSize(required_size) && !ExceedsPreferredCount())
892 return true; 888 return true;
893 889
894 // While we are over memory or preferred item capacity, we iterate through 890 // While we are over memory or preferred item capacity, we iterate through
895 // our set of cached image data in LRU order. For each image, we can do two 891 // our set of cached image data in LRU order. For each image, we can do two
896 // things: 1) We can free the uploaded image, reducing the memory usage of 892 // things: 1) We can free the uploaded image, reducing the memory usage of
897 // the cache and 2) we can remove the entry entirely, reducing the count of 893 // the cache and 2) we can remove the entry entirely, reducing the count of
898 // elements in the cache. 894 // elements in the cache.
(...skipping 30 matching lines...)
929 925
930 if (CanFitSize(required_size) && !ExceedsPreferredCount()) 926 if (CanFitSize(required_size) && !ExceedsPreferredCount())
931 return true; 927 return true;
932 } 928 }
933 929
934 // Preferred count is only used as a guideline when trimming the cache. Allow 930 // Preferred count is only used as a guideline when trimming the cache. Allow
935 // new elements to be added as long as we are below our size limit. 931 // new elements to be added as long as we are below our size limit.
936 return CanFitSize(required_size); 932 return CanFitSize(required_size);
937 } 933 }
938 934
939 bool GpuImageDecodeController::CanFitSize(size_t size) const { 935 bool GpuImageDecodeCache::CanFitSize(size_t size) const {
940 lock_.AssertAcquired(); 936 lock_.AssertAcquired();
941 937
942 size_t bytes_limit; 938 size_t bytes_limit;
943 if (memory_state_ == base::MemoryState::NORMAL) { 939 if (memory_state_ == base::MemoryState::NORMAL) {
944 bytes_limit = cached_bytes_limit_; 940 bytes_limit = cached_bytes_limit_;
945 } else if (memory_state_ == base::MemoryState::THROTTLED) { 941 } else if (memory_state_ == base::MemoryState::THROTTLED) {
946 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor; 942 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor;
947 } else { 943 } else {
948 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); 944 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_);
949 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes; 945 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes;
950 } 946 }
951 947
952 base::CheckedNumeric<uint32_t> new_size(bytes_used_); 948 base::CheckedNumeric<uint32_t> new_size(bytes_used_);
953 new_size += size; 949 new_size += size;
954 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit; 950 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit;
955 } 951 }
956 952
957 bool GpuImageDecodeController::ExceedsPreferredCount() const { 953 bool GpuImageDecodeCache::ExceedsPreferredCount() const {
958 lock_.AssertAcquired(); 954 lock_.AssertAcquired();
959 955
960 size_t items_limit; 956 size_t items_limit;
961 if (memory_state_ == base::MemoryState::NORMAL) { 957 if (memory_state_ == base::MemoryState::NORMAL) {
962 items_limit = kNormalMaxItemsInCache; 958 items_limit = kNormalMaxItemsInCache;
963 } else if (memory_state_ == base::MemoryState::THROTTLED) { 959 } else if (memory_state_ == base::MemoryState::THROTTLED) {
964 items_limit = kThrottledMaxItemsInCache; 960 items_limit = kThrottledMaxItemsInCache;
965 } else { 961 } else {
966 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); 962 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_);
967 items_limit = kSuspendedMaxItemsInCache; 963 items_limit = kSuspendedMaxItemsInCache;
968 } 964 }
969 965
970 return persistent_cache_.size() > items_limit; 966 return persistent_cache_.size() > items_limit;
971 } 967 }
972 968
973 void GpuImageDecodeController::DecodeImageIfNecessary( 969 void GpuImageDecodeCache::DecodeImageIfNecessary(const DrawImage& draw_image,
974 const DrawImage& draw_image, 970 ImageData* image_data) {
975 ImageData* image_data) {
976 lock_.AssertAcquired(); 971 lock_.AssertAcquired();
977 972
978 DCHECK_GT(image_data->decode.ref_count, 0u); 973 DCHECK_GT(image_data->decode.ref_count, 0u);
979 974
980 if (image_data->decode.decode_failure) { 975 if (image_data->decode.decode_failure) {
981 // We have already tried and failed to decode this image. Don't try again. 976 // We have already tried and failed to decode this image. Don't try again.
982 return; 977 return;
983 } 978 }
984 979
985 if (image_data->upload.image()) { 980 if (image_data->upload.image()) {
986 // We already have an uploaded image, no reason to decode. 981 // We already have an uploaded image, no reason to decode.
987 return; 982 return;
988 } 983 }
989 984
990 if (image_data->decode.data() && 985 if (image_data->decode.data() &&
991 (image_data->decode.is_locked() || image_data->decode.Lock())) { 986 (image_data->decode.is_locked() || image_data->decode.Lock())) {
992 // We already decoded this, or we just needed to lock, early out. 987 // We already decoded this, or we just needed to lock, early out.
993 return; 988 return;
994 } 989 }
995 990
996 TRACE_EVENT0("cc", "GpuImageDecodeController::DecodeImage"); 991 TRACE_EVENT0("cc", "GpuImageDecodeCache::DecodeImage");
997 992
998 image_data->decode.ResetData(); 993 image_data->decode.ResetData();
999 std::unique_ptr<base::DiscardableMemory> backing_memory; 994 std::unique_ptr<base::DiscardableMemory> backing_memory;
1000 { 995 {
1001 base::AutoUnlock unlock(lock_); 996 base::AutoUnlock unlock(lock_);
1002 997
1003 backing_memory = base::DiscardableMemoryAllocator::GetInstance() 998 backing_memory = base::DiscardableMemoryAllocator::GetInstance()
1004 ->AllocateLockedDiscardableMemory(image_data->size); 999 ->AllocateLockedDiscardableMemory(image_data->size);
1005 1000
1006 switch (image_data->mode) { 1001 switch (image_data->mode) {
(...skipping 38 matching lines...)
1045 1040
1046 if (!backing_memory) { 1041 if (!backing_memory) {
1047 // If |backing_memory| was not populated, we had a non-decodable image. 1042 // If |backing_memory| was not populated, we had a non-decodable image.
1048 image_data->decode.decode_failure = true; 1043 image_data->decode.decode_failure = true;
1049 return; 1044 return;
1050 } 1045 }
1051 1046
1052 image_data->decode.SetLockedData(std::move(backing_memory)); 1047 image_data->decode.SetLockedData(std::move(backing_memory));
1053 } 1048 }
1054 1049
1055 void GpuImageDecodeController::UploadImageIfNecessary( 1050 void GpuImageDecodeCache::UploadImageIfNecessary(const DrawImage& draw_image,
1056 const DrawImage& draw_image, 1051 ImageData* image_data) {
1057 ImageData* image_data) {
1058 context_->GetLock()->AssertAcquired(); 1052 context_->GetLock()->AssertAcquired();
1059 lock_.AssertAcquired(); 1053 lock_.AssertAcquired();
1060 1054
1061 if (image_data->decode.decode_failure) { 1055 if (image_data->decode.decode_failure) {
 1062 if (image_data->decode.decode_failure) { // We were unable to decode this image. Don't try to upload. 1056 // We were unable to decode this image. Don't try to upload.
1063 return; 1057 return;
1064 } 1058 }
1065 1059
1066 if (image_data->upload.image()) { 1060 if (image_data->upload.image()) {
1067 // Someone has uploaded this image before us (at raster). 1061 // Someone has uploaded this image before us (at raster).
1068 return; 1062 return;
1069 } 1063 }
1070 1064
1071 TRACE_EVENT0("cc", "GpuImageDecodeController::UploadImage"); 1065 TRACE_EVENT0("cc", "GpuImageDecodeCache::UploadImage");
1072 DCHECK(image_data->decode.is_locked()); 1066 DCHECK(image_data->decode.is_locked());
1073 DCHECK_GT(image_data->decode.ref_count, 0u); 1067 DCHECK_GT(image_data->decode.ref_count, 0u);
1074 DCHECK_GT(image_data->upload.ref_count, 0u); 1068 DCHECK_GT(image_data->upload.ref_count, 0u);
1075 1069
1076 // We are about to upload a new image and are holding the context lock. 1070 // We are about to upload a new image and are holding the context lock.
1077 // Ensure that any images which have been marked for deletion are actually 1071 // Ensure that any images which have been marked for deletion are actually
1078 // cleaned up so we don't exceed our memory limit during this upload. 1072 // cleaned up so we don't exceed our memory limit during this upload.
1079 DeletePendingImages(); 1073 DeletePendingImages();
1080 1074
1081 sk_sp<SkImage> uploaded_image; 1075 sk_sp<SkImage> uploaded_image;
(...skipping 19 matching lines...)
1101 } 1095 }
1102 image_data->decode.mark_used(); 1096 image_data->decode.mark_used();
1103 DCHECK(uploaded_image); 1097 DCHECK(uploaded_image);
1104 1098
1105 // At-raster may have decoded this while we were unlocked. If so, ignore our 1099 // At-raster may have decoded this while we were unlocked. If so, ignore our
1106 // result. 1100 // result.
1107 if (!image_data->upload.image()) 1101 if (!image_data->upload.image())
1108 image_data->upload.SetImage(std::move(uploaded_image)); 1102 image_data->upload.SetImage(std::move(uploaded_image));
1109 } 1103 }
1110 1104
1111 scoped_refptr<GpuImageDecodeController::ImageData> 1105 scoped_refptr<GpuImageDecodeCache::ImageData>
1112 GpuImageDecodeController::CreateImageData(const DrawImage& draw_image) { 1106 GpuImageDecodeCache::CreateImageData(const DrawImage& draw_image) {
1113 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 1107 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1114 "GpuImageDecodeController::CreateImageData"); 1108 "GpuImageDecodeCache::CreateImageData");
1115 lock_.AssertAcquired(); 1109 lock_.AssertAcquired();
1116 1110
1117 DecodedDataMode mode; 1111 DecodedDataMode mode;
1118 int upload_scale_mip_level = CalculateUploadScaleMipLevel(draw_image); 1112 int upload_scale_mip_level = CalculateUploadScaleMipLevel(draw_image);
1119 auto params = SkImage::DeferredTextureImageUsageParams( 1113 auto params = SkImage::DeferredTextureImageUsageParams(
1120 draw_image.matrix(), CalculateUploadScaleFilterQuality(draw_image), 1114 draw_image.matrix(), CalculateUploadScaleFilterQuality(draw_image),
1121 upload_scale_mip_level); 1115 upload_scale_mip_level);
1122 size_t data_size = draw_image.image()->getDeferredTextureImageData( 1116 size_t data_size = draw_image.image()->getDeferredTextureImageData(
1123 *context_threadsafe_proxy_.get(), &params, 1, nullptr); 1117 *context_threadsafe_proxy_.get(), &params, 1, nullptr);
1124 1118
1125 if (data_size == 0) { 1119 if (data_size == 0) {
1126 // Can't upload image, too large or other failure. Try to use SW fallback. 1120 // Can't upload image, too large or other failure. Try to use SW fallback.
1127 SkImageInfo image_info = 1121 SkImageInfo image_info =
1128 CreateImageInfoForDrawImage(draw_image, upload_scale_mip_level); 1122 CreateImageInfoForDrawImage(draw_image, upload_scale_mip_level);
1129 data_size = image_info.getSafeSize(image_info.minRowBytes()); 1123 data_size = image_info.getSafeSize(image_info.minRowBytes());
1130 mode = DecodedDataMode::CPU; 1124 mode = DecodedDataMode::CPU;
1131 } else { 1125 } else {
1132 mode = DecodedDataMode::GPU; 1126 mode = DecodedDataMode::GPU;
1133 } 1127 }
1134 1128
1135 return make_scoped_refptr(new ImageData(mode, data_size, params)); 1129 return make_scoped_refptr(new ImageData(mode, data_size, params));
1136 } 1130 }
1137 1131
1138 void GpuImageDecodeController::DeletePendingImages() { 1132 void GpuImageDecodeCache::DeletePendingImages() {
1139 context_->GetLock()->AssertAcquired(); 1133 context_->GetLock()->AssertAcquired();
1140 lock_.AssertAcquired(); 1134 lock_.AssertAcquired();
1141 images_pending_deletion_.clear(); 1135 images_pending_deletion_.clear();
1142 } 1136 }
1143 1137
1144 SkImageInfo GpuImageDecodeController::CreateImageInfoForDrawImage( 1138 SkImageInfo GpuImageDecodeCache::CreateImageInfoForDrawImage(
1145 const DrawImage& draw_image, 1139 const DrawImage& draw_image,
1146 int upload_scale_mip_level) const { 1140 int upload_scale_mip_level) const {
1147 gfx::Size mip_size = 1141 gfx::Size mip_size =
1148 CalculateSizeForMipLevel(draw_image, upload_scale_mip_level); 1142 CalculateSizeForMipLevel(draw_image, upload_scale_mip_level);
1149 return SkImageInfo::Make(mip_size.width(), mip_size.height(), 1143 return SkImageInfo::Make(mip_size.width(), mip_size.height(),
1150 ResourceFormatToClosestSkColorType(format_), 1144 ResourceFormatToClosestSkColorType(format_),
1151 kPremul_SkAlphaType); 1145 kPremul_SkAlphaType);
1152 } 1146 }
1153 1147
1154 // Tries to find an ImageData that can be used to draw the provided 1148 // Tries to find an ImageData that can be used to draw the provided
1155 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one 1149 // |draw_image|. First looks for an exact entry in our |in_use_cache_|. If one
1156 // cannot be found, it looks for a compatible entry in our |persistent_cache_|. 1150 // cannot be found, it looks for a compatible entry in our |persistent_cache_|.
1157 GpuImageDecodeController::ImageData* 1151 GpuImageDecodeCache::ImageData* GpuImageDecodeCache::GetImageDataForDrawImage(
1158 GpuImageDecodeController::GetImageDataForDrawImage(
1159 const DrawImage& draw_image) { 1152 const DrawImage& draw_image) {
1160 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), 1153 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
1161 "GpuImageDecodeController::GetImageDataForDrawImage"); 1154 "GpuImageDecodeCache::GetImageDataForDrawImage");
1162 lock_.AssertAcquired(); 1155 lock_.AssertAcquired();
1163 auto found_in_use = in_use_cache_.find(GenerateInUseCacheKey(draw_image)); 1156 auto found_in_use = in_use_cache_.find(GenerateInUseCacheKey(draw_image));
1164 if (found_in_use != in_use_cache_.end()) 1157 if (found_in_use != in_use_cache_.end())
1165 return found_in_use->second.image_data.get(); 1158 return found_in_use->second.image_data.get();
1166 1159
1167 auto found_persistent = persistent_cache_.Get(draw_image.image()->uniqueID()); 1160 auto found_persistent = persistent_cache_.Get(draw_image.image()->uniqueID());
1168 if (found_persistent != persistent_cache_.end()) { 1161 if (found_persistent != persistent_cache_.end()) {
1169 ImageData* image_data = found_persistent->second.get(); 1162 ImageData* image_data = found_persistent->second.get();
1170 if (IsCompatible(image_data, draw_image)) { 1163 if (IsCompatible(image_data, draw_image)) {
1171 return image_data; 1164 return image_data;
1172 } else { 1165 } else {
1173 found_persistent->second->is_orphaned = true; 1166 found_persistent->second->is_orphaned = true;
1174 // Call OwnershipChanged before erasing the orphaned task from the 1167 // Call OwnershipChanged before erasing the orphaned task from the
1175 // persistent cache. This ensures that if the orphaned task has 0 1168 // persistent cache. This ensures that if the orphaned task has 0
1176 // references, it is cleaned up safely before it is deleted. 1169 // references, it is cleaned up safely before it is deleted.
1177 OwnershipChanged(draw_image, image_data); 1170 OwnershipChanged(draw_image, image_data);
1178 persistent_cache_.Erase(found_persistent); 1171 persistent_cache_.Erase(found_persistent);
1179 } 1172 }
1180 } 1173 }
1181 1174
1182 return nullptr; 1175 return nullptr;
1183 } 1176 }
1184 1177
1185 // Determines if we can draw the provided |draw_image| using the provided 1178 // Determines if we can draw the provided |draw_image| using the provided
1186 // |image_data|. This is true if the |image_data| is not scaled, or if it 1179 // |image_data|. This is true if the |image_data| is not scaled, or if it
1187 // is scaled at an equal or larger scale and equal or larger quality to 1180 // is scaled at an equal or larger scale and equal or larger quality to
1188 // the provided |draw_image|. 1181 // the provided |draw_image|.
1189 bool GpuImageDecodeController::IsCompatible(const ImageData* image_data, 1182 bool GpuImageDecodeCache::IsCompatible(const ImageData* image_data,
1190 const DrawImage& draw_image) const { 1183 const DrawImage& draw_image) const {
1191 bool is_scaled = image_data->upload_params.fPreScaleMipLevel != 0; 1184 bool is_scaled = image_data->upload_params.fPreScaleMipLevel != 0;
1192 bool scale_is_compatible = CalculateUploadScaleMipLevel(draw_image) >= 1185 bool scale_is_compatible = CalculateUploadScaleMipLevel(draw_image) >=
1193 image_data->upload_params.fPreScaleMipLevel; 1186 image_data->upload_params.fPreScaleMipLevel;
1194 bool quality_is_compatible = CalculateUploadScaleFilterQuality(draw_image) <= 1187 bool quality_is_compatible = CalculateUploadScaleFilterQuality(draw_image) <=
1195 image_data->upload_params.fQuality; 1188 image_data->upload_params.fQuality;
1196 return !is_scaled || (scale_is_compatible && quality_is_compatible); 1189 return !is_scaled || (scale_is_compatible && quality_is_compatible);
1197 } 1190 }
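A concrete reading of the checks in IsCompatible, with hypothetical values (note that a higher mip level means a smaller pre-scaled image):

// Illustrative only: hypothetical values, not taken from this CL.
// Cached entry was uploaded at mip level 1 (half size) with medium quality:
//   image_data->upload_params.fPreScaleMipLevel == 1
//   image_data->upload_params.fQuality == kMedium_SkFilterQuality
// New draw would use mip level 2 (quarter size) at low quality:
//   CalculateUploadScaleMipLevel(draw_image) == 2      -> 2 >= 1, scale OK
//   CalculateUploadScaleFilterQuality(draw_image) ==
//       kLow_SkFilterQuality                           -> low <= medium, OK
// IsCompatible() returns true: the larger, higher-quality cached image can
// serve the smaller, lower-quality draw. If the cached entry were instead at
// mip level 3 (already smaller than requested), the scale check would fail.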
1198 1191
1199 size_t GpuImageDecodeController::GetDrawImageSizeForTesting( 1192 size_t GpuImageDecodeCache::GetDrawImageSizeForTesting(const DrawImage& image) {
1200 const DrawImage& image) {
1201 base::AutoLock lock(lock_); 1193 base::AutoLock lock(lock_);
1202 scoped_refptr<ImageData> data = CreateImageData(image); 1194 scoped_refptr<ImageData> data = CreateImageData(image);
1203 return data->size; 1195 return data->size;
1204 } 1196 }
1205 1197
1206 void GpuImageDecodeController::SetImageDecodingFailedForTesting( 1198 void GpuImageDecodeCache::SetImageDecodingFailedForTesting(
1207 const DrawImage& image) { 1199 const DrawImage& image) {
1208 base::AutoLock lock(lock_); 1200 base::AutoLock lock(lock_);
1209 auto found = persistent_cache_.Peek(image.image()->uniqueID()); 1201 auto found = persistent_cache_.Peek(image.image()->uniqueID());
1210 DCHECK(found != persistent_cache_.end()); 1202 DCHECK(found != persistent_cache_.end());
1211 ImageData* image_data = found->second.get(); 1203 ImageData* image_data = found->second.get();
1212 image_data->decode.decode_failure = true; 1204 image_data->decode.decode_failure = true;
1213 } 1205 }
1214 1206
1215 bool GpuImageDecodeController::DiscardableIsLockedForTesting( 1207 bool GpuImageDecodeCache::DiscardableIsLockedForTesting(
1216 const DrawImage& image) { 1208 const DrawImage& image) {
1217 base::AutoLock lock(lock_); 1209 base::AutoLock lock(lock_);
1218 auto found = persistent_cache_.Peek(image.image()->uniqueID()); 1210 auto found = persistent_cache_.Peek(image.image()->uniqueID());
1219 DCHECK(found != persistent_cache_.end()); 1211 DCHECK(found != persistent_cache_.end());
1220 ImageData* image_data = found->second.get(); 1212 ImageData* image_data = found->second.get();
1221 return image_data->decode.is_locked(); 1213 return image_data->decode.is_locked();
1222 } 1214 }
1223 1215
1224 void GpuImageDecodeController::OnMemoryStateChange(base::MemoryState state) { 1216 void GpuImageDecodeCache::OnMemoryStateChange(base::MemoryState state) {
1225 switch (state) { 1217 switch (state) {
1226 case base::MemoryState::NORMAL: 1218 case base::MemoryState::NORMAL:
1227 memory_state_ = state; 1219 memory_state_ = state;
1228 break; 1220 break;
1229 case base::MemoryState::THROTTLED: 1221 case base::MemoryState::THROTTLED:
1230 case base::MemoryState::SUSPENDED: { 1222 case base::MemoryState::SUSPENDED: {
1231 memory_state_ = state; 1223 memory_state_ = state;
1232 1224
1233 // We've just changed our memory state to a (potentially) more 1225 // We've just changed our memory state to a (potentially) more
1234 // restrictive one. Re-enforce cache limits. 1226 // restrictive one. Re-enforce cache limits.
1235 base::AutoLock lock(lock_); 1227 base::AutoLock lock(lock_);
1236 EnsureCapacity(0); 1228 EnsureCapacity(0);
1237 break; 1229 break;
1238 } 1230 }
1239 case base::MemoryState::UNKNOWN: 1231 case base::MemoryState::UNKNOWN:
1240 // NOT_REACHED. 1232 // NOT_REACHED.
1241 break; 1233 break;
1242 } 1234 }
1243 } 1235 }
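OnMemoryStateChange is driven by the memory coordinator for registered clients. A hedged sketch of how a client class would typically hook into it; the class name MyCache is hypothetical, and the Register/Unregister calls are assumed from the memory_coordinator_client_registry.h include at the top of this file rather than shown in this diff:

// Hedged sketch: assumes base::MemoryCoordinatorClient and the client
// registry from base/memory/; MyCache is a hypothetical class.
#include "base/memory/memory_coordinator_client.h"
#include "base/memory/memory_coordinator_client_registry.h"

class MyCache : public base::MemoryCoordinatorClient {
 public:
  MyCache() {
    // Registering makes the coordinator call OnMemoryStateChange() on
    // NORMAL / THROTTLED / SUSPENDED transitions.
    base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
  }
  ~MyCache() {
    base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
  }
  void OnMemoryStateChange(base::MemoryState state) override {
    // A real client would tighten its limits here, as the cache above does
    // by taking its lock and calling EnsureCapacity(0).
  }
};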
1244 1236
1245 } // namespace cc 1237 } // namespace cc