Chromium Code Reviews
Side by Side Diff: cc/tiles/image_decode_controller.cc

Issue 1418573002: cc: Add image decode control in the compositor. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/tiles/image_decode_controller.h" 5 #include "cc/tiles/image_decode_controller.h"
6 6
7 #include "base/memory/discardable_memory.h"
7 #include "cc/debug/devtools_instrumentation.h" 8 #include "cc/debug/devtools_instrumentation.h"
9 #include "third_party/skia/include/core/SkCanvas.h"
10 #include "third_party/skia/include/core/SkImage.h"
11 #include "ui/gfx/skia_util.h"
8 12
9 namespace cc { 13 namespace cc {
10 namespace { 14 namespace {
11 15
16 // The amount of memory we can lock ahead of time (128MB). This limit is only
17 // used to inform the caller of the amount of space available in the cache. The
18 // caller can still request tasks which can cause this limit to be breached.
19 const size_t kLockedMemoryLimitBytes = 128 * 1024 * 1024;
20
21 // The largest single high quality image to try and process. Images above this
22 // size will drop down to medium quality.
23 const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024;
24
25 // The number of entries to keep around in the cache. This limit can be breached
26 // if more items are locked. That is, locked items ignore this limit.
27 const size_t kMaxItemsInCache = 100;
28
12 class ImageDecodeTaskImpl : public ImageDecodeTask { 29 class ImageDecodeTaskImpl : public ImageDecodeTask {
13 public: 30 public:
14 ImageDecodeTaskImpl(ImageDecodeController* controller, 31 ImageDecodeTaskImpl(ImageDecodeController* controller,
15 const SkImage* image, 32 const ImageDecodeController::ImageKey& image_key,
16 int layer_id, 33 const DrawImage& image,
17 uint64_t source_prepare_tiles_id) 34 uint64_t source_prepare_tiles_id)
18 : controller_(controller), 35 : controller_(controller),
19 image_(skia::SharePtr(image)), 36 image_key_(image_key),
20 layer_id_(layer_id), 37 image_(image),
38 image_ref_(skia::SharePtr(image.image())),
21 source_prepare_tiles_id_(source_prepare_tiles_id) {} 39 source_prepare_tiles_id_(source_prepare_tiles_id) {}
22 40
23 // Overridden from Task: 41 // Overridden from Task:
24 void RunOnWorkerThread() override { 42 void RunOnWorkerThread() override {
25 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", 43 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread",
26 "source_prepare_tiles_id", source_prepare_tiles_id_); 44 "source_prepare_tiles_id", source_prepare_tiles_id_);
27 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( 45 devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
28 image_.get()); 46 image_ref_.get());
29 controller_->DecodeImage(image_.get()); 47 controller_->DecodeImage(image_key_, image_);
30
31 // Release the reference after decoding image to ensure that it is not kept
32 // alive unless needed.
33 image_.clear();
34 } 48 }
35 49
36 // Overridden from TileTask: 50 // Overridden from TileTask:
37 void ScheduleOnOriginThread(TileTaskClient* client) override {} 51 void ScheduleOnOriginThread(TileTaskClient* client) override {}
38 void CompleteOnOriginThread(TileTaskClient* client) override { 52 void CompleteOnOriginThread(TileTaskClient* client) override {
39 controller_->OnImageDecodeTaskCompleted(layer_id_, image_.get(), 53 controller_->RemovePendingTask(image_key_);
40 !HasFinishedRunning());
41 } 54 }
42 55
43 protected: 56 protected:
44 ~ImageDecodeTaskImpl() override {} 57 ~ImageDecodeTaskImpl() override {}
45 58
46 private: 59 private:
47 ImageDecodeController* controller_; 60 ImageDecodeController* controller_;
48 skia::RefPtr<const SkImage> image_; 61 ImageDecodeController::ImageKey image_key_;
49 int layer_id_; 62 DrawImage image_;
63 skia::RefPtr<const SkImage> image_ref_;
50 uint64_t source_prepare_tiles_id_; 64 uint64_t source_prepare_tiles_id_;
51 65
52 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); 66 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
53 }; 67 };
54 68
69 template <typename Type>
70 typename std::deque<Type>::iterator FindImage(
71 std::deque<Type>* collection,
72 const ImageDecodeControllerKey& key) {
73 return std::find_if(collection->begin(), collection->end(),
74 [key](const Type& image) { return image.first == key; });
75 }
76
77 SkSize GetScaleAdjustment(const ImageDecodeControllerKey& key,
78 const SkImage* original_image) {
79 float x_scale =
80 key.target_size().width() / static_cast<float>(original_image->width());
81 float y_scale =
82 key.target_size().height() / static_cast<float>(original_image->height());
83 return SkSize::Make(x_scale, y_scale);
84 }
85
55 } // namespace 86 } // namespace
56 87
57 ImageDecodeController::ImageDecodeController() {} 88 ImageDecodeController::ImageDecodeController()
58 89 : is_using_gpu_rasterization_(false),
59 ImageDecodeController::~ImageDecodeController() {} 90 locked_images_budget_(kLockedMemoryLimitBytes) {}
60 91
61 scoped_refptr<ImageDecodeTask> ImageDecodeController::GetTaskForImage( 92 ImageDecodeController::~ImageDecodeController() {
93 DCHECK_EQ(0u, decoded_images_ref_counts_.size());
94 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size());
95 }
96
97 bool ImageDecodeController::GetTaskForImageAndRef(
62 const DrawImage& image, 98 const DrawImage& image,
63 int layer_id, 99 uint64_t prepare_tiles_id,
64 uint64_t prepare_tiles_id) { 100 scoped_refptr<ImageDecodeTask>* task) {
65 uint32_t generation_id = image.image()->uniqueID(); 101 // If the image already exists or if we're going to create a task for it, then
66 scoped_refptr<ImageDecodeTask>& decode_task = 102 // we'll likely need to ref this image (the exception is if we're prerolling
67 image_decode_tasks_[layer_id][generation_id]; 103 // the image only). That means the image is or will be in the cache. When the
68 if (!decode_task) 104 // ref goes to 0, it will be unpinned but will remain in the cache. If the
69 decode_task = CreateTaskForImage(image.image(), layer_id, prepare_tiles_id); 105 // image does not fit into the budget, then we don't ref this image, since it
70 return decode_task; 106 // will be decoded at raster time which is when it will be temporarily put in
71 } 107 // the cache.
72 108 ImageKey key = ImageKey::FromDrawImage(image);
73 scoped_refptr<ImageDecodeTask> ImageDecodeController::CreateTaskForImage( 109 TRACE_EVENT1("cc", "ImageDecodeController::GetTaskForImageAndRef", "key",
74 const SkImage* image, 110 key.ToString());
75 int layer_id, 111 // If we're not going to do a scale, we will just create a task to preroll the
76 uint64_t prepare_tiles_id) { 112 // image the first time we see it. This doesn't need to account for memory.
113 // TODO(vmpstr): We can also lock the original sized image, in which case it
114 // does require memory bookkeeping.
115 if (!ShouldDecodeAndScaleImage(key, image)) {
116 base::AutoLock lock(lock_);
117 if (prerolled_images_.count(key.image_id()) == 0) {
118 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key];
119 if (!existing_task) {
120 existing_task = make_scoped_refptr(
121 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id));
122 }
123 *task = existing_task;
124 } else {
125 *task = nullptr;
126 }
127 return false;
128 }
129
130 base::AutoLock lock(lock_);
131
132 // If we already have the image in cache, then we can return it.
133 auto decoded_it = FindImage(&decoded_images_, key);
134 bool new_image_fits_in_memory =
135 locked_images_budget_.AvailableMemoryBytes() >= key.target_bytes();
136 if (decoded_it != decoded_images_.end()) {
137 if (decoded_it->second->is_locked() ||
138 (new_image_fits_in_memory && decoded_it->second->Lock())) {
139 RefImage(key);
140 *task = nullptr;
141 SanityCheckState(__LINE__, true);
142 return true;
143 }
144 // If the image fits in memory, then we at least tried to lock it and
145 // failed. This means that it's not valid anymore.
146 if (new_image_fits_in_memory)
147 decoded_images_.erase(decoded_it);
148 }
149
150 // If the task exists, return it.
151 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key];
152 if (existing_task) {
153 RefImage(key);
154 *task = existing_task;
155 SanityCheckState(__LINE__, true);
156 return true;
157 }
158
159 // At this point, we have to create a new image/task, so we need to abort if
160 // it doesn't fit into memory and there are currently no raster tasks that
161 // would have already accounted for memory. The latter part is possible if
162 // there's a running raster task that could not be canceled, and still has a
163 // ref to the image that is now being reffed for the new schedule.
164 if (!new_image_fits_in_memory && (decoded_images_ref_counts_.find(key) ==
165 decoded_images_ref_counts_.end())) {
166 *task = nullptr;
167 SanityCheckState(__LINE__, true);
168 return false;
169 }
170
171 // Actually create the task. RefImage will account for memory on the first
172 // ref.
173 RefImage(key);
174 existing_task = make_scoped_refptr(
175 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id));
176 *task = existing_task;
177 SanityCheckState(__LINE__, true);
178 return true;
179 }
180
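The ref/unref contract established above is easiest to see in isolation. Below is a minimal, standalone sketch (standard C++ only; the class and method names are illustrative, not the real cc API) of the idea: a true return from the get-and-ref call means the caller now holds a ref charged against the byte budget and must balance it with an unref, while a false return means no ref was taken because the image would not fit.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <map>

    // Illustrative, simplified stand-in for the real controller.
    class RefCountedImageCache {
     public:
      explicit RefCountedImageCache(size_t budget_bytes)
          : available_bytes_(budget_bytes) {}

      // Returns true and takes a ref if |bytes| fits in the budget (or the
      // image is already reffed, in which case it was already accounted for).
      // The caller must balance every true return with a call to Unref().
      bool GetAndRef(uint32_t image_id, size_t bytes) {
        auto it = ref_counts_.find(image_id);
        if (it == ref_counts_.end() && bytes > available_bytes_)
          return false;  // Would not fit; decode happens at raster time instead.
        if (it == ref_counts_.end()) {
          available_bytes_ -= bytes;
          ref_counts_[image_id] = {1, bytes};
        } else {
          ++it->second.refs;
        }
        return true;
      }

      void Unref(uint32_t image_id) {
        auto it = ref_counts_.find(image_id);
        assert(it != ref_counts_.end());
        if (--it->second.refs == 0) {
          available_bytes_ += it->second.bytes;  // Image stays cached, unpinned.
          ref_counts_.erase(it);
        }
      }

     private:
      struct Entry { int refs; size_t bytes; };
      std::map<uint32_t, Entry> ref_counts_;
      size_t available_bytes_;
    };

    int main() {
      RefCountedImageCache cache(128 * 1024 * 1024);
      if (cache.GetAndRef(/*image_id=*/1, /*bytes=*/4 * 1024 * 1024)) {
        // ... schedule and run the decode task, raster with the image ...
        cache.Unref(1);
      }
    }
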
181 void ImageDecodeController::RefImage(const ImageKey& key) {
182 TRACE_EVENT1("cc", "ImageDecodeController::RefImage", "key", key.ToString());
183 lock_.AssertAcquired();
184 int ref = ++decoded_images_ref_counts_[key];
185 if (ref == 1) {
186 DCHECK_GE(locked_images_budget_.AvailableMemoryBytes(), key.target_bytes());
187 locked_images_budget_.AddUsage(key.target_bytes());
188 }
189 }
190
191 void ImageDecodeController::UnrefImage(const DrawImage& image) {
192 // When we unref the image, there are several situations we need to consider:
193 // 1. The ref did not reach 0, which means we have to keep the image locked.
 194 // 2. The ref reached 0, so we should unlock it:
 195 // 2a. The image isn't in the locked cache because we didn't get to decode
 196 //     it yet, so there is nothing to unlock.
 197 // 2b. The image is in the cache: unlock it but keep it in the list.
198 const ImageKey& key = ImageKey::FromDrawImage(image);
199 DCHECK(ShouldDecodeAndScaleImage(key, image));
200 TRACE_EVENT1("cc", "ImageDecodeController::UnrefImage", "key",
201 key.ToString());
202
203 base::AutoLock lock(lock_);
204 auto ref_count_it = decoded_images_ref_counts_.find(key);
205 DCHECK(ref_count_it != decoded_images_ref_counts_.end());
206
207 --ref_count_it->second;
208 if (ref_count_it->second == 0) {
209 decoded_images_ref_counts_.erase(ref_count_it);
210 locked_images_budget_.SubtractUsage(key.target_bytes());
211
212 auto decoded_image_it = FindImage(&decoded_images_, key);
 213 // If we've never decoded the image before the ref reached 0, then we wouldn't
214 // have it in our cache. This would happen if we canceled tasks.
215 if (decoded_image_it == decoded_images_.end()) {
216 SanityCheckState(__LINE__, true);
217 return;
218 }
219 DCHECK(decoded_image_it->second->is_locked());
220 decoded_image_it->second->Unlock();
221 }
222 SanityCheckState(__LINE__, true);
223 }
224
225 void ImageDecodeController::DecodeImage(const ImageKey& key,
226 const DrawImage& image) {
227 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImage", "key",
228 key.ToString());
229 if (!ShouldDecodeAndScaleImage(key, image)) {
230 image.image()->preroll();
231
232 base::AutoLock lock(lock_);
233 prerolled_images_.insert(key.image_id());
234 // Erase the pending task from the queue, since the task won't be doing
235 // anything useful after this function terminates. Since we don't preroll
 236 // images twice, this is not strictly necessary, but it behaves similarly to
237 // the other code path: when this function finishes, the task isn't in the
238 // pending_image_tasks_ list.
239 pending_image_tasks_.erase(key);
240 return;
241 }
242
243 base::AutoLock lock(lock_);
244
245 auto image_it = FindImage(&decoded_images_, key);
246 if (image_it != decoded_images_.end()) {
247 if (image_it->second->is_locked() || image_it->second->Lock()) {
248 pending_image_tasks_.erase(key);
249 return;
250 }
251 decoded_images_.erase(image_it);
252 }
253
254 scoped_refptr<DecodedImage> decoded_image;
255 {
256 base::AutoUnlock unlock(lock_);
257 decoded_image = DecodeImageInternal(key, image.image());
258 }
259
260 // Erase the pending task from the queue, since the task won't be doing
261 // anything useful after this function terminates. That is, if this image
262 // needs to be decoded again, we have to create a new task.
263 pending_image_tasks_.erase(key);
264
265 // We could have finished all of the raster tasks (cancelled) while this image
266 // decode task was running, which means that we now have a locked image but no
267 // ref counts. Unlock it immediately in this case.
268 if (decoded_images_ref_counts_.find(key) ==
269 decoded_images_ref_counts_.end()) {
270 decoded_image->Unlock();
271 }
272
273 // At this point, it could have been the case that this image was decoded in
274 // place by an already running raster task from a previous schedule. If that's
275 // the case, then it would have already been placed into the cache (possibly
276 // locked). Remove it if that was the case.
277 image_it = FindImage(&decoded_images_, key);
278 if (image_it != decoded_images_.end()) {
279 if (image_it->second->is_locked() || image_it->second->Lock()) {
280 pending_image_tasks_.erase(key);
281 return;
282 }
283 decoded_images_.erase(image_it);
284 }
285 decoded_images_.push_back(AnnotatedDecodedImage(key, decoded_image));
286 SanityCheckState(__LINE__, true);
287 }
288
289 scoped_refptr<ImageDecodeController::DecodedImage>
290 ImageDecodeController::DecodeImageInternal(const ImageKey& key,
reed1 2015/12/08 22:01:18 I think this code is doing the following (but I ma
vmpstr 2015/12/08 22:04:49 Yeah you're right that this is what the code is do
reed1 2015/12/09 16:03:04 Got it, thanks.
291 const SkImage* image) {
292 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImageInternal", "key",
293 key.ToString());
294
295 // Get the decoded image first (at the original scale).
296 SkImageInfo decoded_info = SkImageInfo::MakeN32Premul(
297 key.src_rect().width(), key.src_rect().height());
298 scoped_ptr<uint8_t[]> decoded_pixels(
299 new uint8_t[decoded_info.minRowBytes() * decoded_info.height()]);
300 bool result = image->readPixels(
301 decoded_info, decoded_pixels.get(), decoded_info.minRowBytes(),
302 key.src_rect().x(), key.src_rect().y(), SkImage::kAllow_CachingHint);
303 DCHECK(result);
304
305 skia::RefPtr<SkImage> decoded_image = skia::AdoptRef(SkImage::NewFromRaster(
reed1 2015/12/09 16:03:04 Do you need decoded_image? If the only use is so y
vmpstr 2015/12/09 23:53:41 Cool, thanks! Done.
306 decoded_info, decoded_pixels.get(), decoded_info.minRowBytes(),
307 [](const void* pixels, void* context) {}, nullptr));
308
309 // TODO(vmpstr): This scale is used to compute the target size to begin with,
310 // see if transporting it in the key is reasonable.
ericrk 2015/12/08 21:34:26 in what cases is the target_size not equal to the
vmpstr 2015/12/08 22:04:49 The size is different, because target_size refers
ericrk 2015/12/08 22:12:23 yup - seems fine, just potentially less efficient
311 float x_scale =
312 key.target_size().width() / static_cast<float>(image->width());
313 float y_scale =
314 key.target_size().height() / static_cast<float>(image->height());
315
316 int new_target_width = SkScalarRoundToInt(key.src_rect().width() * x_scale);
317 int new_target_height = SkScalarRoundToInt(key.src_rect().height() * y_scale);
318
319 // Now scale the pixels into the destination size.
320 SkImageInfo scaled_info =
321 SkImageInfo::MakeN32Premul(new_target_width, new_target_height);
322 scoped_ptr<base::DiscardableMemory> scaled_pixels =
323 base::DiscardableMemoryAllocator::GetInstance()
324 ->AllocateLockedDiscardableMemory(scaled_info.minRowBytes() *
325 scaled_info.height());
326 SkPixmap scaled_pixmap(scaled_info, scaled_pixels->data(),
327 scaled_info.minRowBytes());
328 // TODO(vmpstr): Start handling more than just high filter quality.
329 DCHECK_EQ(kHigh_SkFilterQuality, key.filter_quality());
330 result = decoded_image->scalePixels(scaled_pixmap, kHigh_SkFilterQuality,
331 SkImage::kDisallow_CachingHint);
332 DCHECK(result);
77 return make_scoped_refptr( 333 return make_scoped_refptr(
78 new ImageDecodeTaskImpl(this, image, layer_id, prepare_tiles_id)); 334 new DecodedImage(scaled_info, std::move(scaled_pixels),
79 } 335 SkSize::Make(-key.src_rect().x(), -key.src_rect().y())));
80 336 }
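
As the exchange above describes, the decode happens in two steps: the src_rect portion is read out at the original image scale, and those pixels are then scaled by the same x/y factors that map the full image onto target_size (the factors GetScaleAdjustment() later reports so raster can compensate). A small standalone sketch of just that size arithmetic, with illustrative struct names rather than the real cc/gfx types:

    #include <cmath>
    #include <cstdio>

    struct Size { int width, height; };
    struct Rect { int x, y, width, height; };

    // Scale factors that map the full original image onto the target size.
    static void ComputeScale(const Size& original, const Size& target,
                             float* x_scale, float* y_scale) {
      *x_scale = target.width / static_cast<float>(original.width);
      *y_scale = target.height / static_cast<float>(original.height);
    }

    int main() {
      const Size original = {2000, 1000};        // full image dimensions
      const Size target = {500, 250};            // requested draw size
      const Rect src_rect = {100, 0, 800, 600};  // visible portion to decode

      float x_scale, y_scale;
      ComputeScale(original, target, &x_scale, &y_scale);

      // Only the src_rect region is decoded, so the scaled output covers the
      // src_rect dimensions multiplied by the per-axis scale, rounded.
      const int scaled_width =
          static_cast<int>(std::lround(src_rect.width * x_scale));
      const int scaled_height =
          static_cast<int>(std::lround(src_rect.height * y_scale));

      std::printf("scale = %.2f x %.2f, scaled crop = %d x %d\n",
                  x_scale, y_scale, scaled_width, scaled_height);
      // Prints: scale = 0.25 x 0.25, scaled crop = 200 x 150
    }
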
81 void ImageDecodeController::DecodeImage(const SkImage* image) { 337
82 image->preroll(); 338 DecodedDrawImage ImageDecodeController::GetDecodedImageForDraw(
83 } 339 const DrawImage& draw_image) {
84 340 ImageKey key = ImageKey::FromDrawImage(draw_image);
85 void ImageDecodeController::AddLayerUsedCount(int layer_id) { 341 TRACE_EVENT1("cc", "ImageDecodeController::GetDecodedImageAndRef", "key",
86 ++used_layer_counts_[layer_id]; 342 key.ToString());
87 } 343 if (!ShouldDecodeAndScaleImage(key, draw_image))
88 344 return DecodedDrawImage(draw_image.image(), draw_image.filter_quality());
89 void ImageDecodeController::SubtractLayerUsedCount(int layer_id) { 345
90 if (--used_layer_counts_[layer_id]) 346 base::AutoLock lock(lock_);
347 auto decoded_images_it = FindImage(&decoded_images_, key);
348 // If we found the image and it's locked, then return it. If it's not locked,
349 // erase it from the cache since it might be put into the at-raster cache.
350 scoped_refptr<DecodedImage> decoded_image;
351 if (decoded_images_it != decoded_images_.end()) {
352 decoded_image = decoded_images_it->second;
353 if (decoded_image->is_locked()) {
354 RefImage(key);
355 SanityCheckState(__LINE__, true);
356 return DecodedDrawImage(
357 decoded_image->image(), decoded_image->src_rect_offset(),
358 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality);
359 } else {
360 decoded_images_.erase(decoded_images_it);
361 }
362 }
363
364 // See if another thread already decoded this image at raster time. If so, we
365 // can just use that result directly.
366 auto at_raster_images_it = FindImage(&at_raster_decoded_images_, key);
367 if (at_raster_images_it != at_raster_decoded_images_.end()) {
368 DCHECK(at_raster_images_it->second->is_locked());
369 RefAtRasterImage(key);
370 SanityCheckState(__LINE__, true);
371 auto decoded_draw_image = DecodedDrawImage(
372 at_raster_images_it->second->image(),
373 at_raster_images_it->second->src_rect_offset(),
374 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality);
375 decoded_draw_image.set_at_raster_decode(true);
376 return decoded_draw_image;
377 }
378
379 // Now we know that we don't have a locked image, and we seem to be the first
380 // thread encountering this image (that might not be true, since other threads
381 // might be decoding it already). This means that we need to decode the image
382 // assuming we can't lock the one we found in the cache.
383 bool check_at_raster_cache = false;
384 if (!decoded_image || !decoded_image->Lock()) {
385 // Note that we have to release the lock, since this lock is also accessed
386 // on the compositor thread. This means holding on to the lock might stall
387 // the compositor thread for the duration of the decode!
388 base::AutoUnlock unlock(lock_);
389 decoded_image = DecodeImageInternal(key, draw_image.image());
390 check_at_raster_cache = true;
391 }
392
 393 // While the lock was released, another thread could have decoded this
 394 // image already and put it in the at-raster cache. Look it up
395 // first.
396 bool need_to_add_image_to_cache = true;
397 if (check_at_raster_cache) {
398 at_raster_images_it = FindImage(&at_raster_decoded_images_, key);
399 if (at_raster_images_it != at_raster_decoded_images_.end()) {
400 // We have to drop our decode, since the one in the cache is being used by
401 // another thread.
402 decoded_image->Unlock();
403 decoded_image = at_raster_images_it->second;
404 need_to_add_image_to_cache = false;
405 }
406 }
407
408 // If we really are the first ones, or if the other thread already unlocked
 409 // the image, then put our work into the at-raster cache.
410 if (need_to_add_image_to_cache) {
411 at_raster_decoded_images_.push_back(
412 AnnotatedDecodedImage(key, decoded_image));
413 }
414
415 DCHECK(decoded_image);
416 DCHECK(decoded_image->is_locked());
417 RefAtRasterImage(key);
418 SanityCheckState(__LINE__, true);
419 auto decoded_draw_image = DecodedDrawImage(
420 decoded_image->image(), decoded_image->src_rect_offset(),
421 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality);
422 decoded_draw_image.set_at_raster_decode(true);
423 return decoded_draw_image;
424 }
425
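GetDecodedImageForDraw releases the lock around the slow decode and then re-checks the at-raster cache, so a raster-thread decode never stalls other threads for its full duration. A stripped-down sketch of that check / unlock / decode / re-check pattern using only standard C++ primitives (the cache type and Decode() helper are placeholders, not the real cc code):

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>
    #include <vector>

    struct Decoded {
      std::vector<unsigned char> pixels;
    };

    class AtRasterCache {
     public:
      std::shared_ptr<Decoded> GetOrDecode(const std::string& key) {
        std::unique_lock<std::mutex> lock(mutex_);
        auto it = cache_.find(key);
        if (it != cache_.end())
          return it->second;  // Another thread already decoded it.

        // Drop the lock for the slow decode so other threads are not stalled.
        lock.unlock();
        auto ours = std::make_shared<Decoded>(Decode(key));
        lock.lock();

        // While we were unlocked, another thread may have decoded and inserted
        // the same key; if so, discard our copy and use the cached one.
        it = cache_.find(key);
        if (it != cache_.end())
          return it->second;
        cache_[key] = ours;
        return ours;
      }

     private:
      static Decoded Decode(const std::string& key) {
        return Decoded{std::vector<unsigned char>(key.size(), 0)};  // placeholder
      }

      std::mutex mutex_;
      std::map<std::string, std::shared_ptr<Decoded>> cache_;
    };

    int main() {
      AtRasterCache cache;
      auto a = cache.GetOrDecode("image-1");
      auto b = cache.GetOrDecode("image-1");
      return a == b ? 0 : 1;  // Same entry is reused.
    }
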
426 void ImageDecodeController::DrawWithImageFinished(
427 const DrawImage& image,
428 const DecodedDrawImage& decoded_image) {
429 TRACE_EVENT1("cc", "ImageDecodeController::DrawWithImageFinished", "key",
430 ImageKey::FromDrawImage(image).ToString());
431 ImageKey key = ImageKey::FromDrawImage(image);
432 if (!ShouldDecodeAndScaleImage(key, image))
91 return; 433 return;
92 434
93 // Clean up decode tasks once a layer is no longer used. 435 if (decoded_image.is_at_raster_decode())
94 used_layer_counts_.erase(layer_id); 436 UnrefAtRasterImage(key);
95 image_decode_tasks_.erase(layer_id); 437 else
96 } 438 UnrefImage(image);
97 439 SanityCheckState(__LINE__, false);
98 void ImageDecodeController::OnImageDecodeTaskCompleted(int layer_id, 440 }
99 const SkImage* image, 441
100 bool was_canceled) { 442 void ImageDecodeController::RefAtRasterImage(const ImageKey& key) {
101 // If the task has successfully finished, then keep the task until the layer 443 TRACE_EVENT1("cc", "ImageDecodeController::RefAtRasterImage", "key",
102 // is no longer in use. This ensures that we only decode a image once. 444 key.ToString());
103 // TODO(vmpstr): Remove this when decode lifetime is controlled by cc. 445 DCHECK(FindImage(&at_raster_decoded_images_, key) !=
104 if (!was_canceled) 446 at_raster_decoded_images_.end());
447 ++at_raster_decoded_images_ref_counts_[key];
448 }
449
450 void ImageDecodeController::UnrefAtRasterImage(const ImageKey& key) {
451 TRACE_EVENT1("cc", "ImageDecodeController::UnrefAtRasterImage", "key",
452 key.ToString());
453 base::AutoLock lock(lock_);
454
455 auto ref_it = at_raster_decoded_images_ref_counts_.find(key);
456 DCHECK(ref_it != at_raster_decoded_images_ref_counts_.end());
457 --ref_it->second;
458 if (ref_it->second == 0) {
459 at_raster_decoded_images_ref_counts_.erase(ref_it);
460 auto at_raster_image_it = FindImage(&at_raster_decoded_images_, key);
461 DCHECK(at_raster_image_it != at_raster_decoded_images_.end());
462
463 // The ref for our image reached 0 and it's still locked. We need to figure
 464 // out the best thing to do with the image. There are several
465 // situations:
466 // 1. The image is not in the main cache and...
467 // 1a. ... its ref count is 0: unlock our image and put it in the main
468 // cache.
469 // 1b. ... ref count is not 0: keep the image locked and put it in the
470 // main cache.
471 // 2. The image is in the main cache...
472 // 2a. ... and is locked: unlock our image and discard it
473 // 2b. ... and is unlocked and...
474 // 2b1. ... its ref count is 0: unlock our image and replace the
475 // existing one with ours.
476 // 2b2. ... its ref count is not 0: this shouldn't be possible.
477 auto image_it = FindImage(&decoded_images_, key);
478 if (image_it == decoded_images_.end()) {
479 if (decoded_images_ref_counts_.find(key) ==
480 decoded_images_ref_counts_.end()) {
481 at_raster_image_it->second->Unlock();
482 }
483 decoded_images_.push_back(*at_raster_image_it);
484 } else if (image_it->second->is_locked()) {
485 at_raster_image_it->second->Unlock();
486 } else {
487 DCHECK(decoded_images_ref_counts_.find(key) ==
488 decoded_images_ref_counts_.end());
489 at_raster_image_it->second->Unlock();
490 decoded_images_.erase(image_it);
491 decoded_images_.push_back(*at_raster_image_it);
492 }
493 at_raster_decoded_images_.erase(at_raster_image_it);
494 }
495 }
496
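The case analysis in the comment above reduces to a small decision: keep the at-raster copy locked only if the main cache still has outstanding refs for the key, and move it into the main cache unless a locked copy is already there. A compact standalone sketch of that decision as a pure function (illustrative enum and parameter names, not the real cc types):

    #include <cstdio>

    enum class Action {
      kUnlockAndMoveToMainCache,      // 1a: not in main cache, no refs.
      kKeepLockedAndMoveToMainCache,  // 1b: not in main cache, refs outstanding.
      kUnlockAndDiscard,              // 2a: main cache already has a locked copy.
      kUnlockAndReplaceMainEntry,     // 2b1: main copy is unlocked and unreffed.
    };

    // Decide what to do with an at-raster decode whose ref count just hit zero.
    Action OnAtRasterRefsDropToZero(bool in_main_cache,
                                    bool main_entry_locked,
                                    bool main_refs_outstanding) {
      if (!in_main_cache) {
        return main_refs_outstanding ? Action::kKeepLockedAndMoveToMainCache
                                     : Action::kUnlockAndMoveToMainCache;
      }
      if (main_entry_locked)
        return Action::kUnlockAndDiscard;
      // Case 2b2 (unlocked main entry with outstanding refs) is not expected.
      return Action::kUnlockAndReplaceMainEntry;
    }

    int main() {
      std::printf("%d\n", static_cast<int>(OnAtRasterRefsDropToZero(
                              /*in_main_cache=*/false,
                              /*main_entry_locked=*/false,
                              /*main_refs_outstanding=*/true)));  // prints 1
    }
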
497 bool ImageDecodeController::ShouldDecodeAndScaleImage(const ImageKey& key,
498 const DrawImage& image) {
499 // TODO(vmpstr): Handle GPU rasterization.
500 if (is_using_gpu_rasterization_)
501 return false;
502 if (!CanHandleFilterQuality(key.filter_quality()))
503 return false;
504 return true;
505 }
506
507 bool ImageDecodeController::CanHandleFilterQuality(
508 SkFilterQuality filter_quality) {
509 // We don't need to handle low quality filters.
510 if (filter_quality == kLow_SkFilterQuality ||
511 filter_quality == kNone_SkFilterQuality) {
512 return false;
513 }
514
515 // TODO(vmpstr): We need to start caching mipmaps for medium quality and
516 // caching the interpolated values from those. For now, we don't have this.
517 if (filter_quality == kMedium_SkFilterQuality)
518 return false;
519 DCHECK(filter_quality == kHigh_SkFilterQuality);
520 return true;
521 }
522
523 void ImageDecodeController::ReduceCacheUsage() {
524 TRACE_EVENT0("cc", "ImageDecodeController::ReduceCacheUsage");
525 base::AutoLock lock(lock_);
526 size_t num_to_remove = (decoded_images_.size() > kMaxItemsInCache)
527 ? (decoded_images_.size() - kMaxItemsInCache)
528 : 0;
529 for (auto it = decoded_images_.begin();
530 num_to_remove != 0 && it != decoded_images_.end();) {
531 if (it->second->is_locked()) {
532 ++it;
533 continue;
534 }
535
536 it = decoded_images_.erase(it);
537 --num_to_remove;
538 }
539 }
540
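ReduceCacheUsage walks the cache from the oldest entry and evicts only unlocked entries until at most kMaxItemsInCache remain. A standalone sketch of that eviction loop over a std::deque (simplified entry type, not the real cc cache):

    #include <cstddef>
    #include <deque>

    struct Entry {
      int id;
      bool locked;
    };

    // Evict unlocked entries, oldest first, until the cache holds at most
    // |max_items| entries; locked entries are skipped and never counted out.
    void ReduceCacheUsage(std::deque<Entry>* cache, size_t max_items) {
      size_t num_to_remove =
          cache->size() > max_items ? cache->size() - max_items : 0;
      for (auto it = cache->begin();
           num_to_remove != 0 && it != cache->end();) {
        if (it->locked) {
          ++it;
          continue;
        }
        it = cache->erase(it);
        --num_to_remove;
      }
    }

    int main() {
      std::deque<Entry> cache = {{1, false}, {2, true}, {3, false}, {4, false}};
      ReduceCacheUsage(&cache, 2);
      // Entries 1 and 3 are evicted; locked entry 2 and newest entry 4 remain.
      return cache.size() == 2 ? 0 : 1;
    }
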
541 void ImageDecodeController::RemovePendingTask(const ImageKey& key) {
542 base::AutoLock lock(lock_);
543 pending_image_tasks_.erase(key);
544 }
545
546 void ImageDecodeController::SetIsUsingGpuRasterization(
547 bool is_using_gpu_rasterization) {
548 if (is_using_gpu_rasterization_ == is_using_gpu_rasterization)
105 return; 549 return;
106 550 is_using_gpu_rasterization_ = is_using_gpu_rasterization;
107 // Otherwise, we have to clean up the task so that a new one can be created if 551
108 // we need to decode the image again. 552 base::AutoLock lock(lock_);
109 LayerImageTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id); 553
110 if (layer_it == image_decode_tasks_.end()) 554 DCHECK_EQ(0u, decoded_images_ref_counts_.size());
111 return; 555 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size());
112 556 DCHECK(std::find_if(decoded_images_.begin(), decoded_images_.end(),
113 ImageTaskMap& image_tasks = layer_it->second; 557 [](const AnnotatedDecodedImage& image) {
114 ImageTaskMap::iterator task_it = image_tasks.find(image->uniqueID()); 558 return image.second->is_locked();
115 if (task_it == image_tasks.end()) 559 }) == decoded_images_.end());
116 return; 560 DCHECK(std::find_if(at_raster_decoded_images_.begin(),
117 image_tasks.erase(task_it); 561 at_raster_decoded_images_.end(),
562 [](const AnnotatedDecodedImage& image) {
563 return image.second->is_locked();
564 }) == at_raster_decoded_images_.end());
565 decoded_images_.clear();
566 at_raster_decoded_images_.clear();
567 }
568
569 size_t ImageDecodeController::SanityCheckState(int line, bool lock_acquired) {
enne (OOO) 2015/12/08 23:27:24 Why does this return something?
vmpstr 2015/12/09 23:53:41 Removed the return.
570 #if DCHECK_IS_ON()
571 if (!lock_acquired) {
572 base::AutoLock lock(lock_);
573 return SanityCheckState(line, true);
574 }
575
576 MemoryBudget budget(kLockedMemoryLimitBytes);
577 for (const auto& annotated_image : decoded_images_) {
578 auto ref_it = decoded_images_ref_counts_.find(annotated_image.first);
579 if (annotated_image.second->is_locked()) {
580 budget.AddUsage(annotated_image.first.target_bytes());
581 DCHECK(ref_it != decoded_images_ref_counts_.end()) << line;
582 } else {
583 DCHECK(ref_it == decoded_images_ref_counts_.end() ||
584 pending_image_tasks_.find(annotated_image.first) !=
585 pending_image_tasks_.end())
586 << line;
587 }
588 }
589 DCHECK_GE(budget.AvailableMemoryBytes(),
590 locked_images_budget_.AvailableMemoryBytes())
591 << line;
592 return budget.AvailableMemoryBytes();
593 #else
594 return 0u;
595 #endif // DCHECK_IS_ON()
596 }
597
598 // ImageDecodeControllerKey
599 ImageDecodeControllerKey ImageDecodeControllerKey::FromDrawImage(
600 const DrawImage& image) {
601 const SkSize& scale = image.scale();
602 gfx::Size target_size(
603 SkScalarRoundToInt(std::abs(image.image()->width() * scale.width())),
604 SkScalarRoundToInt(std::abs(image.image()->height() * scale.height())));
605
606 // Start with the quality that was requested, but drop down immediately to low
 607 // if we're not actually going to do any scaling.
608 SkFilterQuality quality = image.filter_quality();
609 if (target_size.width() == image.image()->width() &&
610 target_size.height() == image.image()->height()) {
611 quality = std::min(quality, kLow_SkFilterQuality);
612 }
613
614 // Drop from high to medium if the image has perspective applied, the matrix
615 // we applied wasn't decomposable, or if the scaled image will be too large.
616 if (quality == kHigh_SkFilterQuality) {
617 if (image.matrix_has_perspective() || !image.matrix_is_decomposable()) {
618 quality = kMedium_SkFilterQuality;
619 } else {
620 base::CheckedNumeric<size_t> size = 4u;
621 size *= target_size.width();
622 size *= target_size.height();
623 if (size.ValueOrDefault(std::numeric_limits<size_t>::max()) >
624 kMaxHighQualityImageSizeBytes) {
625 quality = kMedium_SkFilterQuality;
626 }
627 }
628 }
629
630 // Drop from medium to low if the matrix we applied wasn't decomposable or if
631 // we're enlarging the image in both dimensions.
632 if (quality == kMedium_SkFilterQuality) {
633 if (!image.matrix_is_decomposable() ||
634 (scale.width() >= 1.f && scale.height() >= 1.f)) {
635 quality = kLow_SkFilterQuality;
636 }
637 }
638
639 return ImageDecodeControllerKey(image.image()->uniqueID(),
640 gfx::SkIRectToRect(image.src_rect()),
641 target_size, quality);
642 }
643
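The quality-selection rules above can be read as a small pure function: start from the requested filter quality, then step down based on the transform and on the estimated decode size at four bytes per pixel. A standalone sketch of that logic (plain C++, with an illustrative Quality enum standing in for SkFilterQuality):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    enum class Quality { kNone, kLow, kMedium, kHigh };

    constexpr size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024;

    // Overflow-safe estimate of the decoded size at 4 bytes per pixel.
    size_t EstimatedBytes(int width, int height) {
      uint64_t bytes =
          4ull * static_cast<uint64_t>(width) * static_cast<uint64_t>(height);
      return bytes > std::numeric_limits<size_t>::max()
                 ? std::numeric_limits<size_t>::max()
                 : static_cast<size_t>(bytes);
    }

    Quality ChooseQuality(Quality requested, int target_width, int target_height,
                          int image_width, int image_height,
                          bool has_perspective, bool decomposable,
                          float x_scale, float y_scale) {
      Quality quality = requested;
      // No scaling at all: low quality is enough.
      if (target_width == image_width && target_height == image_height)
        quality = std::min(quality, Quality::kLow);
      // High drops to medium for perspective, non-decomposable matrices, or
      // decodes too large to keep at high quality.
      if (quality == Quality::kHigh &&
          (has_perspective || !decomposable ||
           EstimatedBytes(target_width, target_height) >
               kMaxHighQualityImageSizeBytes)) {
        quality = Quality::kMedium;
      }
      // Medium drops to low for non-decomposable matrices or upscales.
      if (quality == Quality::kMedium &&
          (!decomposable || (x_scale >= 1.f && y_scale >= 1.f))) {
        quality = Quality::kLow;
      }
      return quality;
    }

    int main() {
      // A 4100x4100 high-quality target exceeds 64MB at 4 bytes per pixel,
      // so it drops to medium.
      Quality q = ChooseQuality(Quality::kHigh, 4100, 4100, 8000, 8000,
                                /*has_perspective=*/false,
                                /*decomposable=*/true, 0.5125f, 0.5125f);
      std::printf("%d\n", static_cast<int>(q));  // prints 2 (kMedium)
    }
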
644 ImageDecodeControllerKey::ImageDecodeControllerKey(
645 uint32_t image_id,
646 const gfx::Rect& src_rect,
647 const gfx::Size& size,
648 SkFilterQuality filter_quality)
649 : image_id_(image_id),
650 src_rect_(src_rect),
651 size_(size),
652 filter_quality_(filter_quality) {}
653
654 std::string ImageDecodeControllerKey::ToString() const {
655 std::ostringstream str;
656 str << "id[" << image_id_ << "] src_rect[" << src_rect_.x() << ","
657 << src_rect_.y() << " " << src_rect_.width() << "x" << src_rect_.height()
658 << "] size[" << size_.width() << "x" << size_.height()
659 << "] filter_quality[" << filter_quality_ << "]";
660 return str.str();
661 }
662
663 // DecodedImage
664 ImageDecodeController::DecodedImage::DecodedImage(
665 const SkImageInfo& info,
666 scoped_ptr<base::DiscardableMemory> memory,
667 const SkSize& src_rect_offset)
668 : locked_(true),
669 image_info_(info),
670 memory_(memory.Pass()),
671 src_rect_offset_(src_rect_offset) {
672 image_ = skia::AdoptRef(SkImage::NewFromRaster(
673 image_info_, memory_->data(), image_info_.minRowBytes(),
674 [](const void* pixels, void* context) {}, nullptr));
675 }
676
677 ImageDecodeController::DecodedImage::~DecodedImage() {}
678
679 bool ImageDecodeController::DecodedImage::Lock() {
680 DCHECK(!locked_);
681 bool success = memory_->Lock();
682 if (!success)
683 return false;
684 locked_ = true;
685 return true;
686 }
687
688 void ImageDecodeController::DecodedImage::Unlock() {
689 DCHECK(locked_);
690 memory_->Unlock();
691 locked_ = false;
692 }
693
694 // MemoryBudget
695 ImageDecodeController::MemoryBudget::MemoryBudget(size_t limit_bytes)
696 : limit_bytes_(limit_bytes), current_usage_bytes_(0u) {}
697
698 size_t ImageDecodeController::MemoryBudget::AvailableMemoryBytes() const {
699 size_t usage = GetCurrentUsageSafe();
700 return usage >= limit_bytes_ ? 0u : (limit_bytes_ - usage);
701 }
702
703 void ImageDecodeController::MemoryBudget::AddUsage(size_t usage) {
704 current_usage_bytes_ += usage;
705 }
706
707 void ImageDecodeController::MemoryBudget::SubtractUsage(size_t usage) {
708 DCHECK_GE(current_usage_bytes_.ValueOrDefault(0u), usage);
709 current_usage_bytes_ -= usage;
710 }
711
712 void ImageDecodeController::MemoryBudget::ResetUsage() {
713 current_usage_bytes_ = 0;
714 }
715
716 size_t ImageDecodeController::MemoryBudget::GetCurrentUsageSafe() const {
717 return current_usage_bytes_.ValueOrDie();
118 } 718 }
119 719
120 } // namespace cc 720 } // namespace cc