OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/tiles/image_decode_controller.h" | 5 #include "cc/tiles/image_decode_controller.h" |
6 | 6 |
7 #include "base/memory/discardable_memory.h" | |
7 #include "cc/debug/devtools_instrumentation.h" | 8 #include "cc/debug/devtools_instrumentation.h" |
9 #include "third_party/skia/include/core/SkCanvas.h" | |
10 #include "third_party/skia/include/core/SkImage.h" | |
8 | 11 |
9 namespace cc { | 12 namespace cc { |
10 namespace { | 13 namespace { |
11 | 14 |
15 // The amount of memory we can lock ahead of time (128MB). This limit is only | |
16 // used to inform the caller of the amount of space available in the cache. The | |
17 // caller can still request tasks that can cause this limit to be breached. |
18 const size_t kLockedMemoryLimitBytes = 128 * 1024 * 1024; | |
19 | |
20 // The largest single high quality image to try and process. Images above this | |
21 // size will drop down to medium quality. | |
22 const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024; | |
23 | |
24 // The number of entries to keep around in the cache. This limit can be breached | |
25 // if more items are locked. That is, locked items ignore this limit. | |
26 const size_t kMaxItemsInCache = 100; | |
27 | |
12 class ImageDecodeTaskImpl : public ImageDecodeTask { | 28 class ImageDecodeTaskImpl : public ImageDecodeTask { |
13 public: | 29 public: |
14 ImageDecodeTaskImpl(ImageDecodeController* controller, | 30 ImageDecodeTaskImpl(ImageDecodeController* controller, |
15 const SkImage* image, | 31 const ImageDecodeController::ImageKey& image_key, |
16 int layer_id, | 32 const DrawImage& image, |
17 uint64_t source_prepare_tiles_id) | 33 uint64_t source_prepare_tiles_id) |
18 : controller_(controller), | 34 : controller_(controller), |
19 image_(skia::SharePtr(image)), | 35 image_key_(image_key), |
20 layer_id_(layer_id), | 36 image_(image), |
37 image_ref_(skia::SharePtr(image.image())), | |
21 source_prepare_tiles_id_(source_prepare_tiles_id) {} | 38 source_prepare_tiles_id_(source_prepare_tiles_id) {} |
22 | 39 |
23 // Overridden from Task: | 40 // Overridden from Task: |
24 void RunOnWorkerThread() override { | 41 void RunOnWorkerThread() override { |
25 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", | 42 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", |
26 "source_prepare_tiles_id", source_prepare_tiles_id_); | 43 "source_prepare_tiles_id", source_prepare_tiles_id_); |
27 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( | 44 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( |
28 image_.get()); | 45 image_ref_.get()); |
29 controller_->DecodeImage(image_.get()); | 46 controller_->DecodeImage(image_key_, image_); |
30 | |
31 // Release the reference after decoding image to ensure that it is not kept | |
32 // alive unless needed. | |
33 image_.clear(); | |
34 } | 47 } |
35 | 48 |
36 // Overridden from TileTask: | 49 // Overridden from TileTask: |
37 void ScheduleOnOriginThread(TileTaskClient* client) override {} | 50 void ScheduleOnOriginThread(TileTaskClient* client) override {} |
38 void CompleteOnOriginThread(TileTaskClient* client) override { | 51 void CompleteOnOriginThread(TileTaskClient* client) override { |
39 controller_->OnImageDecodeTaskCompleted(layer_id_, image_.get(), | 52 controller_->RemovePendingTask(image_key_); |
40 !HasFinishedRunning()); | |
41 } | 53 } |
42 | 54 |
43 protected: | 55 protected: |
44 ~ImageDecodeTaskImpl() override {} | 56 ~ImageDecodeTaskImpl() override {} |
45 | 57 |
46 private: | 58 private: |
47 ImageDecodeController* controller_; | 59 ImageDecodeController* controller_; |
48 skia::RefPtr<const SkImage> image_; | 60 ImageDecodeController::ImageKey image_key_; |
49 int layer_id_; | 61 DrawImage image_; |
62 skia::RefPtr<const SkImage> image_ref_; | |
50 uint64_t source_prepare_tiles_id_; | 63 uint64_t source_prepare_tiles_id_; |
51 | 64 |
52 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); | 65 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); |
53 }; | 66 }; |
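
The task is a thin wrapper: the worker thread does the decode, and the origin thread unconditionally removes the pending-task entry, whether or not the decode actually ran. A hypothetical sketch of that lifecycle (the Drive helper and the simplified signatures are invented for illustration; the real hooks take a TileTaskClient):

    // Hypothetical driver showing the two-phase task lifecycle.
    struct Task {
      virtual ~Task() {}
      virtual void RunOnWorkerThread() = 0;       // decode happens here
      virtual void CompleteOnOriginThread() = 0;  // bookkeeping happens here
    };

    void Drive(Task* task, bool canceled) {
      if (!canceled)
        task->RunOnWorkerThread();     // -> controller_->DecodeImage(key, image)
      task->CompleteOnOriginThread();  // -> controller_->RemovePendingTask(key)
    }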
54 | 67 |
68 template <typename Type> | |
69 typename std::deque<Type>::iterator FindImage( | |
70 std::deque<Type>* collection, | |
71 const ImageDecodeControllerKey& key) { | |
72 return std::find_if(collection->begin(), collection->end(), | |
73 [key](const Type& image) { return image.first == key; }); | |
74 } | |
75 | |
76 SkSize GetScaleAdjustment(const ImageDecodeControllerKey& key, | |
77 const SkImage* original_image) { | |
78 float x_scale = | |
79 key.target_size().width() / static_cast<float>(original_image->width()); | |
80 float y_scale = | |
81 key.target_size().height() / static_cast<float>(original_image->height()); | |
82 return SkSize::Make(x_scale, y_scale); | |
83 } | |
84 | |
55 } // namespace | 85 } // namespace |
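
FindImage is a linear scan over a deque keyed on ImageKey, and GetScaleAdjustment reports the target/original ratio that raster code must apply because the cached image was decoded pre-scaled. A standalone worked example of the adjustment (numbers invented):

    #include <iostream>

    int main() {
      const int orig_w = 400, orig_h = 200;     // original SkImage size
      const int target_w = 100, target_h = 50;  // key.target_size()
      float x_scale = target_w / static_cast<float>(orig_w);
      float y_scale = target_h / static_cast<float>(orig_h);
      // Prints 0.25, 0.25: the draw is scaled down by this factor because the
      // cached decode is already 4x smaller than the original image.
      std::cout << x_scale << ", " << y_scale << "\n";
      return 0;
    }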
56 | 86 |
57 ImageDecodeController::ImageDecodeController() {} | 87 ImageDecodeController::ImageDecodeController() |
58 | 88 : is_using_gpu_rasterization_(false), |
59 ImageDecodeController::~ImageDecodeController() {} | 89 locked_images_budget_(kLockedMemoryLimitBytes) {} |
60 | 90 |
61 scoped_refptr<ImageDecodeTask> ImageDecodeController::GetTaskForImage( | 91 ImageDecodeController::~ImageDecodeController() { |
92 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); | |
93 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); | |
94 } | |
95 | |
96 bool ImageDecodeController::GetTaskForImageAndRef( | |
62 const DrawImage& image, | 97 const DrawImage& image, |
63 int layer_id, | 98 uint64_t prepare_tiles_id, |
64 uint64_t prepare_tiles_id) { | 99 scoped_refptr<ImageDecodeTask>* task) { |
65 uint32_t generation_id = image.image()->uniqueID(); | 100 // If the image is already in the cache, or if we are about to create a |
ericrk 2015/12/04 00:50:46: this line is a bit unclear - you say "if we're goi…"
66 scoped_refptr<ImageDecodeTask>& decode_task = | 101 // decode task for it, then we take a ref; either way the image is, or soon |
67 image_decode_tasks_[layer_id][generation_id]; | 102 // will be, in the cache. When the ref count reaches 0, the image is unpinned |
68 if (!decode_task) | 103 // but remains in the cache. If the image does not fit into the budget, we |
69 decode_task = CreateTaskForImage(image.image(), layer_id, prepare_tiles_id); | 104 // don't ref it at all, since it will instead be decoded at raster time, |
70 return decode_task; | 105 // which is when it will be temporarily put in the cache. |
71 } | 106 ImageKey key = ImageKey::FromDrawImage(image); |
72 | 107 TRACE_EVENT1("cc", "ImageDecodeController::GetTaskForImageAndRef", "key", |
73 scoped_refptr<ImageDecodeTask> ImageDecodeController::CreateTaskForImage( | 108 key.ToString()); |
74 const SkImage* image, | 109 // If we're not going to do a scale, we will just create a task to preroll the |
75 int layer_id, | 110 // image the first time we see it. This doesn't need to account for memory. |
76 uint64_t prepare_tiles_id) { | 111 // TODO(vmpstr): We can also lock the original sized image, in which case it |
112 // does require memory bookkeeping. | |
113 if (!ShouldDecodeAndScaleImage(key, image)) { | |
114 base::AutoLock lock(lock_); | |
115 if (prerolled_images_.count(key.image_id()) == 0) { | |
116 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; | |
117 if (!existing_task) { | |
118 existing_task = make_scoped_refptr( | |
119 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); | |
120 } | |
121 *task = existing_task; | |
122 } else { | |
123 *task = nullptr; | |
124 } | |
125 return false; | |
126 } | |
127 | |
128 base::AutoLock lock(lock_); | |
129 | |
130 // If we already have the image in cache, then we can return it. | |
131 auto decoded_it = FindImage(&decoded_images_, key); | |
132 bool new_image_fits_in_memory = | |
133 locked_images_budget_.AvailableMemoryBytes() >= key.target_bytes(); | |
134 if (decoded_it != decoded_images_.end()) { | |
135 if (decoded_it->second->is_locked() || | |
136 (new_image_fits_in_memory && decoded_it->second->Lock())) { | |
137 RefImage(key); | |
138 *task = nullptr; | |
139 SanityCheckState(__LINE__, true); | |
140 return true; | |
141 } | |
142 // If the image fits in memory, then we at least tried to lock it and | |
143 // failed. This means that it's not valid anymore. | |
144 if (new_image_fits_in_memory) | |
145 decoded_images_.erase(decoded_it); | |
146 } | |
147 | |
148 // If the task exists, return it. | |
149 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; | |
150 if (existing_task) { | |
151 RefImage(key); | |
152 *task = existing_task; | |
153 SanityCheckState(__LINE__, true); | |
154 return true; | |
155 } | |
156 | |
157 // At this point, we have to create a new image/task, so we need to abort if | |
158 // it doesn't fit into memory and there are currently no raster tasks that | |
159 // would have already accounted for memory. The latter part is possible if | |
160 // there's a running raster task that could not be canceled, and still has a | |
161 // ref to the image that is now being reffed for the new schedule. | |
162 if (!new_image_fits_in_memory && (decoded_images_ref_counts_.find(key) == | |
163 decoded_images_ref_counts_.end())) { | |
164 *task = nullptr; | |
165 SanityCheckState(__LINE__, true); | |
166 return false; | |
167 } | |
168 | |
169 // Actually create the task. RefImage will account for memory on the first | |
170 // ref. | |
171 RefImage(key); | |
172 existing_task = make_scoped_refptr( | |
173 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); | |
174 *task = existing_task; | |
175 SanityCheckState(__LINE__, true); | |
176 return true; | |
177 } | |
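
A hypothetical caller-side sketch of the contract above: the return value says whether a ref was taken (and therefore whether an UnrefImage is owed later), independently of whether a task came back (names other than the controller's methods are invented):

    #include <vector>

    void GatherImageTasks(ImageDecodeController* controller,
                          const std::vector<DrawImage>& images,
                          uint64_t prepare_tiles_id,
                          std::vector<scoped_refptr<ImageDecodeTask>>* tasks,
                          std::vector<DrawImage>* reffed_images) {
      for (const DrawImage& image : images) {
        scoped_refptr<ImageDecodeTask> task;
        if (controller->GetTaskForImageAndRef(image, prepare_tiles_id, &task)) {
          // true: a ref was taken; UnrefImage(image) must be called later.
          reffed_images->push_back(image);
        }
        // task can be null even when a ref was taken (image already decoded
        // and locked), and non-null without a ref (the preroll-only path).
        if (task)
          tasks->push_back(task);
      }
    }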
178 | |
179 void ImageDecodeController::RefImage(const ImageKey& key) { | |
180 TRACE_EVENT1("cc", "ImageDecodeController::RefImage", "key", key.ToString()); | |
181 lock_.AssertAcquired(); | |
182 int ref = ++decoded_images_ref_counts_[key]; | |
183 if (ref == 1) { | |
184 DCHECK_GE(locked_images_budget_.AvailableMemoryBytes(), key.target_bytes()); | |
185 locked_images_budget_.AddUsage(key.target_bytes()); | |
186 } | |
187 } | |
188 | |
189 void ImageDecodeController::UnrefImage(const DrawImage& image) { | |
190 // When we unref the image, there are several situations we need to consider: | |
191 // 1. The ref did not reach 0, which means we have to keep the image locked. | |
192 // 2. The ref reached 0, we should unlock it. | |
193 // 2a. The image isn't in the locked cache because we didn't get to decode | |
194 // it yet. | |
195 // 2b. Unlock the image but keep it in list. | |
196 const ImageKey& key = ImageKey::FromDrawImage(image); | |
197 DCHECK(ShouldDecodeAndScaleImage(key, image)); | |
198 TRACE_EVENT1("cc", "ImageDecodeController::UnrefImage", "key", | |
199 key.ToString()); | |
200 | |
201 base::AutoLock lock(lock_); | |
202 auto ref_count_it = decoded_images_ref_counts_.find(key); | |
203 DCHECK(ref_count_it != decoded_images_ref_counts_.end()); | |
204 | |
205 --ref_count_it->second; | |
206 if (ref_count_it->second == 0) { | |
207 decoded_images_ref_counts_.erase(ref_count_it); | |
208 locked_images_budget_.SubtractUsage(key.target_bytes()); | |
209 | |
210 auto decoded_image_it = FindImage(&decoded_images_, key); | |
211 // If we've never decoded the image before the ref reached 0, then we wouldn't |
212 // have it in our cache. This would happen if we canceled tasks. | |
213 if (decoded_image_it == decoded_images_.end()) { | |
214 SanityCheckState(__LINE__, true); | |
215 return; | |
216 } | |
217 DCHECK(decoded_image_it->second->is_locked()); | |
218 decoded_image_it->second->Unlock(); | |
219 } | |
220 SanityCheckState(__LINE__, true); | |
221 } | |
222 | |
223 void ImageDecodeController::DecodeImage(const ImageKey& key, | |
224 const DrawImage& image) { | |
225 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImage", "key", | |
226 key.ToString()); | |
227 if (!ShouldDecodeAndScaleImage(key, image)) { | |
228 image.image()->preroll(); | |
229 | |
230 base::AutoLock lock(lock_); | |
231 prerolled_images_.insert(key.image_id()); | |
232 // Erase the pending task from the queue, since the task won't be doing | |
233 // anything useful after this function terminates. Since we don't preroll | |
234 // images twice, this is not strictly necessary, but it behaves similarly to |
235 // the other code path: when this function finishes, the task isn't in the | |
236 // pending_image_tasks_ list. | |
237 pending_image_tasks_.erase(key); | |
238 return; | |
239 } | |
240 | |
241 base::AutoLock lock(lock_); | |
242 | |
243 auto image_it = FindImage(&decoded_images_, key); | |
244 if (image_it != decoded_images_.end()) { | |
245 if (image_it->second->is_locked() || image_it->second->Lock()) { | |
246 pending_image_tasks_.erase(key); | |
247 return; | |
248 } | |
249 decoded_images_.erase(image_it); | |
250 } | |
251 | |
252 scoped_refptr<DecodedImage> decoded_image; | |
253 { | |
254 base::AutoUnlock unlock(lock_); | |
255 decoded_image = DecodeImageInternal(key, image.image()); | |
256 } | |
257 | |
258 // Erase the pending task from the queue, since the task won't be doing | |
259 // anything useful after this function terminates. That is, if this image | |
260 // needs to be decoded again, we have to create a new task. | |
261 pending_image_tasks_.erase(key); | |
262 | |
263 // We could have finished all of the raster tasks (canceled) while this image |
264 // decode task was running, which means that we now have a locked image but no | |
265 // ref counts. Unlock it immediately in this case. | |
266 if (decoded_images_ref_counts_.find(key) == | |
267 decoded_images_ref_counts_.end()) { | |
268 decoded_image->Unlock(); | |
269 } | |
270 | |
271 // At this point, this image could already have been decoded and cached by an |
272 // already-running raster task from a previous schedule. If that cached copy |
273 // is still locked (or can be locked), use it and drop our decode; otherwise |
274 // replace the stale entry with ours. |
275 image_it = FindImage(&decoded_images_, key); | |
276 if (image_it != decoded_images_.end()) { | |
277 if (image_it->second->is_locked() || image_it->second->Lock()) { | |
278 pending_image_tasks_.erase(key); | |
279 return; | |
280 } | |
281 decoded_images_.erase(image_it); | |
282 } | |
283 decoded_images_.push_back(AnnotatedDecodedImage(key, decoded_image)); | |
284 SanityCheckState(__LINE__, true); | |
285 } | |
286 | |
287 scoped_refptr<ImageDecodeController::DecodedImage> | |
288 ImageDecodeController::DecodeImageInternal(const ImageKey& key, | |
289 const SkImage* image) { | |
290 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImageInternal", "key", | |
291 key.ToString()); | |
292 | |
293 // Get the decoded image first (at the original scale). | |
294 SkImageInfo decoded_info = | |
295 SkImageInfo::MakeN32Premul(image->width(), image->height()); | |
296 scoped_ptr<uint8_t[]> decoded_pixels( | |
297 new uint8_t[decoded_info.minRowBytes() * decoded_info.height()]); | |
298 bool result = image->readPixels(decoded_info, decoded_pixels.get(), | |
299 decoded_info.minRowBytes(), 0, 0, | |
300 SkImage::kAllow_CachingHint); | |
301 DCHECK(result); | |
302 | |
303 skia::RefPtr<SkImage> decoded_image = skia::AdoptRef(SkImage::NewFromRaster( | |
304 decoded_info, decoded_pixels.get(), decoded_info.minRowBytes(), | |
305 [](const void* pixels, void* context) {}, nullptr)); | |
306 | |
307 // Now scale the pixels into the destination size. | |
308 SkImageInfo scaled_info = SkImageInfo::MakeN32Premul( | |
309 key.target_size().width(), key.target_size().height()); | |
310 scoped_ptr<base::DiscardableMemory> scaled_pixels = | |
311 base::DiscardableMemoryAllocator::GetInstance() | |
312 ->AllocateLockedDiscardableMemory(scaled_info.minRowBytes() * | |
313 scaled_info.height()); | |
314 SkPixmap scaled_pixmap(scaled_info, scaled_pixels->data(), | |
315 scaled_info.minRowBytes()); | |
316 // TODO(vmpstr): Start handling more than just high filter quality. | |
317 DCHECK_EQ(kHigh_SkFilterQuality, key.filter_quality()); | |
318 result = decoded_image->scalePixels(scaled_pixmap, kHigh_SkFilterQuality, | |
319 SkImage::kDisallow_CachingHint); | |
320 DCHECK(result); | |
77 return make_scoped_refptr( | 321 return make_scoped_refptr( |
78 new ImageDecodeTaskImpl(this, image, layer_id, prepare_tiles_id)); | 322 new DecodedImage(scaled_info, std::move(scaled_pixels))); |
79 } | 323 } |
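
A simplified standalone sketch of the two-step pipeline above, using SkBitmap-backed memory to sidestep the discardable-memory and locking plumbing (same Skia calls as the code, assuming high filter quality, which is all this path handles for now):

    #include <cassert>
    #include "skia/ext/refptr.h"
    #include "third_party/skia/include/core/SkBitmap.h"
    #include "third_party/skia/include/core/SkImage.h"
    #include "third_party/skia/include/core/SkPixmap.h"

    SkBitmap DecodeAndScale(const SkImage* image, int target_w, int target_h) {
      // Step 1: decode at the original size into a raster buffer.
      SkBitmap decoded;
      decoded.allocN32Pixels(image->width(), image->height());
      bool ok = image->readPixels(decoded.info(), decoded.getPixels(),
                                  decoded.rowBytes(), 0, 0,
                                  SkImage::kAllow_CachingHint);
      assert(ok);

      // Step 2: scale those pixels into a target-sized buffer. The source is
      // already fully decoded, so caching the intermediate is disallowed.
      SkBitmap scaled;
      scaled.allocN32Pixels(target_w, target_h);
      SkPixmap scaled_pixmap;
      ok = scaled.peekPixels(&scaled_pixmap);
      assert(ok);
      skia::RefPtr<SkImage> decoded_image =
          skia::AdoptRef(SkImage::NewFromBitmap(decoded));
      ok = decoded_image->scalePixels(scaled_pixmap, kHigh_SkFilterQuality,
                                      SkImage::kDisallow_CachingHint);
      assert(ok);
      return scaled;
    }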
80 | 324 |
81 void ImageDecodeController::DecodeImage(const SkImage* image) { | 325 DecodedDrawImage ImageDecodeController::GetDecodedImageForDraw( |
82 image->preroll(); | 326 const DrawImage& draw_image) { |
83 } | 327 ImageKey key = ImageKey::FromDrawImage(draw_image); |
84 | 328 TRACE_EVENT1("cc", "ImageDecodeController::GetDecodedImageAndRef", "key", |
85 void ImageDecodeController::AddLayerUsedCount(int layer_id) { | 329 key.ToString()); |
86 ++used_layer_counts_[layer_id]; | 330 if (!ShouldDecodeAndScaleImage(key, draw_image)) { |
87 } | 331 return DecodedDrawImage(draw_image.image(), SkSize::Make(1.f, 1.f), |
88 | 332 draw_image.filter_quality()); |
89 void ImageDecodeController::SubtractLayerUsedCount(int layer_id) { | 333 } |
90 if (--used_layer_counts_[layer_id]) | 334 |
335 base::AutoLock lock(lock_); | |
336 auto decoded_images_it = FindImage(&decoded_images_, key); | |
337 // If we found the image and it's locked, then return it. If it's not locked, | |
338 // erase it from the cache since it might be put into the at-raster cache. | |
339 scoped_refptr<DecodedImage> decoded_image; | |
340 if (decoded_images_it != decoded_images_.end()) { | |
341 decoded_image = decoded_images_it->second; | |
342 if (decoded_image->is_locked()) { | |
343 RefImage(key); | |
344 SanityCheckState(__LINE__, true); | |
345 return DecodedDrawImage(decoded_image->image(), | |
346 GetScaleAdjustment(key, draw_image.image()), | |
347 kLow_SkFilterQuality); | |
348 } else { | |
349 decoded_images_.erase(decoded_images_it); | |
350 } | |
351 } | |
352 | |
353 // See if another thread already decoded this image at raster time. If so, we | |
354 // can just use that result directly. | |
355 auto at_raster_images_it = FindImage(&at_raster_decoded_images_, key); | |
356 if (at_raster_images_it != at_raster_decoded_images_.end()) { | |
357 DCHECK(at_raster_images_it->second->is_locked()); | |
358 RefAtRasterImage(key); | |
359 SanityCheckState(__LINE__, true); | |
360 auto decoded_draw_image = DecodedDrawImage( | |
361 at_raster_images_it->second->image(), | |
362 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality); | |
363 decoded_draw_image.set_at_raster_decode(true); | |
364 return decoded_draw_image; | |
365 } | |
366 | |
367 // Now we know that we don't have a locked image, and we seem to be the first | |
368 // thread encountering this image (that might not be true, since other threads | |
369 // might be decoding it already). This means that we need to decode the image | |
370 // assuming we can't lock the one we found in the cache. | |
371 bool check_at_raster_cache = false; | |
372 if (!decoded_image || !decoded_image->Lock()) { | |
373 // Note that we have to release the lock, since this lock is also accessed | |
374 // on the compositor thread. This means holding on to the lock might stall | |
375 // the compositor thread for the duration of the decode! | |
376 base::AutoUnlock unlock(lock_); | |
377 decoded_image = DecodeImageInternal(key, draw_image.image()); | |
378 check_at_raster_cache = true; | |
379 } | |
380 | |
381 // While the lock was released, another thread could have decoded this image |
382 // and put it in the at-raster cache. Look it up |
383 // first. | |
384 bool need_to_add_image_to_cache = true; | |
385 if (check_at_raster_cache) { | |
386 at_raster_images_it = FindImage(&at_raster_decoded_images_, key); | |
387 if (at_raster_images_it != at_raster_decoded_images_.end()) { | |
388 // We have to drop our decode, since the one in the cache is being used by | |
389 // another thread. | |
390 decoded_image->Unlock(); | |
391 decoded_image = at_raster_images_it->second; | |
392 need_to_add_image_to_cache = false; | |
393 } | |
394 } | |
395 | |
396 // If we really are the first ones, or if the other thread already unlocked | |
397 // the image, then put our work into the at-raster cache. |
398 if (need_to_add_image_to_cache) { | |
399 at_raster_decoded_images_.push_back( | |
400 AnnotatedDecodedImage(key, decoded_image)); | |
401 } | |
402 | |
403 DCHECK(decoded_image); | |
404 DCHECK(decoded_image->is_locked()); | |
405 RefAtRasterImage(key); | |
406 SanityCheckState(__LINE__, true); | |
407 auto decoded_draw_image = DecodedDrawImage( | |
408 decoded_image->image(), GetScaleAdjustment(key, draw_image.image()), | |
409 kLow_SkFilterQuality); | |
410 decoded_draw_image.set_at_raster_decode(true); | |
411 return decoded_draw_image; | |
412 } | |
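
A hypothetical raster-side sketch of how the two halves pair up: every GetDecodedImageForDraw must be matched by a DrawWithImageFinished once the draw has been issued, and the draw uses the returned pre-scaled image, scale adjustment, and filter quality instead of the originals (the canvas plumbing here is illustrative, not the actual cc raster path):

    #include "third_party/skia/include/core/SkCanvas.h"
    #include "third_party/skia/include/core/SkPaint.h"

    void RasterOneImage(ImageDecodeController* controller,
                        SkCanvas* canvas,
                        const DrawImage& draw_image) {
      DecodedDrawImage decoded = controller->GetDecodedImageForDraw(draw_image);
      canvas->save();
      // The cached image was decoded pre-scaled; compensate with the returned
      // adjustment and draw with the (now sufficient) low filter quality.
      canvas->scale(decoded.scale_adjustment().width(),
                    decoded.scale_adjustment().height());
      SkPaint paint;
      paint.setFilterQuality(decoded.filter_quality());
      canvas->drawImage(decoded.image(), 0.f, 0.f, &paint);
      canvas->restore();
      controller->DrawWithImageFinished(draw_image, decoded);
    }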
413 | |
414 void ImageDecodeController::DrawWithImageFinished( | |
415 const DrawImage& image, | |
416 const DecodedDrawImage& decoded_image) { | |
417 TRACE_EVENT1("cc", "ImageDecodeController::DrawWithImageFinished", "key", | |
418 ImageKey::FromDrawImage(image).ToString()); | |
419 ImageKey key = ImageKey::FromDrawImage(image); | |
420 if (!ShouldDecodeAndScaleImage(key, image)) | |
91 return; | 421 return; |
92 | 422 |
93 // Clean up decode tasks once a layer is no longer used. | 423 if (decoded_image.is_at_raster_decode()) |
94 used_layer_counts_.erase(layer_id); | 424 UnrefAtRasterImage(key); |
95 image_decode_tasks_.erase(layer_id); | 425 else |
96 } | 426 UnrefImage(image); |
97 | 427 SanityCheckState(__LINE__, false); |
98 void ImageDecodeController::OnImageDecodeTaskCompleted(int layer_id, | 428 } |
99 const SkImage* image, | 429 |
100 bool was_canceled) { | 430 void ImageDecodeController::RefAtRasterImage(const ImageKey& key) { |
101 // If the task has successfully finished, then keep the task until the layer | 431 TRACE_EVENT1("cc", "ImageDecodeController::RefAtRasterImage", "key", |
102 // is no longer in use. This ensures that we only decode an image once. | 432 key.ToString()); |
103 // TODO(vmpstr): Remove this when decode lifetime is controlled by cc. | 433 DCHECK(FindImage(&at_raster_decoded_images_, key) != |
104 if (!was_canceled) | 434 at_raster_decoded_images_.end()); |
435 ++at_raster_decoded_images_ref_counts_[key]; | |
436 } | |
437 | |
438 void ImageDecodeController::UnrefAtRasterImage(const ImageKey& key) { | |
439 TRACE_EVENT1("cc", "ImageDecodeController::UnrefAtRasterImage", "key", | |
440 key.ToString()); | |
441 base::AutoLock lock(lock_); | |
442 | |
443 auto ref_it = at_raster_decoded_images_ref_counts_.find(key); | |
444 DCHECK(ref_it != at_raster_decoded_images_ref_counts_.end()); | |
445 --ref_it->second; | |
446 if (ref_it->second == 0) { | |
447 at_raster_decoded_images_ref_counts_.erase(ref_it); | |
448 auto at_raster_image_it = FindImage(&at_raster_decoded_images_, key); | |
449 DCHECK(at_raster_image_it != at_raster_decoded_images_.end()); | |
450 | |
451 // The ref for our image reached 0 and it's still locked. We need to figure | |
452 // out the best thing to do with the image. There are several |
453 // situations: | |
454 // 1. The image is not in the main cache and... | |
455 // 1a. ... its ref count is 0: unlock our image and put it in the main | |
456 // cache. | |
457 // 1b. ... ref count is not 0: keep the image locked and put it in the | |
458 // main cache. | |
459 // 2. The image is in the main cache... | |
460 // 2a. ... and is locked: unlock our image and discard it | |
461 // 2b. ... and is unlocked and... | |
462 // 2b1. ... its ref count is 0: unlock our image and replace the | |
463 // existing one with ours. | |
464 // 2b2. ... its ref count is not 0: this shouldn't be possible. | |
465 auto image_it = FindImage(&decoded_images_, key); | |
466 if (image_it == decoded_images_.end()) { | |
467 if (decoded_images_ref_counts_.find(key) == | |
468 decoded_images_ref_counts_.end()) { | |
469 at_raster_image_it->second->Unlock(); | |
470 } | |
471 decoded_images_.push_back(*at_raster_image_it); | |
472 } else if (image_it->second->is_locked()) { | |
473 at_raster_image_it->second->Unlock(); | |
474 } else { | |
475 DCHECK(decoded_images_ref_counts_.find(key) == | |
476 decoded_images_ref_counts_.end()); | |
477 at_raster_image_it->second->Unlock(); | |
478 decoded_images_.erase(image_it); | |
479 decoded_images_.push_back(*at_raster_image_it); | |
480 } | |
481 at_raster_decoded_images_.erase(at_raster_image_it); | |
482 } | |
483 } | |
484 | |
485 bool ImageDecodeController::ShouldDecodeAndScaleImage(const ImageKey& key, | |
486 const DrawImage& image) { | |
487 // TODO(vmpstr): Handle GPU rasterization. | |
488 if (is_using_gpu_rasterization_) | |
489 return false; | |
490 if (!CanHandleFilterQuality(key.filter_quality())) | |
491 return false; | |
492 return true; | |
493 } | |
494 | |
495 bool ImageDecodeController::CanHandleFilterQuality( | |
496 SkFilterQuality filter_quality) { | |
497 // We don't need to handle low quality filters. | |
498 if (filter_quality == kLow_SkFilterQuality || | |
499 filter_quality == kNone_SkFilterQuality) { | |
500 return false; | |
501 } | |
502 | |
503 // TODO(vmpstr): We need to start caching mipmaps for medium quality and | |
504 // caching the interpolated values from those. For now, we don't have this. | |
505 if (filter_quality == kMedium_SkFilterQuality) | |
506 return false; | |
507 DCHECK(filter_quality == kHigh_SkFilterQuality); | |
508 return true; | |
509 } | |
510 | |
511 void ImageDecodeController::ReduceCacheUsage() { | |
512 TRACE_EVENT0("cc", "ImageDecodeController::ReduceCacheUsage"); | |
513 base::AutoLock lock(lock_); | |
514 size_t num_to_remove = (decoded_images_.size() > kMaxItemsInCache) | |
515 ? (decoded_images_.size() - kMaxItemsInCache) | |
516 : 0; | |
517 for (auto it = decoded_images_.begin(); | |
518 num_to_remove != 0 && it != decoded_images_.end();) { | |
519 if (it->second->is_locked()) { | |
520 ++it; | |
521 continue; | |
522 } | |
523 | |
524 it = decoded_images_.erase(it); | |
525 --num_to_remove; | |
526 } | |
527 } | |
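
A standalone sketch of the eviction loop above: because entries are appended on decode, the front of the deque holds the oldest entries, so skipping locked items and erasing from the front approximates LRU trimming down to the item limit:

    #include <cstddef>
    #include <deque>

    struct Entry { bool locked; };

    void Trim(std::deque<Entry>* cache, size_t max_items) {
      size_t num_to_remove =
          cache->size() > max_items ? cache->size() - max_items : 0;
      for (auto it = cache->begin();
           num_to_remove != 0 && it != cache->end();) {
        if (it->locked) {
          ++it;  // locked entries are in use; they ignore the limit
          continue;
        }
        it = cache->erase(it);
        --num_to_remove;
      }
    }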
528 | |
529 void ImageDecodeController::RemovePendingTask(const ImageKey& key) { | |
530 base::AutoLock lock(lock_); | |
531 pending_image_tasks_.erase(key); | |
532 } | |
533 | |
534 void ImageDecodeController::SetIsUsingGpuRasterization( | |
535 bool is_using_gpu_rasterization) { | |
536 if (is_using_gpu_rasterization_ == is_using_gpu_rasterization) | |
105 return; | 537 return; |
106 | 538 is_using_gpu_rasterization_ = is_using_gpu_rasterization; |
107 // Otherwise, we have to clean up the task so that a new one can be created if | 539 |
108 // we need to decode the image again. | 540 base::AutoLock lock(lock_); |
109 LayerImageTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id); | 541 |
110 if (layer_it == image_decode_tasks_.end()) | 542 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); |
111 return; | 543 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); |
112 | 544 DCHECK(std::find_if(decoded_images_.begin(), decoded_images_.end(), |
113 ImageTaskMap& image_tasks = layer_it->second; | 545 [](const AnnotatedDecodedImage& image) { |
114 ImageTaskMap::iterator task_it = image_tasks.find(image->uniqueID()); | 546 return image.second->is_locked(); |
115 if (task_it == image_tasks.end()) | 547 }) == decoded_images_.end()); |
116 return; | 548 DCHECK(std::find_if(at_raster_decoded_images_.begin(), |
117 image_tasks.erase(task_it); | 549 at_raster_decoded_images_.end(), |
550 [](const AnnotatedDecodedImage& image) { | |
551 return image.second->is_locked(); | |
552 }) == at_raster_decoded_images_.end()); | |
553 decoded_images_.clear(); | |
554 at_raster_decoded_images_.clear(); | |
555 } | |
556 | |
557 size_t ImageDecodeController::SanityCheckState(int line, bool lock_acquired) { | |
558 #if DCHECK_IS_ON() | |
559 if (!lock_acquired) { | |
560 base::AutoLock lock(lock_); | |
561 return SanityCheckState(line, true); | |
562 } | |
563 | |
564 MemoryBudget budget(kLockedMemoryLimitBytes); | |
565 for (const auto& annotated_image : decoded_images_) { | |
566 auto ref_it = decoded_images_ref_counts_.find(annotated_image.first); | |
567 if (annotated_image.second->is_locked()) { | |
568 budget.AddUsage(annotated_image.first.target_bytes()); | |
569 DCHECK(ref_it != decoded_images_ref_counts_.end()) << line; | |
570 } else { | |
571 DCHECK(ref_it == decoded_images_ref_counts_.end() || | |
572 pending_image_tasks_.find(annotated_image.first) != | |
573 pending_image_tasks_.end()) | |
574 << line; | |
575 } | |
576 } | |
577 DCHECK_GE(budget.AvailableMemoryBytes(), | |
578 locked_images_budget_.AvailableMemoryBytes()) | |
579 << line; | |
580 return budget.AvailableMemoryBytes(); | |
581 #else | |
582 return 0u; | |
583 #endif // DCHECK_IS_ON() | |
584 } | |
585 | |
586 // ImageDecodeControllerKey | |
587 ImageDecodeControllerKey ImageDecodeControllerKey::FromDrawImage( | |
588 const DrawImage& image) { | |
589 const SkSize& scale = image.scale(); | |
590 gfx::Size target_size( | |
591 std::abs(SkScalarRoundToInt(image.image()->width() * scale.width())), | |
ericrk 2015/12/04 00:50:46: maybe this is fine, but SkScalarRoundToInt is defi…
592 std::abs(SkScalarRoundToInt(image.image()->height() * scale.height()))); | |
593 | |
594 // Start with the quality that was requested, but drop down immediately to low | |
595 // if we're not actually going to do any scaling. |
596 SkFilterQuality quality = image.filter_quality(); | |
597 if (target_size.width() == image.image()->width() && | |
598 target_size.height() == image.image()->height()) { | |
599 quality = std::min(quality, kLow_SkFilterQuality); | |
600 } | |
601 | |
602 // Drop from high to medium if the image has perspective applied, the matrix | |
603 // we applied wasn't decomposable, or if the scaled image will be too large. | |
604 if (quality == kHigh_SkFilterQuality) { | |
605 if (image.matrix_has_perspective() || !image.matrix_is_decomposable()) { | |
606 quality = kMedium_SkFilterQuality; | |
607 } else { | |
608 base::CheckedNumeric<size_t> size = 4u; | |
609 size *= target_size.width(); | |
610 size *= target_size.height(); | |
611 if (size.ValueOrDefault(std::numeric_limits<size_t>::max()) > | |
612 kMaxHighQualityImageSizeBytes) { | |
613 quality = kMedium_SkFilterQuality; | |
614 } | |
615 } | |
616 } | |
617 | |
618 // Drop from medium to low if the matrix we applied wasn't decomposable or if | |
619 // we're enlarging the image in both dimensions. | |
620 if (quality == kMedium_SkFilterQuality) { | |
621 if (!image.matrix_is_decomposable() || | |
622 (scale.width() >= 1.f && scale.height() >= 1.f)) { | |
623 quality = kLow_SkFilterQuality; | |
624 } | |
625 } | |
626 | |
627 return ImageDecodeControllerKey(image.image()->uniqueID(), target_size, | |
628 quality); | |
629 } | |
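
A standalone sketch of the downgrade chain above, with plain parameters standing in for the DrawImage accessors (and without the std::abs handling of negative scales). For example, a 6000x6000 image drawn at 1.5x with high quality first drops to medium (4 * 9000 * 9000 = 324,000,000 bytes exceeds the 64MB high-quality cap) and then to low (it is enlarged in both dimensions):

    #include <algorithm>
    #include <cstddef>

    enum Quality { kNone, kLow, kMedium, kHigh };

    Quality ChooseQuality(Quality requested, int orig_w, int orig_h,
                          float scale_x, float scale_y,
                          bool has_perspective, bool decomposable) {
      const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024;
      int target_w = static_cast<int>(orig_w * scale_x + 0.5f);
      int target_h = static_cast<int>(orig_h * scale_y + 0.5f);
      Quality quality = requested;
      // No scaling: anything above low buys nothing.
      if (target_w == orig_w && target_h == orig_h)
        quality = std::min(quality, kLow);
      if (quality == kHigh) {
        size_t size = 4u * static_cast<size_t>(target_w) * target_h;
        if (has_perspective || !decomposable ||
            size > kMaxHighQualityImageSizeBytes)
          quality = kMedium;
      }
      // Enlarging in both dimensions: mipmaps (medium) won't help either.
      if (quality == kMedium &&
          (!decomposable || (scale_x >= 1.f && scale_y >= 1.f)))
        quality = kLow;
      return quality;
    }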
630 | |
631 ImageDecodeControllerKey::ImageDecodeControllerKey( | |
632 uint32_t image_id, | |
633 const gfx::Size& size, | |
634 SkFilterQuality filter_quality) | |
635 : image_id_(image_id), size_(size), filter_quality_(filter_quality) {} | |
636 | |
637 std::string ImageDecodeControllerKey::ToString() const { | |
638 std::ostringstream str; | |
639 str << "id[" << image_id_ << "] size[" << size_.width() << "x" | |
640 << size_.height() << "] filter_quality[" << filter_quality_ << "]"; | |
641 return str.str(); | |
642 } | |
643 | |
644 // DecodedImage | |
645 ImageDecodeController::DecodedImage::DecodedImage( | |
646 const SkImageInfo& info, | |
647 scoped_ptr<base::DiscardableMemory> memory) | |
648 : locked_(true), image_info_(info), memory_(memory.Pass()) { | |
649 image_ = skia::AdoptRef(SkImage::NewFromRaster( | |
650 image_info_, memory_->data(), image_info_.minRowBytes(), | |
651 [](const void* pixels, void* context) {}, nullptr)); | |
652 } | |
653 | |
654 ImageDecodeController::DecodedImage::~DecodedImage() {} | |
655 | |
656 bool ImageDecodeController::DecodedImage::Lock() { | |
657 DCHECK(!locked_); | |
658 bool success = memory_->Lock(); | |
659 if (!success) | |
660 return false; | |
661 locked_ = true; | |
662 return true; | |
663 } | |
664 | |
665 void ImageDecodeController::DecodedImage::Unlock() { | |
666 DCHECK(locked_); | |
667 memory_->Unlock(); | |
668 locked_ = false; | |
669 } | |
670 | |
671 // MemoryBudget | |
672 ImageDecodeController::MemoryBudget::MemoryBudget(size_t limit_bytes) | |
673 : limit_bytes_(limit_bytes), current_usage_bytes_(0u) {} | |
674 | |
675 size_t ImageDecodeController::MemoryBudget::AvailableMemoryBytes() const { | |
676 size_t usage = GetCurrentUsageSafe(); | |
677 return usage >= limit_bytes_ ? 0u : (limit_bytes_ - usage); | |
678 } | |
679 | |
680 void ImageDecodeController::MemoryBudget::AddUsage(size_t usage) { | |
681 current_usage_bytes_ += usage; | |
682 } | |
683 | |
684 void ImageDecodeController::MemoryBudget::SubtractUsage(size_t usage) { | |
685 DCHECK_GE(current_usage_bytes_.ValueOrDefault(0u), usage); | |
686 current_usage_bytes_ -= usage; | |
687 } | |
688 | |
689 void ImageDecodeController::MemoryBudget::ResetUsage() { | |
690 current_usage_bytes_ = 0; | |
691 } | |
692 | |
693 size_t ImageDecodeController::MemoryBudget::GetCurrentUsageSafe() const { | |
694 return current_usage_bytes_.ValueOrDie(); | |
118 } | 695 } |
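
MemoryBudget leans on base::CheckedNumeric so that arithmetic bugs surface as detectable invalid states instead of silent wraparound, and AvailableMemoryBytes saturates at zero. A small standalone sketch of those two behaviors (main is illustrative; CheckedNumeric, ValueOrDie, and ValueOrDefault are the real base API, as used in this file):

    #include <cstddef>
    #include <iostream>
    #include "base/numerics/safe_math.h"

    int main() {
      base::CheckedNumeric<size_t> usage = 0u;
      const size_t limit = 100u;
      usage += 150u;  // valid arithmetic, simply over the limit
      size_t value = usage.ValueOrDie();
      // Available memory saturates at zero instead of underflowing.
      std::cout << (value >= limit ? 0u : limit - value) << "\n";  // prints 0
      usage -= 200u;  // unsigned underflow: the checked value is now invalid
      // usage.ValueOrDie() would now crash; ValueOrDefault gives a fallback.
      std::cout << usage.ValueOrDefault(0u) << "\n";  // prints 0
      return 0;
    }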
119 | 696 |
120 } // namespace cc | 697 } // namespace cc |