OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/tiles/image_decode_controller.h" | 5 #include "cc/tiles/image_decode_controller.h" |
6 | 6 |
| 7 #include "base/memory/discardable_memory.h" |
7 #include "cc/debug/devtools_instrumentation.h" | 8 #include "cc/debug/devtools_instrumentation.h" |
| 9 #include "third_party/skia/include/core/SkCanvas.h" |
| 10 #include "third_party/skia/include/core/SkImage.h" |
| 11 #include "ui/gfx/skia_util.h" |
8 | 12 |
9 namespace cc { | 13 namespace cc { |
10 namespace { | 14 namespace { |
11 | 15 |
| 16 // The amount of memory we can lock ahead of time (128MB). This limit is only |
| 17 // used to inform the caller of the amount of space available in the cache. The |
| 18 // caller can still request tasks which can cause this limit to be breached. |
| 19 const size_t kLockedMemoryLimitBytes = 128 * 1024 * 1024; |
| 20 |
| 21 // The largest single high-quality image to try to process. Images above this |
| 22 // size will drop down to medium quality. |
| 23 const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024; |
| 24 |
| 25 // The number of entries to keep around in the cache. This limit can be |
| 26 // breached when more items than this are locked, since locked items ignore it. |
| 27 const size_t kMaxItemsInCache = 100; |
| 28 |
12 class ImageDecodeTaskImpl : public ImageDecodeTask { | 29 class ImageDecodeTaskImpl : public ImageDecodeTask { |
13 public: | 30 public: |
14 ImageDecodeTaskImpl(ImageDecodeController* controller, | 31 ImageDecodeTaskImpl(ImageDecodeController* controller, |
15 const SkImage* image, | 32 const ImageDecodeController::ImageKey& image_key, |
16 int layer_id, | 33 const DrawImage& image, |
17 uint64_t source_prepare_tiles_id) | 34 uint64_t source_prepare_tiles_id) |
18 : controller_(controller), | 35 : controller_(controller), |
19 image_(skia::SharePtr(image)), | 36 image_key_(image_key), |
20 layer_id_(layer_id), | 37 image_(image), |
| 38 image_ref_(skia::SharePtr(image.image())), |
21 source_prepare_tiles_id_(source_prepare_tiles_id) {} | 39 source_prepare_tiles_id_(source_prepare_tiles_id) {} |
22 | 40 |
23 // Overridden from Task: | 41 // Overridden from Task: |
24 void RunOnWorkerThread() override { | 42 void RunOnWorkerThread() override { |
25 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", | 43 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", |
26 "source_prepare_tiles_id", source_prepare_tiles_id_); | 44 "source_prepare_tiles_id", source_prepare_tiles_id_); |
27 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( | 45 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( |
28 image_.get()); | 46 image_ref_.get()); |
29 controller_->DecodeImage(image_.get()); | 47 controller_->DecodeImage(image_key_, image_); |
30 | |
31 // Release the reference after decoding image to ensure that it is not kept | |
32 // alive unless needed. | |
33 image_.clear(); | |
34 } | 48 } |
35 | 49 |
36 // Overridden from TileTask: | 50 // Overridden from TileTask: |
37 void ScheduleOnOriginThread(TileTaskClient* client) override {} | 51 void ScheduleOnOriginThread(TileTaskClient* client) override {} |
38 void CompleteOnOriginThread(TileTaskClient* client) override { | 52 void CompleteOnOriginThread(TileTaskClient* client) override { |
39 controller_->OnImageDecodeTaskCompleted(layer_id_, image_.get(), | 53 controller_->RemovePendingTask(image_key_); |
40 !HasFinishedRunning()); | |
41 } | 54 } |
42 | 55 |
43 protected: | 56 protected: |
44 ~ImageDecodeTaskImpl() override {} | 57 ~ImageDecodeTaskImpl() override {} |
45 | 58 |
46 private: | 59 private: |
47 ImageDecodeController* controller_; | 60 ImageDecodeController* controller_; |
48 skia::RefPtr<const SkImage> image_; | 61 ImageDecodeController::ImageKey image_key_; |
49 int layer_id_; | 62 DrawImage image_; |
| 63 skia::RefPtr<const SkImage> image_ref_; |
50 uint64_t source_prepare_tiles_id_; | 64 uint64_t source_prepare_tiles_id_; |
51 | 65 |
52 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); | 66 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); |
53 }; | 67 }; |
54 | 68 |
| 69 template <typename Type> |
| 70 typename std::deque<Type>::iterator FindImage( |
| 71 std::deque<Type>* collection, |
| 72 const ImageDecodeControllerKey& key) { |
| 73 return std::find_if(collection->begin(), collection->end(), |
| 74 [key](const Type& image) { return image.first == key; }); |
| 75 } |
| 76 |
| 77 SkSize GetScaleAdjustment(const ImageDecodeControllerKey& key, |
| 78 const SkImage* original_image) { |
| 79 float x_scale = |
| 80 key.target_size().width() / static_cast<float>(original_image->width()); |
| 81 float y_scale = |
| 82 key.target_size().height() / static_cast<float>(original_image->height()); |
| 83 return SkSize::Make(x_scale, y_scale); |
| 84 } |
| 85 |
55 } // namespace | 86 } // namespace |
56 | 87 |
57 ImageDecodeController::ImageDecodeController() {} | 88 ImageDecodeController::ImageDecodeController() |
58 | 89 : is_using_gpu_rasterization_(false), |
59 ImageDecodeController::~ImageDecodeController() {} | 90 locked_images_budget_(kLockedMemoryLimitBytes) {} |
60 | 91 |
61 scoped_refptr<ImageDecodeTask> ImageDecodeController::GetTaskForImage( | 92 ImageDecodeController::~ImageDecodeController() { |
| 93 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); |
| 94 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); |
| 95 } |
| 96 |
| 97 bool ImageDecodeController::GetTaskForImageAndRef( |
62 const DrawImage& image, | 98 const DrawImage& image, |
63 int layer_id, | 99 uint64_t prepare_tiles_id, |
64 uint64_t prepare_tiles_id) { | 100 scoped_refptr<ImageDecodeTask>* task) { |
65 uint32_t generation_id = image.image()->uniqueID(); | 101 // If the image already exists or if we're going to create a task for it, then |
66 scoped_refptr<ImageDecodeTask>& decode_task = | 102 // we'll likely need to ref this image (the exception is if we're prerolling |
67 image_decode_tasks_[layer_id][generation_id]; | 103 // the image only). That means the image is or will be in the cache. When the |
68 if (!decode_task) | 104 // ref goes to 0, it will be unpinned but will remain in the cache. If the |
69 decode_task = CreateTaskForImage(image.image(), layer_id, prepare_tiles_id); | 105 // image does not fit into the budget, then we don't ref this image, since it |
70 return decode_task; | 106 // will be decoded at raster time which is when it will be temporarily put in |
71 } | 107 // the cache. |
72 | 108 ImageKey key = ImageKey::FromDrawImage(image); |
73 scoped_refptr<ImageDecodeTask> ImageDecodeController::CreateTaskForImage( | 109 TRACE_EVENT1("cc", "ImageDecodeController::GetTaskForImageAndRef", "key", |
74 const SkImage* image, | 110 key.ToString()); |
75 int layer_id, | 111 // If we're not going to do a scale, we will just create a task to preroll the |
76 uint64_t prepare_tiles_id) { | 112 // image the first time we see it. This doesn't need to account for memory. |
| 113 // TODO(vmpstr): We can also lock the original sized image, in which case it |
| 114 // does require memory bookkeeping. |
| 115 if (!CanHandleImage(key, image)) { |
| 116 base::AutoLock lock(lock_); |
| 117 if (prerolled_images_.count(key.image_id()) == 0) { |
| 118 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; |
| 119 if (!existing_task) { |
| 120 existing_task = make_scoped_refptr( |
| 121 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); |
| 122 } |
| 123 *task = existing_task; |
| 124 } else { |
| 125 *task = nullptr; |
| 126 } |
| 127 return false; |
| 128 } |
| 129 |
| 130 base::AutoLock lock(lock_); |
| 131 |
| 132 // If we already have the image in cache, then we can return it. |
| 133 auto decoded_it = FindImage(&decoded_images_, key); |
| 134 bool new_image_fits_in_memory = |
| 135 locked_images_budget_.AvailableMemoryBytes() >= key.target_bytes(); |
| 136 if (decoded_it != decoded_images_.end()) { |
| 137 if (decoded_it->second->is_locked() || |
| 138 (new_image_fits_in_memory && decoded_it->second->Lock())) { |
| 139 RefImage(key); |
| 140 *task = nullptr; |
| 141 SanityCheckState(__LINE__, true); |
| 142 return true; |
| 143 } |
| 144 // If the image fits in memory, then we tried to lock it above and failed, |
| 145 // which means its backing memory was purged; drop the stale entry. |
| 146 if (new_image_fits_in_memory) |
| 147 decoded_images_.erase(decoded_it); |
| 148 } |
| 149 |
| 150 // If the task exists, return it. |
| 151 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; |
| 152 if (existing_task) { |
| 153 RefImage(key); |
| 154 *task = existing_task; |
| 155 SanityCheckState(__LINE__, true); |
| 156 return true; |
| 157 } |
| 158 |
| 159 // At this point, we have to create a new image/task, so we need to abort if |
| 160 // it doesn't fit into memory, unless an existing ref has already accounted |
| 161 // for that memory. The latter is possible if a running raster task that |
| 162 // could not be canceled still holds a ref to the image that is now being |
| 163 // reffed for the new schedule. |
| 164 if (!new_image_fits_in_memory && (decoded_images_ref_counts_.find(key) == |
| 165 decoded_images_ref_counts_.end())) { |
| 166 *task = nullptr; |
| 167 SanityCheckState(__LINE__, true); |
| 168 return false; |
| 169 } |
| 170 |
| 171 // Actually create the task. RefImage will account for memory on the first |
| 172 // ref. |
| 173 RefImage(key); |
| 174 existing_task = make_scoped_refptr( |
| 175 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); |
| 176 *task = existing_task; |
| 177 SanityCheckState(__LINE__, true); |
| 178 return true; |
| 179 } |
| 180 |
| 181 void ImageDecodeController::RefImage(const ImageKey& key) { |
| 182 TRACE_EVENT1("cc", "ImageDecodeController::RefImage", "key", key.ToString()); |
| 183 lock_.AssertAcquired(); |
| 184 int ref = ++decoded_images_ref_counts_[key]; |
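| // Only the first ref for a key accounts for memory; subsequent refs share |
| // the same decode and budget entry. |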
| 185 if (ref == 1) { |
| 186 DCHECK_GE(locked_images_budget_.AvailableMemoryBytes(), key.target_bytes()); |
| 187 locked_images_budget_.AddUsage(key.target_bytes()); |
| 188 } |
| 189 } |
| 190 |
| 191 void ImageDecodeController::UnrefImage(const DrawImage& image) { |
| 192 // When we unref the image, there are several situations we need to consider: |
| 193 // 1. The ref did not reach 0, which means we have to keep the image locked. |
| 194 // 2. The ref reached 0, so we should unlock it: |
| 195 //   2a. The image was never decoded (e.g. its task was canceled), so it |
| 196 //   isn't in the cache and there is nothing to unlock. |
| 197 //   2b. The image is in the cache: unlock it but keep it in the list. |
| 198 const ImageKey& key = ImageKey::FromDrawImage(image); |
| 199 DCHECK(CanHandleImage(key, image)); |
| 200 TRACE_EVENT1("cc", "ImageDecodeController::UnrefImage", "key", |
| 201 key.ToString()); |
| 202 |
| 203 base::AutoLock lock(lock_); |
| 204 auto ref_count_it = decoded_images_ref_counts_.find(key); |
| 205 DCHECK(ref_count_it != decoded_images_ref_counts_.end()); |
| 206 |
| 207 --ref_count_it->second; |
| 208 if (ref_count_it->second == 0) { |
| 209 decoded_images_ref_counts_.erase(ref_count_it); |
| 210 locked_images_budget_.SubtractUsage(key.target_bytes()); |
| 211 |
| 212 auto decoded_image_it = FindImage(&decoded_images_, key); |
| 213 // If the image was never decoded before its ref reached 0, it won't be in |
| 214 // our cache. This happens when tasks are canceled. |
| 215 if (decoded_image_it == decoded_images_.end()) { |
| 216 SanityCheckState(__LINE__, true); |
| 217 return; |
| 218 } |
| 219 DCHECK(decoded_image_it->second->is_locked()); |
| 220 decoded_image_it->second->Unlock(); |
| 221 } |
| 222 SanityCheckState(__LINE__, true); |
| 223 } |
| 224 |
| 225 void ImageDecodeController::DecodeImage(const ImageKey& key, |
| 226 const DrawImage& image) { |
| 227 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImage", "key", |
| 228 key.ToString()); |
| 229 if (!CanHandleImage(key, image)) { |
| 230 image.image()->preroll(); |
| 231 |
| 232 base::AutoLock lock(lock_); |
| 233 prerolled_images_.insert(key.image_id()); |
| 234 // Erase the pending task from the queue, since the task won't be doing |
| 235 // anything useful after this function terminates. Because we never preroll |
| 236 // images twice, this isn't strictly necessary, but it keeps this path |
| 237 // consistent with the other one: when this function finishes, the task isn't |
| 238 // in the pending_image_tasks_ list. |
| 239 pending_image_tasks_.erase(key); |
| 240 return; |
| 241 } |
| 242 |
| 243 base::AutoLock lock(lock_); |
| 244 |
| 245 auto image_it = FindImage(&decoded_images_, key); |
| 246 if (image_it != decoded_images_.end()) { |
| 247 if (image_it->second->is_locked() || image_it->second->Lock()) { |
| 248 pending_image_tasks_.erase(key); |
| 249 return; |
| 250 } |
| 251 decoded_images_.erase(image_it); |
| 252 } |
| 253 |
| 254 scoped_refptr<DecodedImage> decoded_image; |
| 255 { |
| 256 base::AutoUnlock unlock(lock_); |
| 257 decoded_image = DecodeImageInternal(key, image.image()); |
| 258 } |
| 259 |
| 260 // Erase the pending task from the queue, since the task won't be doing |
| 261 // anything useful after this function terminates. That is, if this image |
| 262 // needs to be decoded again, we have to create a new task. |
| 263 pending_image_tasks_.erase(key); |
| 264 |
| 265 // All of the raster tasks could have finished (or been canceled) while this |
| 266 // decode task was running, which means that we now have a locked image but no |
| 267 // ref counts. Unlock it immediately in this case. |
| 268 if (decoded_images_ref_counts_.find(key) == |
| 269 decoded_images_ref_counts_.end()) { |
| 270 decoded_image->Unlock(); |
| 271 } |
| 272 |
| 273 // At this point, it's possible that this image was decoded at raster time by |
| 274 // an already running raster task from a previous schedule. If so, it would |
| 275 // have already been placed into the cache (possibly locked). Prefer the |
| 276 // cached copy and drop ours, unlocking it first if it is still locked. |
| 277 image_it = FindImage(&decoded_images_, key); |
| 278 if (image_it != decoded_images_.end()) { |
| 279 if (image_it->second->is_locked() || image_it->second->Lock()) { |
| 280 if (decoded_image->is_locked()) decoded_image->Unlock(); |
| 281 return; |
| 282 } |
| 283 decoded_images_.erase(image_it); |
| 284 } |
| 285 decoded_images_.push_back(AnnotatedDecodedImage(key, decoded_image)); |
| 286 SanityCheckState(__LINE__, true); |
| 287 } |
| 288 |
| 289 scoped_refptr<ImageDecodeController::DecodedImage> |
| 290 ImageDecodeController::DecodeImageInternal(const ImageKey& key, |
| 291 const SkImage* image) { |
| 292 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImageInternal", "key", |
| 293 key.ToString()); |
| 294 |
| 295 // Get the decoded image first (at the original scale). |
| 296 SkImageInfo decoded_info = SkImageInfo::MakeN32Premul( |
| 297 key.src_rect().width(), key.src_rect().height()); |
| 298 scoped_ptr<uint8_t[]> decoded_pixels( |
| 299 new uint8_t[decoded_info.minRowBytes() * decoded_info.height()]); |
| 300 bool result = image->readPixels( |
| 301 decoded_info, decoded_pixels.get(), decoded_info.minRowBytes(), |
| 302 key.src_rect().x(), key.src_rect().y(), SkImage::kAllow_CachingHint); |
| 303 DCHECK(result); |
| 304 |
| 305 SkPixmap decoded_pixmap(decoded_info, decoded_pixels.get(), |
| 306 decoded_info.minRowBytes()); |
| 307 |
| 308 // TODO(vmpstr): This scale is used to compute the target size to begin with; |
| 309 // see if transporting it in the key is reasonable. |
| 310 float x_scale = |
| 311 key.target_size().width() / static_cast<float>(image->width()); |
| 312 float y_scale = |
| 313 key.target_size().height() / static_cast<float>(image->height()); |
| 314 |
| 315 int new_target_width = SkScalarRoundToInt(key.src_rect().width() * x_scale); |
| 316 int new_target_height = SkScalarRoundToInt(key.src_rect().height() * y_scale); |
| 317 |
| 318 // Now scale the pixels into the destination size. |
| 319 SkImageInfo scaled_info = |
| 320 SkImageInfo::MakeN32Premul(new_target_width, new_target_height); |
| 321 scoped_ptr<base::DiscardableMemory> scaled_pixels = |
| 322 base::DiscardableMemoryAllocator::GetInstance() |
| 323 ->AllocateLockedDiscardableMemory(scaled_info.minRowBytes() * |
| 324 scaled_info.height()); |
| 325 SkPixmap scaled_pixmap(scaled_info, scaled_pixels->data(), |
| 326 scaled_info.minRowBytes()); |
| 327 // TODO(vmpstr): Start handling more than just high filter quality. |
| 328 DCHECK_EQ(kHigh_SkFilterQuality, key.filter_quality()); |
| 329 result = decoded_pixmap.scalePixels(scaled_pixmap, kHigh_SkFilterQuality); |
| 330 DCHECK(result); |
77 return make_scoped_refptr( | 331 return make_scoped_refptr( |
78 new ImageDecodeTaskImpl(this, image, layer_id, prepare_tiles_id)); | 332 new DecodedImage(scaled_info, std::move(scaled_pixels), |
79 } | 333 SkSize::Make(-key.src_rect().x(), -key.src_rect().y()))); |
80 | 334 } |
81 void ImageDecodeController::DecodeImage(const SkImage* image) { | 335 |
82 image->preroll(); | 336 DecodedDrawImage ImageDecodeController::GetDecodedImageForDraw( |
83 } | 337 const DrawImage& draw_image) { |
84 | 338 ImageKey key = ImageKey::FromDrawImage(draw_image); |
85 void ImageDecodeController::AddLayerUsedCount(int layer_id) { | 339 TRACE_EVENT1("cc", "ImageDecodeController::GetDecodedImageForDraw", "key", |
86 ++used_layer_counts_[layer_id]; | 340 key.ToString()); |
87 } | 341 if (!CanHandleImage(key, draw_image)) |
88 | 342 return DecodedDrawImage(draw_image.image(), draw_image.filter_quality()); |
89 void ImageDecodeController::SubtractLayerUsedCount(int layer_id) { | 343 |
90 if (--used_layer_counts_[layer_id]) | 344 base::AutoLock lock(lock_); |
| 345 auto decoded_images_it = FindImage(&decoded_images_, key); |
| 346 // If we found the image and it's locked, then return it. If it's not locked, |
| 347 // erase it from the cache since it might be put into the at-raster cache. |
| 348 scoped_refptr<DecodedImage> decoded_image; |
| 349 if (decoded_images_it != decoded_images_.end()) { |
| 350 decoded_image = decoded_images_it->second; |
| 351 if (decoded_image->is_locked()) { |
| 352 RefImage(key); |
| 353 SanityCheckState(__LINE__, true); |
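| // The decode is already scaled to the target size, so low filter quality |
| // suffices when the decoded image is drawn. |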
| 354 return DecodedDrawImage( |
| 355 decoded_image->image(), decoded_image->src_rect_offset(), |
| 356 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality); |
| 357 } else { |
| 358 decoded_images_.erase(decoded_images_it); |
| 359 } |
| 360 } |
| 361 |
| 362 // See if another thread already decoded this image at raster time. If so, we |
| 363 // can just use that result directly. |
| 364 auto at_raster_images_it = FindImage(&at_raster_decoded_images_, key); |
| 365 if (at_raster_images_it != at_raster_decoded_images_.end()) { |
| 366 DCHECK(at_raster_images_it->second->is_locked()); |
| 367 RefAtRasterImage(key); |
| 368 SanityCheckState(__LINE__, true); |
| 369 auto decoded_draw_image = DecodedDrawImage( |
| 370 at_raster_images_it->second->image(), |
| 371 at_raster_images_it->second->src_rect_offset(), |
| 372 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality); |
| 373 decoded_draw_image.set_at_raster_decode(true); |
| 374 return decoded_draw_image; |
| 375 } |
| 376 |
| 377 // Now we know that we don't have a locked image, and we seem to be the first |
| 378 // thread encountering this image (that might not be true, since other threads |
| 379 // might be decoding it already). This means that we need to decode the image |
| 380 // ourselves, unless we can lock the one we found in the cache. |
| 381 bool check_at_raster_cache = false; |
| 382 if (!decoded_image || !decoded_image->Lock()) { |
| 383 // Note that we have to release the lock, since this lock is also accessed |
| 384 // on the compositor thread. This means holding on to the lock might stall |
| 385 // the compositor thread for the duration of the decode! |
| 386 base::AutoUnlock unlock(lock_); |
| 387 decoded_image = DecodeImageInternal(key, draw_image.image()); |
| 388 check_at_raster_cache = true; |
| 389 } |
| 390 |
| 391 // While the lock was released, it could be the case that another thread |
| 392 // decoded this image already and put it in the at-raster cache. Look it up |
| 393 // first. |
| 394 bool need_to_add_image_to_cache = true; |
| 395 if (check_at_raster_cache) { |
| 396 at_raster_images_it = FindImage(&at_raster_decoded_images_, key); |
| 397 if (at_raster_images_it != at_raster_decoded_images_.end()) { |
| 398 // We have to drop our decode, since the one in the cache is being used by |
| 399 // another thread. |
| 400 decoded_image->Unlock(); |
| 401 decoded_image = at_raster_images_it->second; |
| 402 need_to_add_image_to_cache = false; |
| 403 } |
| 404 } |
| 405 |
| 406 // If we really are the first ones, or if the other thread already unlocked |
| 407 // the image, then put our work into the at-raster cache. |
| 408 if (need_to_add_image_to_cache) { |
| 409 at_raster_decoded_images_.push_back( |
| 410 AnnotatedDecodedImage(key, decoded_image)); |
| 411 } |
| 412 |
| 413 DCHECK(decoded_image); |
| 414 DCHECK(decoded_image->is_locked()); |
| 415 RefAtRasterImage(key); |
| 416 SanityCheckState(__LINE__, true); |
| 417 auto decoded_draw_image = DecodedDrawImage( |
| 418 decoded_image->image(), decoded_image->src_rect_offset(), |
| 419 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality); |
| 420 decoded_draw_image.set_at_raster_decode(true); |
| 421 return decoded_draw_image; |
| 422 } |
| 423 |
| 424 void ImageDecodeController::DrawWithImageFinished( |
| 425 const DrawImage& image, |
| 426 const DecodedDrawImage& decoded_image) { |
| 427 TRACE_EVENT1("cc", "ImageDecodeController::DrawWithImageFinished", "key", |
| 428 ImageKey::FromDrawImage(image).ToString()); |
| 429 ImageKey key = ImageKey::FromDrawImage(image); |
| 430 if (!CanHandleImage(key, image)) |
91 return; | 431 return; |
92 | 432 |
93 // Clean up decode tasks once a layer is no longer used. | 433 if (decoded_image.is_at_raster_decode()) |
94 used_layer_counts_.erase(layer_id); | 434 UnrefAtRasterImage(key); |
95 image_decode_tasks_.erase(layer_id); | 435 else |
96 } | 436 UnrefImage(image); |
97 | 437 SanityCheckState(__LINE__, false); |
98 void ImageDecodeController::OnImageDecodeTaskCompleted(int layer_id, | 438 } |
99 const SkImage* image, | 439 |
100 bool was_canceled) { | 440 void ImageDecodeController::RefAtRasterImage(const ImageKey& key) { |
101 // If the task has successfully finished, then keep the task until the layer | 441 TRACE_EVENT1("cc", "ImageDecodeController::RefAtRasterImage", "key", |
102 // is no longer in use. This ensures that we only decode a image once. | 442 key.ToString()); |
103 // TODO(vmpstr): Remove this when decode lifetime is controlled by cc. | lock_.AssertAcquired(); |
| 443 DCHECK(FindImage(&at_raster_decoded_images_, key) != |
104 if (!was_canceled) | 444 at_raster_decoded_images_.end()); |
| 445 ++at_raster_decoded_images_ref_counts_[key]; |
| 446 } |
| 447 |
| 448 void ImageDecodeController::UnrefAtRasterImage(const ImageKey& key) { |
| 449 TRACE_EVENT1("cc", "ImageDecodeController::UnrefAtRasterImage", "key", |
| 450 key.ToString()); |
| 451 base::AutoLock lock(lock_); |
| 452 |
| 453 auto ref_it = at_raster_decoded_images_ref_counts_.find(key); |
| 454 DCHECK(ref_it != at_raster_decoded_images_ref_counts_.end()); |
| 455 --ref_it->second; |
| 456 if (ref_it->second == 0) { |
| 457 at_raster_decoded_images_ref_counts_.erase(ref_it); |
| 458 auto at_raster_image_it = FindImage(&at_raster_decoded_images_, key); |
| 459 DCHECK(at_raster_image_it != at_raster_decoded_images_.end()); |
| 460 |
| 461 // The ref for our image reached 0 and it's still locked. We need to figure |
| 462 // out what the best thing to do with the image is. There are several |
| 463 // situations: |
| 464 // 1. The image is not in the main cache and... |
| 465 // 1a. ... its ref count is 0: unlock our image and put it in the main |
| 466 // cache. |
| 467 // 1b. ... ref count is not 0: keep the image locked and put it in the |
| 468 // main cache. |
| 469 // 2. The image is in the main cache... |
| 470 //   2a. ... and is locked: unlock our image and discard it. |
| 471 // 2b. ... and is unlocked and... |
| 472 // 2b1. ... its ref count is 0: unlock our image and replace the |
| 473 // existing one with ours. |
| 474 // 2b2. ... its ref count is not 0: this shouldn't be possible. |
| 475 auto image_it = FindImage(&decoded_images_, key); |
| 476 if (image_it == decoded_images_.end()) { |
| 477 if (decoded_images_ref_counts_.find(key) == |
| 478 decoded_images_ref_counts_.end()) { |
| 479 at_raster_image_it->second->Unlock(); |
| 480 } |
| 481 decoded_images_.push_back(*at_raster_image_it); |
| 482 } else if (image_it->second->is_locked()) { |
| 483 at_raster_image_it->second->Unlock(); |
| 484 } else { |
| 485 DCHECK(decoded_images_ref_counts_.find(key) == |
| 486 decoded_images_ref_counts_.end()); |
| 487 at_raster_image_it->second->Unlock(); |
| 488 decoded_images_.erase(image_it); |
| 489 decoded_images_.push_back(*at_raster_image_it); |
| 490 } |
| 491 at_raster_decoded_images_.erase(at_raster_image_it); |
| 492 } |
| 493 } |
| 494 |
| 495 bool ImageDecodeController::CanHandleImage(const ImageKey& key, |
| 496 const DrawImage& image) { |
| 497 // TODO(vmpstr): Handle GPU rasterization. |
| 498 if (is_using_gpu_rasterization_) |
| 499 return false; |
| 500 if (!CanHandleFilterQuality(key.filter_quality())) |
| 501 return false; |
| 502 return true; |
| 503 } |
| 504 |
| 505 bool ImageDecodeController::CanHandleFilterQuality( |
| 506 SkFilterQuality filter_quality) { |
| 507 // We don't need to handle low quality filters. |
| 508 if (filter_quality == kLow_SkFilterQuality || |
| 509 filter_quality == kNone_SkFilterQuality) { |
| 510 return false; |
| 511 } |
| 512 |
| 513 // TODO(vmpstr): We need to start caching mipmaps for medium quality and |
| 514 // caching the interpolated values from those. For now, we don't have this. |
| 515 if (filter_quality == kMedium_SkFilterQuality) |
| 516 return false; |
| 517 DCHECK(filter_quality == kHigh_SkFilterQuality); |
| 518 return true; |
| 519 } |
| 520 |
| 521 void ImageDecodeController::ReduceCacheUsage() { |
| 522 TRACE_EVENT0("cc", "ImageDecodeController::ReduceCacheUsage"); |
| 523 base::AutoLock lock(lock_); |
| 524 size_t num_to_remove = (decoded_images_.size() > kMaxItemsInCache) |
| 525 ? (decoded_images_.size() - kMaxItemsInCache) |
| 526 : 0; |
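| // Evict unlocked images from the front (oldest first) until we are back |
| // under the item limit; locked images are skipped and remain cached. |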
| 527 for (auto it = decoded_images_.begin(); |
| 528 num_to_remove != 0 && it != decoded_images_.end();) { |
| 529 if (it->second->is_locked()) { |
| 530 ++it; |
| 531 continue; |
| 532 } |
| 533 |
| 534 it = decoded_images_.erase(it); |
| 535 --num_to_remove; |
| 536 } |
| 537 } |
| 538 |
| 539 void ImageDecodeController::RemovePendingTask(const ImageKey& key) { |
| 540 base::AutoLock lock(lock_); |
| 541 pending_image_tasks_.erase(key); |
| 542 } |
| 543 |
| 544 void ImageDecodeController::SetIsUsingGpuRasterization( |
| 545 bool is_using_gpu_rasterization) { |
| 546 if (is_using_gpu_rasterization_ == is_using_gpu_rasterization) |
105 return; | 547 return; |
106 | 548 is_using_gpu_rasterization_ = is_using_gpu_rasterization; |
107 // Otherwise, we have to clean up the task so that a new one can be created if | 549 |
108 // we need to decode the image again. | 550 base::AutoLock lock(lock_); |
109 LayerImageTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id); | 551 |
110 if (layer_it == image_decode_tasks_.end()) | 552 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); |
| 553 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); |
| 554 DCHECK(std::find_if(decoded_images_.begin(), decoded_images_.end(), |
| 555 [](const AnnotatedDecodedImage& image) { |
| 556 return image.second->is_locked(); |
| 557 }) == decoded_images_.end()); |
| 558 DCHECK(std::find_if(at_raster_decoded_images_.begin(), |
| 559 at_raster_decoded_images_.end(), |
| 560 [](const AnnotatedDecodedImage& image) { |
| 561 return image.second->is_locked(); |
| 562 }) == at_raster_decoded_images_.end()); |
| 563 decoded_images_.clear(); |
| 564 at_raster_decoded_images_.clear(); |
| 565 } |
| 566 |
| 567 void ImageDecodeController::SanityCheckState(int line, bool lock_acquired) { |
| 568 #if DCHECK_IS_ON() |
| 569 if (!lock_acquired) { |
| 570 base::AutoLock lock(lock_); |
| 571 SanityCheckState(line, true); |
111 return; | 572 return; |
112 | 573 } |
113 ImageTaskMap& image_tasks = layer_it->second; | 574 |
114 ImageTaskMap::iterator task_it = image_tasks.find(image->uniqueID()); | 575 MemoryBudget budget(kLockedMemoryLimitBytes); |
115 if (task_it == image_tasks.end()) | 576 for (const auto& annotated_image : decoded_images_) { |
116 return; | 577 auto ref_it = decoded_images_ref_counts_.find(annotated_image.first); |
117 image_tasks.erase(task_it); | 578 if (annotated_image.second->is_locked()) { |
| 579 budget.AddUsage(annotated_image.first.target_bytes()); |
| 580 DCHECK(ref_it != decoded_images_ref_counts_.end()) << line; |
| 581 } else { |
| 582 DCHECK(ref_it == decoded_images_ref_counts_.end() || |
| 583 pending_image_tasks_.find(annotated_image.first) != |
| 584 pending_image_tasks_.end()) |
| 585 << line; |
| 586 } |
| 587 } |
| 588 DCHECK_GE(budget.AvailableMemoryBytes(), |
| 589 locked_images_budget_.AvailableMemoryBytes()) |
| 590 << line; |
| 591 #endif // DCHECK_IS_ON() |
| 592 } |
| 593 |
| 594 // ImageDecodeControllerKey |
| 595 ImageDecodeControllerKey ImageDecodeControllerKey::FromDrawImage( |
| 596 const DrawImage& image) { |
| 597 const SkSize& scale = image.scale(); |
| 598 gfx::Size target_size( |
| 599 SkScalarRoundToInt(std::abs(image.image()->width() * scale.width())), |
| 600 SkScalarRoundToInt(std::abs(image.image()->height() * scale.height()))); |
| 601 |
| 602 // Start with the quality that was requested, but drop down immediately to low |
| 603 // if we're not actually going to do any scaling. |
| 604 SkFilterQuality quality = image.filter_quality(); |
| 605 if (target_size.width() == image.image()->width() && |
| 606 target_size.height() == image.image()->height()) { |
| 607 quality = std::min(quality, kLow_SkFilterQuality); |
| 608 } |
| 609 |
| 610 // Drop from high to medium if the image has perspective applied, the matrix |
| 611 // we applied wasn't decomposable, or if the scaled image will be too large. |
| 612 if (quality == kHigh_SkFilterQuality) { |
| 613 if (image.matrix_has_perspective() || !image.matrix_is_decomposable()) { |
| 614 quality = kMedium_SkFilterQuality; |
| 615 } else { |
| 616 base::CheckedNumeric<size_t> size = 4u; |
| 617 size *= target_size.width(); |
| 618 size *= target_size.height(); |
| 619 if (size.ValueOrDefault(std::numeric_limits<size_t>::max()) > |
| 620 kMaxHighQualityImageSizeBytes) { |
| 621 quality = kMedium_SkFilterQuality; |
| 622 } |
| 623 } |
| 624 } |
| 625 |
| 626 // Drop from medium to low if the matrix we applied wasn't decomposable or if |
| 627 // we're enlarging the image in both dimensions. |
| 628 if (quality == kMedium_SkFilterQuality) { |
| 629 if (!image.matrix_is_decomposable() || |
| 630 (scale.width() >= 1.f && scale.height() >= 1.f)) { |
| 631 quality = kLow_SkFilterQuality; |
| 632 } |
| 633 } |
| 634 |
| 635 return ImageDecodeControllerKey(image.image()->uniqueID(), |
| 636 gfx::SkIRectToRect(image.src_rect()), |
| 637 target_size, quality); |
| 638 } |
| 639 |
| 640 ImageDecodeControllerKey::ImageDecodeControllerKey( |
| 641 uint32_t image_id, |
| 642 const gfx::Rect& src_rect, |
| 643 const gfx::Size& size, |
| 644 SkFilterQuality filter_quality) |
| 645 : image_id_(image_id), |
| 646 src_rect_(src_rect), |
| 647 size_(size), |
| 648 filter_quality_(filter_quality) {} |
| 649 |
| 650 std::string ImageDecodeControllerKey::ToString() const { |
| 651 std::ostringstream str; |
| 652 str << "id[" << image_id_ << "] src_rect[" << src_rect_.x() << "," |
| 653 << src_rect_.y() << " " << src_rect_.width() << "x" << src_rect_.height() |
| 654 << "] size[" << size_.width() << "x" << size_.height() |
| 655 << "] filter_quality[" << filter_quality_ << "]"; |
| 656 return str.str(); |
| 657 } |
| 658 |
| 659 // DecodedImage |
| 660 ImageDecodeController::DecodedImage::DecodedImage( |
| 661 const SkImageInfo& info, |
| 662 scoped_ptr<base::DiscardableMemory> memory, |
| 663 const SkSize& src_rect_offset) |
| 664 : locked_(true), |
| 665 image_info_(info), |
| 666 memory_(std::move(memory)), |
| 667 src_rect_offset_(src_rect_offset) { |
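| // The no-op release proc is intentional: |memory_| owns the pixel data and |
| // keeps it alive for the lifetime of this DecodedImage. |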
| 668 image_ = skia::AdoptRef(SkImage::NewFromRaster( |
| 669 image_info_, memory_->data(), image_info_.minRowBytes(), |
| 670 [](const void* pixels, void* context) {}, nullptr)); |
| 671 } |
| 672 |
| 673 ImageDecodeController::DecodedImage::~DecodedImage() {} |
| 674 |
| 675 bool ImageDecodeController::DecodedImage::Lock() { |
| 676 DCHECK(!locked_); |
| 677 bool success = memory_->Lock(); |
| 678 if (!success) |
| 679 return false; |
| 680 locked_ = true; |
| 681 return true; |
| 682 } |
| 683 |
| 684 void ImageDecodeController::DecodedImage::Unlock() { |
| 685 DCHECK(locked_); |
| 686 memory_->Unlock(); |
| 687 locked_ = false; |
| 688 } |
| 689 |
| 690 // MemoryBudget |
| 691 ImageDecodeController::MemoryBudget::MemoryBudget(size_t limit_bytes) |
| 692 : limit_bytes_(limit_bytes), current_usage_bytes_(0u) {} |
| 693 |
| 694 size_t ImageDecodeController::MemoryBudget::AvailableMemoryBytes() const { |
| 695 size_t usage = GetCurrentUsageSafe(); |
| 696 return usage >= limit_bytes_ ? 0u : (limit_bytes_ - usage); |
| 697 } |
| 698 |
| 699 void ImageDecodeController::MemoryBudget::AddUsage(size_t usage) { |
| 700 current_usage_bytes_ += usage; |
| 701 } |
| 702 |
| 703 void ImageDecodeController::MemoryBudget::SubtractUsage(size_t usage) { |
| 704 DCHECK_GE(current_usage_bytes_.ValueOrDefault(0u), usage); |
| 705 current_usage_bytes_ -= usage; |
| 706 } |
| 707 |
| 708 void ImageDecodeController::MemoryBudget::ResetUsage() { |
| 709 current_usage_bytes_ = 0; |
| 710 } |
| 711 |
| 712 size_t ImageDecodeController::MemoryBudget::GetCurrentUsageSafe() const { |
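| // ValueOrDie() CHECKs (crashes) if the checked arithmetic ever overflowed |
| // or underflowed. |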
| 713 return current_usage_bytes_.ValueOrDie(); |
118 } | 714 } |
119 | 715 |
120 } // namespace cc | 716 } // namespace cc |
OLD | NEW |
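
For review context, a minimal sketch of how a caller is expected to pair these
calls. This assumes a TileManager-style client; ScheduleTask() and
decoded.image() are illustrative assumptions, not guaranteed by this CL:

  // Schedule time: take a ref on the image and, if needed, a decode task.
  scoped_refptr<ImageDecodeTask> task;
  bool reffed =
      controller->GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
  if (task)
    ScheduleTask(task.get());  // Illustrative scheduler hook.

  // Raster time: hold the decode only for the duration of a single draw. A
  // real caller also applies the scale adjustment and src-rect offset that
  // the DecodedDrawImage carries.
  DecodedDrawImage decoded = controller->GetDecodedImageForDraw(draw_image);
  canvas->drawImage(decoded.image(), 0, 0);
  controller->DrawWithImageFinished(draw_image, decoded);

  // Teardown: every true return from GetTaskForImageAndRef must be balanced.
  if (reffed)
    controller->UnrefImage(draw_image);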