OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/tiles/image_decode_controller.h" | 5 #include "cc/tiles/image_decode_controller.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include "base/macros.h" | 9 #include "base/macros.h" |
| 10 #include "base/memory/discardable_memory.h" |
10 #include "cc/debug/devtools_instrumentation.h" | 11 #include "cc/debug/devtools_instrumentation.h" |
| 12 #include "third_party/skia/include/core/SkCanvas.h" |
| 13 #include "third_party/skia/include/core/SkImage.h" |
| 14 #include "ui/gfx/skia_util.h" |
11 | 15 |
12 namespace cc { | 16 namespace cc { |
13 namespace { | 17 namespace { |
14 | 18 |
| 19 // The amount of memory we can lock ahead of time (128MB). This limit is only |
| 20 // used to inform the caller of the amount of space available in the cache. The |
| 21 // caller can still request tasks which can cause this limit to be breached. |
| 22 const size_t kLockedMemoryLimitBytes = 128 * 1024 * 1024; |
| 23 |
| 24 // The largest single high-quality image we will try to process. Images above |
| 25 // this size will drop down to medium quality. |
| 26 const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024; |
| 27 |
| 28 // The number of entries to keep around in the cache. This limit can be breached |
| 29 // if more items are locked. That is, locked items ignore this limit. |
| 30 const size_t kMaxItemsInCache = 100; |
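
As a rough sense of scale for the constants above, a minimal standalone sketch
(not part of this CL; all names are hypothetical): N32 premul pixels take 4
bytes each, so the 64MB high-quality cutoff corresponds to a 4096x4096 target,
and two such decodes exactly fill the 128MB locked budget.

    // Standalone sketch: how the 4-bytes-per-pixel N32 math relates to the
    // limits above. The constants mirror the ones in this file.
    #include <cstddef>
    #include <iostream>

    const size_t kLockedMemoryLimitBytes = 128 * 1024 * 1024;
    const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024;

    size_t N32BytesForSize(size_t width, size_t height) {
      return 4 * width * height;  // N32 premul is 4 bytes per pixel.
    }

    int main() {
      // 4096x4096 sits exactly at the 64MB high-quality cutoff; only targets
      // strictly above it get demoted to medium quality.
      std::cout << (N32BytesForSize(4096, 4096) <= kMaxHighQualityImageSizeBytes)
                << "\n";  // 1
      // Two such decodes fill the 128MB locked budget exactly.
      std::cout << (2 * N32BytesForSize(4096, 4096) == kLockedMemoryLimitBytes)
                << "\n";  // 1
      return 0;
    }
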
| 31 |
15 class ImageDecodeTaskImpl : public ImageDecodeTask { | 32 class ImageDecodeTaskImpl : public ImageDecodeTask { |
16 public: | 33 public: |
17 ImageDecodeTaskImpl(ImageDecodeController* controller, | 34 ImageDecodeTaskImpl(ImageDecodeController* controller, |
18 const SkImage* image, | 35 const ImageDecodeController::ImageKey& image_key, |
19 int layer_id, | 36 const DrawImage& image, |
20 uint64_t source_prepare_tiles_id) | 37 uint64_t source_prepare_tiles_id) |
21 : controller_(controller), | 38 : controller_(controller), |
22 image_(skia::SharePtr(image)), | 39 image_key_(image_key), |
23 layer_id_(layer_id), | 40 image_(image), |
| 41 image_ref_(skia::SharePtr(image.image())), |
24 source_prepare_tiles_id_(source_prepare_tiles_id) {} | 42 source_prepare_tiles_id_(source_prepare_tiles_id) {} |
25 | 43 |
26 // Overridden from Task: | 44 // Overridden from Task: |
27 void RunOnWorkerThread() override { | 45 void RunOnWorkerThread() override { |
28 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", | 46 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", |
29 "source_prepare_tiles_id", source_prepare_tiles_id_); | 47 "source_prepare_tiles_id", source_prepare_tiles_id_); |
30 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( | 48 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( |
31 image_.get()); | 49 image_ref_.get()); |
32 controller_->DecodeImage(image_.get()); | 50 controller_->DecodeImage(image_key_, image_); |
33 | |
34 // Release the reference after decoding image to ensure that it is not kept | |
35 // alive unless needed. | |
36 image_.clear(); | |
37 } | 51 } |
38 | 52 |
39 // Overridden from TileTask: | 53 // Overridden from TileTask: |
40 void ScheduleOnOriginThread(TileTaskClient* client) override {} | 54 void ScheduleOnOriginThread(TileTaskClient* client) override {} |
41 void CompleteOnOriginThread(TileTaskClient* client) override { | 55 void CompleteOnOriginThread(TileTaskClient* client) override { |
42 controller_->OnImageDecodeTaskCompleted(layer_id_, image_.get(), | 56 controller_->RemovePendingTask(image_key_); |
43 !HasFinishedRunning()); | |
44 } | 57 } |
45 | 58 |
46 protected: | 59 protected: |
47 ~ImageDecodeTaskImpl() override {} | 60 ~ImageDecodeTaskImpl() override {} |
48 | 61 |
49 private: | 62 private: |
50 ImageDecodeController* controller_; | 63 ImageDecodeController* controller_; |
51 skia::RefPtr<const SkImage> image_; | 64 ImageDecodeController::ImageKey image_key_; |
52 int layer_id_; | 65 DrawImage image_; |
| 66 skia::RefPtr<const SkImage> image_ref_; |
53 uint64_t source_prepare_tiles_id_; | 67 uint64_t source_prepare_tiles_id_; |
54 | 68 |
55 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); | 69 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); |
56 }; | 70 }; |
57 | 71 |
| 72 template <typename Type> |
| 73 typename std::deque<Type>::iterator FindImage( |
| 74 std::deque<Type>* collection, |
| 75 const ImageDecodeControllerKey& key) { |
| 76 return std::find_if(collection->begin(), collection->end(), |
| 77 [key](const Type& image) { return image.first == key; }); |
| 78 } |
| 79 |
| 80 SkSize GetScaleAdjustment(const ImageDecodeControllerKey& key) { |
| 81 float x_scale = |
| 82 key.target_size().width() / static_cast<float>(key.src_rect().width()); |
| 83 float y_scale = |
| 84 key.target_size().height() / static_cast<float>(key.src_rect().height()); |
| 85 return SkSize::Make(x_scale, y_scale); |
| 86 } |
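
A quick worked example of the GetScaleAdjustment() math above (standalone
sketch, hypothetical numbers): the key records the source rect and the rounded
target size, and the ratio of the two is the scale the decode already applied,
which the raster path later folds into its matrix.

    #include <iostream>

    int main() {
      // Hypothetical key: a 200x100 src_rect decoded into a 100x50 target.
      const int src_w = 200, src_h = 100;
      const int target_w = 100, target_h = 50;
      // Same math as GetScaleAdjustment(), with floats standing in for SkSize.
      const float x_scale = target_w / static_cast<float>(src_w);
      const float y_scale = target_h / static_cast<float>(src_h);
      std::cout << x_scale << "x" << y_scale << "\n";  // 0.5x0.5
      return 0;
    }
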
| 87 |
58 } // namespace | 88 } // namespace |
59 | 89 |
60 ImageDecodeController::ImageDecodeController() {} | 90 ImageDecodeController::ImageDecodeController() |
61 | 91 : is_using_gpu_rasterization_(false), |
62 ImageDecodeController::~ImageDecodeController() {} | 92 locked_images_budget_(kLockedMemoryLimitBytes) {} |
63 | 93 |
64 scoped_refptr<ImageDecodeTask> ImageDecodeController::GetTaskForImage( | 94 ImageDecodeController::~ImageDecodeController() { |
| 95 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); |
| 96 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); |
| 97 } |
| 98 |
| 99 bool ImageDecodeController::GetTaskForImageAndRef( |
65 const DrawImage& image, | 100 const DrawImage& image, |
66 int layer_id, | 101 uint64_t prepare_tiles_id, |
67 uint64_t prepare_tiles_id) { | 102 scoped_refptr<ImageDecodeTask>* task) { |
68 uint32_t generation_id = image.image()->uniqueID(); | 103 // If the image already exists or if we're going to create a task for it, then |
69 scoped_refptr<ImageDecodeTask>& decode_task = | 104 // we'll likely need to ref this image (the exception is if we're prerolling |
70 image_decode_tasks_[layer_id][generation_id]; | 105 // the image only). That means the image is or will be in the cache. When the |
71 if (!decode_task) | 106 // ref goes to 0, it will be unpinned but will remain in the cache. If the |
72 decode_task = CreateTaskForImage(image.image(), layer_id, prepare_tiles_id); | 107 // image does not fit into the budget, then we don't ref this image, since it |
73 return decode_task; | 108 // will be decoded at raster time, which is when it will be temporarily put in |
74 } | 109 // the cache. |
75 | 110 ImageKey key = ImageKey::FromDrawImage(image); |
76 scoped_refptr<ImageDecodeTask> ImageDecodeController::CreateTaskForImage( | 111 TRACE_EVENT1("cc", "ImageDecodeController::GetTaskForImageAndRef", "key", |
77 const SkImage* image, | 112 key.ToString()); |
78 int layer_id, | 113 // If we're not going to do a scale, we will just create a task to preroll the |
79 uint64_t prepare_tiles_id) { | 114 // image the first time we see it. This doesn't need to account for memory. |
| 115 // TODO(vmpstr): We can also lock the original sized image, in which case it |
| 116 // does require memory bookkeeping. |
| 117 if (!CanHandleImage(key, image)) { |
| 118 base::AutoLock lock(lock_); |
| 119 if (prerolled_images_.count(key.image_id()) == 0) { |
| 120 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; |
| 121 if (!existing_task) { |
| 122 existing_task = make_scoped_refptr( |
| 123 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); |
| 124 } |
| 125 *task = existing_task; |
| 126 } else { |
| 127 *task = nullptr; |
| 128 } |
| 129 return false; |
| 130 } |
| 131 |
| 132 base::AutoLock lock(lock_); |
| 133 |
| 134 // If we already have the image in cache, then we can return it. |
| 135 auto decoded_it = FindImage(&decoded_images_, key); |
| 136 bool new_image_fits_in_memory = |
| 137 locked_images_budget_.AvailableMemoryBytes() >= key.target_bytes(); |
| 138 if (decoded_it != decoded_images_.end()) { |
| 139 if (decoded_it->second->is_locked() || |
| 140 (new_image_fits_in_memory && decoded_it->second->Lock())) { |
| 141 RefImage(key); |
| 142 *task = nullptr; |
| 143 SanityCheckState(__LINE__, true); |
| 144 return true; |
| 145 } |
| 146 // If the image fits in memory, then we tried to lock it and failed, which |
| 147 // means its discardable backing was purged and the entry is stale. |
| 148 if (new_image_fits_in_memory) |
| 149 decoded_images_.erase(decoded_it); |
| 150 } |
| 151 |
| 152 // If the task exists, return it. |
| 153 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; |
| 154 if (existing_task) { |
| 155 RefImage(key); |
| 156 *task = existing_task; |
| 157 SanityCheckState(__LINE__, true); |
| 158 return true; |
| 159 } |
| 160 |
| 161 // At this point, we have to create a new image/task, so we need to abort if |
| 162 // it doesn't fit into memory and there are currently no raster tasks that |
| 163 // would have already accounted for memory. The latter case can happen if a |
| 164 // running raster task that could not be canceled still holds a ref to the |
| 165 // image that is now being reffed for the new schedule. |
| 166 if (!new_image_fits_in_memory && (decoded_images_ref_counts_.find(key) == |
| 167 decoded_images_ref_counts_.end())) { |
| 168 *task = nullptr; |
| 169 SanityCheckState(__LINE__, true); |
| 170 return false; |
| 171 } |
| 172 |
| 173 // Actually create the task. RefImage will account for memory on the first |
| 174 // ref. |
| 175 RefImage(key); |
| 176 existing_task = make_scoped_refptr( |
| 177 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); |
| 178 *task = existing_task; |
| 179 SanityCheckState(__LINE__, true); |
| 180 return true; |
| 181 } |
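
The function above has a three-way outcome that callers need to get right. A
minimal standalone sketch of that contract (all names are hypothetical
stand-ins, and the preroll path for images we can't handle is omitted): the
boolean return says whether the image was reffed, and therefore whether the
caller owes an UnrefImage(), independently of whether a task was handed out.

    #include <cassert>
    #include <cstdio>

    struct Outcome {
      bool reffed;    // Caller owes an UnrefImage().
      bool has_task;  // Caller must schedule this decode task.
    };

    Outcome GetTaskForImageAndRef(bool cached_and_locked, bool task_pending,
                                  bool fits_in_budget, bool already_reffed) {
      if (cached_and_locked) return {true, false};  // Use the cache directly.
      if (task_pending) return {true, true};        // Share the existing task.
      if (!fits_in_budget && !already_reffed)
        return {false, false};                      // Decode at raster time.
      return {true, true};                          // New task, memory reffed.
    }

    int main() {
      Outcome o = GetTaskForImageAndRef(true, false, true, false);
      assert(o.reffed && !o.has_task);   // Cached image: ref, but no work to do.
      o = GetTaskForImageAndRef(false, false, false, false);
      assert(!o.reffed && !o.has_task);  // Over budget: falls to at-raster path.
      std::printf("ok\n");
      return 0;
    }
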
| 182 |
| 183 void ImageDecodeController::RefImage(const ImageKey& key) { |
| 184 TRACE_EVENT1("cc", "ImageDecodeController::RefImage", "key", key.ToString()); |
| 185 lock_.AssertAcquired(); |
| 186 int ref = ++decoded_images_ref_counts_[key]; |
| 187 if (ref == 1) { |
| 188 DCHECK_GE(locked_images_budget_.AvailableMemoryBytes(), key.target_bytes()); |
| 189 locked_images_budget_.AddUsage(key.target_bytes()); |
| 190 } |
| 191 } |
| 192 |
| 193 void ImageDecodeController::UnrefImage(const DrawImage& image) { |
| 194 // When we unref the image, there are several situations we need to consider: |
| 195 // 1. The ref did not reach 0, which means we have to keep the image locked. |
| 196 // 2. The ref reached 0, so we should unlock it: |
| 197 // 2a. The image isn't in the locked cache because we didn't get to decode |
| 198 // it yet; there is nothing to unlock. |
| 199 // 2b. The image is in the cache: unlock it, but keep it in the list. |
| 200 const ImageKey& key = ImageKey::FromDrawImage(image); |
| 201 DCHECK(CanHandleImage(key, image)); |
| 202 TRACE_EVENT1("cc", "ImageDecodeController::UnrefImage", "key", |
| 203 key.ToString()); |
| 204 |
| 205 base::AutoLock lock(lock_); |
| 206 auto ref_count_it = decoded_images_ref_counts_.find(key); |
| 207 DCHECK(ref_count_it != decoded_images_ref_counts_.end()); |
| 208 |
| 209 --ref_count_it->second; |
| 210 if (ref_count_it->second == 0) { |
| 211 decoded_images_ref_counts_.erase(ref_count_it); |
| 212 locked_images_budget_.SubtractUsage(key.target_bytes()); |
| 213 |
| 214 auto decoded_image_it = FindImage(&decoded_images_, key); |
| 215 // If we've never decoded the image before the ref reached 0, then we wouldn't |
| 216 // have it in our cache. This would happen if we canceled tasks. |
| 217 if (decoded_image_it == decoded_images_.end()) { |
| 218 SanityCheckState(__LINE__, true); |
| 219 return; |
| 220 } |
| 221 DCHECK(decoded_image_it->second->is_locked()); |
| 222 decoded_image_it->second->Unlock(); |
| 223 } |
| 224 SanityCheckState(__LINE__, true); |
| 225 } |
| 226 |
| 227 void ImageDecodeController::DecodeImage(const ImageKey& key, |
| 228 const DrawImage& image) { |
| 229 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImage", "key", |
| 230 key.ToString()); |
| 231 if (!CanHandleImage(key, image)) { |
| 232 image.image()->preroll(); |
| 233 |
| 234 base::AutoLock lock(lock_); |
| 235 prerolled_images_.insert(key.image_id()); |
| 236 // Erase the pending task from the queue, since the task won't be doing |
| 237 // anything useful after this function terminates. Since we don't preroll |
| 238 // images twice, this is actually not necessary, but it behaves similarly to |
| 239 // the other code path: when this function finishes, the task isn't in the |
| 240 // pending_image_tasks_ list. |
| 241 pending_image_tasks_.erase(key); |
| 242 return; |
| 243 } |
| 244 |
| 245 base::AutoLock lock(lock_); |
| 246 |
| 247 auto image_it = FindImage(&decoded_images_, key); |
| 248 if (image_it != decoded_images_.end()) { |
| 249 if (image_it->second->is_locked() || image_it->second->Lock()) { |
| 250 pending_image_tasks_.erase(key); |
| 251 return; |
| 252 } |
| 253 decoded_images_.erase(image_it); |
| 254 } |
| 255 |
| 256 scoped_refptr<DecodedImage> decoded_image; |
| 257 { |
| 258 base::AutoUnlock unlock(lock_); |
| 259 decoded_image = DecodeImageInternal(key, image.image()); |
| 260 } |
| 261 |
| 262 // Erase the pending task from the queue, since the task won't be doing |
| 263 // anything useful after this function terminates. That is, if this image |
| 264 // needs to be decoded again, we have to create a new task. |
| 265 pending_image_tasks_.erase(key); |
| 266 |
| 267 // All of the raster tasks could have finished (or been canceled) while this |
| 268 // image decode task was running, which means that we now have a locked image |
| 269 // but no ref counts. Unlock it immediately in this case. |
| 270 if (decoded_images_ref_counts_.find(key) == |
| 271 decoded_images_ref_counts_.end()) { |
| 272 decoded_image->Unlock(); |
| 273 } |
| 274 |
| 275 // At this point, the image may already have been decoded in place by a |
| 276 // raster task that was still running from a previous schedule. If so, it is |
| 277 // already in the cache (possibly locked). Prefer that copy if it is usable; |
| 278 // otherwise replace it with ours. |
| 279 image_it = FindImage(&decoded_images_, key); |
| 280 if (image_it != decoded_images_.end()) { |
| 281 if (image_it->second->is_locked() || image_it->second->Lock()) { |
| 283 return; |
| 284 } |
| 285 decoded_images_.erase(image_it); |
| 286 } |
| 287 decoded_images_.push_back(AnnotatedDecodedImage(key, decoded_image)); |
| 288 SanityCheckState(__LINE__, true); |
| 289 } |
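
DecodeImage() above leans on a pattern worth calling out: drop the controller
lock around the expensive decode, then re-validate everything after
re-acquiring it, because another thread may have raced us in the meantime. A
minimal standalone sketch of that pattern, with std::mutex standing in for
base::Lock and a hypothetical cache and decode:

    #include <map>
    #include <mutex>
    #include <string>
    #include <thread>

    std::mutex g_lock;
    std::map<int, std::string> g_cache;  // key -> decoded data

    std::string ExpensiveDecode(int key) {
      return "pixels:" + std::to_string(key);
    }

    void Decode(int key) {
      std::unique_lock<std::mutex> hold(g_lock);
      if (g_cache.count(key))
        return;  // Someone already decoded it.
      std::string result;
      {
        hold.unlock();                  // cf. base::AutoUnlock unlock(lock_);
        result = ExpensiveDecode(key);  // Other threads are not stalled by us.
        hold.lock();
      }
      // Re-check: another thread may have won the race while we were unlocked.
      if (!g_cache.count(key))
        g_cache[key] = result;
    }

    int main() {
      std::thread a(Decode, 1), b(Decode, 1);
      a.join();
      b.join();
      return g_cache.size() == 1 ? 0 : 1;
    }
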
| 290 |
| 291 scoped_refptr<ImageDecodeController::DecodedImage> |
| 292 ImageDecodeController::DecodeImageInternal(const ImageKey& key, |
| 293 const SkImage* image) { |
| 294 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImageInternal", "key", |
| 295 key.ToString()); |
| 296 |
| 297 // Get the decoded image first (at the original scale). |
| 298 SkImageInfo decoded_info = SkImageInfo::MakeN32Premul( |
| 299 key.src_rect().width(), key.src_rect().height()); |
| 300 scoped_ptr<uint8_t[]> decoded_pixels; |
| 301 { |
| 302 TRACE_EVENT0( |
| 303 "cc", |
| 304 "ImageDecodeController::DecodeImageInternal - allocate decoded pixels"); |
| 305 decoded_pixels.reset( |
| 306 new uint8_t[decoded_info.minRowBytes() * decoded_info.height()]); |
| 307 } |
| 308 { |
| 309 TRACE_EVENT0("cc", |
| 310 "ImageDecodeController::DecodeImageInternal - read pixels"); |
| 311 bool result = image->readPixels( |
| 312 decoded_info, decoded_pixels.get(), decoded_info.minRowBytes(), |
| 313 key.src_rect().x(), key.src_rect().y(), SkImage::kAllow_CachingHint); |
| 314 DCHECK(result); |
| 315 } |
| 316 |
| 317 SkPixmap decoded_pixmap(decoded_info, decoded_pixels.get(), |
| 318 decoded_info.minRowBytes()); |
| 319 |
| 320 // Now scale the pixels into the destination size. |
| 321 SkImageInfo scaled_info = SkImageInfo::MakeN32Premul( |
| 322 key.target_size().width(), key.target_size().height()); |
| 323 scoped_ptr<base::DiscardableMemory> scaled_pixels; |
| 324 { |
| 325 TRACE_EVENT0( |
| 326 "cc", |
| 327 "ImageDecodeController::DecodeImageInternal - allocate scaled pixels"); |
| 328 scaled_pixels = base::DiscardableMemoryAllocator::GetInstance() |
| 329 ->AllocateLockedDiscardableMemory( |
| 330 scaled_info.minRowBytes() * scaled_info.height()); |
| 331 } |
| 332 SkPixmap scaled_pixmap(scaled_info, scaled_pixels->data(), |
| 333 scaled_info.minRowBytes()); |
| 334 // TODO(vmpstr): Start handling more than just high filter quality. |
| 335 DCHECK_EQ(kHigh_SkFilterQuality, key.filter_quality()); |
| 336 { |
| 337 TRACE_EVENT0("cc", |
| 338 "ImageDecodeController::DecodeImageInternal - scale pixels"); |
| 339 bool result = |
| 340 decoded_pixmap.scalePixels(scaled_pixmap, kHigh_SkFilterQuality); |
| 341 DCHECK(result); |
| 342 } |
80 return make_scoped_refptr( | 343 return make_scoped_refptr( |
81 new ImageDecodeTaskImpl(this, image, layer_id, prepare_tiles_id)); | 344 new DecodedImage(scaled_info, std::move(scaled_pixels), |
82 } | 345 SkSize::Make(-key.src_rect().x(), -key.src_rect().y()))); |
83 | 346 } |
84 void ImageDecodeController::DecodeImage(const SkImage* image) { | 347 |
85 image->preroll(); | 348 DecodedDrawImage ImageDecodeController::GetDecodedImageForDraw( |
86 } | 349 const DrawImage& draw_image) { |
87 | 350 ImageKey key = ImageKey::FromDrawImage(draw_image); |
88 void ImageDecodeController::AddLayerUsedCount(int layer_id) { | 351 TRACE_EVENT1("cc", "ImageDecodeController::GetDecodedImageAndRef", "key", |
89 ++used_layer_counts_[layer_id]; | 352 key.ToString()); |
90 } | 353 if (!CanHandleImage(key, draw_image)) |
91 | 354 return DecodedDrawImage(draw_image.image(), draw_image.filter_quality()); |
92 void ImageDecodeController::SubtractLayerUsedCount(int layer_id) { | 355 |
93 if (--used_layer_counts_[layer_id]) | 356 base::AutoLock lock(lock_); |
| 357 auto decoded_images_it = FindImage(&decoded_images_, key); |
| 358 // If we found the image and it's locked, then return it. If it's not locked, |
| 359 // erase it from the cache since it might be put into the at-raster cache. |
| 360 scoped_refptr<DecodedImage> decoded_image; |
| 361 if (decoded_images_it != decoded_images_.end()) { |
| 362 decoded_image = decoded_images_it->second; |
| 363 if (decoded_image->is_locked()) { |
| 364 RefImage(key); |
| 365 SanityCheckState(__LINE__, true); |
| 366 return DecodedDrawImage(decoded_image->image(), |
| 367 decoded_image->src_rect_offset(), |
| 368 GetScaleAdjustment(key), kLow_SkFilterQuality); |
| 369 } else { |
| 370 decoded_images_.erase(decoded_images_it); |
| 371 } |
| 372 } |
| 373 |
| 374 // See if another thread already decoded this image at raster time. If so, we |
| 375 // can just use that result directly. |
| 376 auto at_raster_images_it = FindImage(&at_raster_decoded_images_, key); |
| 377 if (at_raster_images_it != at_raster_decoded_images_.end()) { |
| 378 DCHECK(at_raster_images_it->second->is_locked()); |
| 379 RefAtRasterImage(key); |
| 380 SanityCheckState(__LINE__, true); |
| 381 auto decoded_draw_image = |
| 382 DecodedDrawImage(at_raster_images_it->second->image(), |
| 383 at_raster_images_it->second->src_rect_offset(), |
| 384 GetScaleAdjustment(key), kLow_SkFilterQuality); |
| 385 decoded_draw_image.set_at_raster_decode(true); |
| 386 return decoded_draw_image; |
| 387 } |
| 388 |
| 389 // Now we know that we don't have a locked image, and we appear to be the |
| 390 // first thread encountering it (though other threads might already be |
| 391 // decoding it). This means we need to decode the image ourselves, since we |
| 392 // either found no cached copy or failed to lock the one we found. |
| 393 bool check_at_raster_cache = false; |
| 394 if (!decoded_image || !decoded_image->Lock()) { |
| 395 // Note that we have to release the lock, since this lock is also accessed |
| 396 // on the compositor thread. This means holding on to the lock might stall |
| 397 // the compositor thread for the duration of the decode! |
| 398 base::AutoUnlock unlock(lock_); |
| 399 decoded_image = DecodeImageInternal(key, draw_image.image()); |
| 400 check_at_raster_cache = true; |
| 401 } |
| 402 |
| 403 // While the lock was released, it could be the case that another thread |
| 404 // already decoded this image and put it in the at-raster cache. Look it up |
| 405 // first. |
| 406 bool need_to_add_image_to_cache = true; |
| 407 if (check_at_raster_cache) { |
| 408 at_raster_images_it = FindImage(&at_raster_decoded_images_, key); |
| 409 if (at_raster_images_it != at_raster_decoded_images_.end()) { |
| 410 // We have to drop our decode, since the one in the cache is being used by |
| 411 // another thread. |
| 412 decoded_image->Unlock(); |
| 413 decoded_image = at_raster_images_it->second; |
| 414 need_to_add_image_to_cache = false; |
| 415 } |
| 416 } |
| 417 |
| 418 // If we really are the first ones, or if the other thread already unlocked |
| 419 // the image, then put our work into the at-raster cache. |
| 420 if (need_to_add_image_to_cache) { |
| 421 at_raster_decoded_images_.push_back( |
| 422 AnnotatedDecodedImage(key, decoded_image)); |
| 423 } |
| 424 |
| 425 DCHECK(decoded_image); |
| 426 DCHECK(decoded_image->is_locked()); |
| 427 RefAtRasterImage(key); |
| 428 SanityCheckState(__LINE__, true); |
| 429 auto decoded_draw_image = |
| 430 DecodedDrawImage(decoded_image->image(), decoded_image->src_rect_offset(), |
| 431 GetScaleAdjustment(key), kLow_SkFilterQuality); |
| 432 decoded_draw_image.set_at_raster_decode(true); |
| 433 return decoded_draw_image; |
| 434 } |
| 435 |
| 436 void ImageDecodeController::DrawWithImageFinished( |
| 437 const DrawImage& image, |
| 438 const DecodedDrawImage& decoded_image) { |
| 439 TRACE_EVENT1("cc", "ImageDecodeController::DrawWithImageFinished", "key", |
| 440 ImageKey::FromDrawImage(image).ToString()); |
| 441 ImageKey key = ImageKey::FromDrawImage(image); |
| 442 if (!CanHandleImage(key, image)) |
94 return; | 443 return; |
95 | 444 |
96 // Clean up decode tasks once a layer is no longer used. | 445 if (decoded_image.is_at_raster_decode()) |
97 used_layer_counts_.erase(layer_id); | 446 UnrefAtRasterImage(key); |
98 image_decode_tasks_.erase(layer_id); | 447 else |
99 } | 448 UnrefImage(image); |
100 | 449 SanityCheckState(__LINE__, false); |
101 void ImageDecodeController::OnImageDecodeTaskCompleted(int layer_id, | 450 } |
102 const SkImage* image, | 451 |
103 bool was_canceled) { | 452 void ImageDecodeController::RefAtRasterImage(const ImageKey& key) { |
104 // If the task has successfully finished, then keep the task until the layer | 453 TRACE_EVENT1("cc", "ImageDecodeController::RefAtRasterImage", "key", |
105 // is no longer in use. This ensures that we only decode a image once. | 454 key.ToString()); |
106 // TODO(vmpstr): Remove this when decode lifetime is controlled by cc. | 455 DCHECK(FindImage(&at_raster_decoded_images_, key) != |
107 if (!was_canceled) | 456 at_raster_decoded_images_.end()); |
| 457 ++at_raster_decoded_images_ref_counts_[key]; |
| 458 } |
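
A hypothetical usage sketch (not from this CL) of how GetDecodedImageForDraw()
and DrawWithImageFinished() above pair up on the raster side: every decode
handed out for a draw is balanced by a finished call, which routes the unref to
the right cache via is_at_raster_decode().

    // Hypothetical raster-side caller; the draw itself is elided.
    void RasterOneImage(ImageDecodeController* controller,
                        const DrawImage& draw_image,
                        SkCanvas* canvas) {
      DecodedDrawImage decoded = controller->GetDecodedImageForDraw(draw_image);
      // ... draw the decoded image on |canvas|, applying the returned filter
      // quality and scale adjustment ...
      controller->DrawWithImageFinished(draw_image, decoded);
    }
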
| 459 |
| 460 void ImageDecodeController::UnrefAtRasterImage(const ImageKey& key) { |
| 461 TRACE_EVENT1("cc", "ImageDecodeController::UnrefAtRasterImage", "key", |
| 462 key.ToString()); |
| 463 base::AutoLock lock(lock_); |
| 464 |
| 465 auto ref_it = at_raster_decoded_images_ref_counts_.find(key); |
| 466 DCHECK(ref_it != at_raster_decoded_images_ref_counts_.end()); |
| 467 --ref_it->second; |
| 468 if (ref_it->second == 0) { |
| 469 at_raster_decoded_images_ref_counts_.erase(ref_it); |
| 470 auto at_raster_image_it = FindImage(&at_raster_decoded_images_, key); |
| 471 DCHECK(at_raster_image_it != at_raster_decoded_images_.end()); |
| 472 |
| 473 // The ref count for our image reached 0 and the image is still locked. We |
| 474 // need to figure out the best thing to do with it. There are several |
| 475 // situations: |
| 476 // 1. The image is not in the main cache and... |
| 477 // 1a. ... its ref count is 0: unlock our image and put it in the main |
| 478 // cache. |
| 479 // 1b. ... ref count is not 0: keep the image locked and put it in the |
| 480 // main cache. |
| 481 // 2. The image is in the main cache... |
| 482 // 2a. ... and is locked: unlock our image and discard it. |
| 483 // 2b. ... and is unlocked and... |
| 484 // 2b1. ... its ref count is 0: unlock our image and replace the |
| 485 // existing one with ours. |
| 486 // 2b2. ... its ref count is not 0: this shouldn't be possible. |
| 487 auto image_it = FindImage(&decoded_images_, key); |
| 488 if (image_it == decoded_images_.end()) { |
| 489 if (decoded_images_ref_counts_.find(key) == |
| 490 decoded_images_ref_counts_.end()) { |
| 491 at_raster_image_it->second->Unlock(); |
| 492 } |
| 493 decoded_images_.push_back(*at_raster_image_it); |
| 494 } else if (image_it->second->is_locked()) { |
| 495 at_raster_image_it->second->Unlock(); |
| 496 } else { |
| 497 DCHECK(decoded_images_ref_counts_.find(key) == |
| 498 decoded_images_ref_counts_.end()); |
| 499 at_raster_image_it->second->Unlock(); |
| 500 decoded_images_.erase(image_it); |
| 501 decoded_images_.push_back(*at_raster_image_it); |
| 502 } |
| 503 at_raster_decoded_images_.erase(at_raster_image_it); |
| 504 } |
| 505 } |
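
The case analysis in the comment above is dense enough that a standalone sketch
may help. This pure decision function (hypothetical, not part of the CL)
mirrors cases 1a/1b/2a/2b for an at-raster image whose ref count just hit 0:

    #include <cassert>

    enum class Action { kUnlockAndMoveToCache, kMoveToCacheStillLocked,
                        kUnlockAndDiscard, kUnlockAndReplaceCached };

    Action OnAtRasterRefZero(bool in_main_cache, bool cached_is_locked,
                             bool main_ref_count_nonzero) {
      if (!in_main_cache) {
        return main_ref_count_nonzero ? Action::kMoveToCacheStillLocked   // 1b
                                      : Action::kUnlockAndMoveToCache;    // 1a
      }
      if (cached_is_locked)
        return Action::kUnlockAndDiscard;                                 // 2a
      assert(!main_ref_count_nonzero);                                    // 2b2
      return Action::kUnlockAndReplaceCached;                             // 2b1
    }

    int main() {
      assert(OnAtRasterRefZero(false, false, false) ==
             Action::kUnlockAndMoveToCache);
      assert(OnAtRasterRefZero(true, true, false) ==
             Action::kUnlockAndDiscard);
      return 0;
    }
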
| 506 |
| 507 bool ImageDecodeController::CanHandleImage(const ImageKey& key, |
| 508 const DrawImage& image) { |
| 509 // TODO(vmpstr): Handle GPU rasterization. |
| 510 if (is_using_gpu_rasterization_) |
| 511 return false; |
| 512 if (!CanHandleFilterQuality(key.filter_quality())) |
| 513 return false; |
| 514 return true; |
| 515 } |
| 516 |
| 517 bool ImageDecodeController::CanHandleFilterQuality( |
| 518 SkFilterQuality filter_quality) { |
| 519 // We don't need to handle low quality filters. |
| 520 if (filter_quality == kLow_SkFilterQuality || |
| 521 filter_quality == kNone_SkFilterQuality) { |
| 522 return false; |
| 523 } |
| 524 |
| 525 // TODO(vmpstr): We need to start caching mipmaps for medium quality and |
| 526 // caching the interpolated values from those. For now, we don't have this. |
| 527 if (filter_quality == kMedium_SkFilterQuality) |
| 528 return false; |
| 529 DCHECK(filter_quality == kHigh_SkFilterQuality); |
| 530 return true; |
| 531 } |
| 532 |
| 533 void ImageDecodeController::ReduceCacheUsage() { |
| 534 TRACE_EVENT0("cc", "ImageDecodeController::ReduceCacheUsage"); |
| 535 base::AutoLock lock(lock_); |
| 536 size_t num_to_remove = (decoded_images_.size() > kMaxItemsInCache) |
| 537 ? (decoded_images_.size() - kMaxItemsInCache) |
| 538 : 0; |
| 539 for (auto it = decoded_images_.begin(); |
| 540 num_to_remove != 0 && it != decoded_images_.end();) { |
| 541 if (it->second->is_locked()) { |
| 542 ++it; |
| 543 continue; |
| 544 } |
| 545 |
| 546 it = decoded_images_.erase(it); |
| 547 --num_to_remove; |
| 548 } |
| 549 } |
| 550 |
| 551 void ImageDecodeController::RemovePendingTask(const ImageKey& key) { |
| 552 base::AutoLock lock(lock_); |
| 553 pending_image_tasks_.erase(key); |
| 554 } |
| 555 |
| 556 void ImageDecodeController::SetIsUsingGpuRasterization( |
| 557 bool is_using_gpu_rasterization) { |
| 558 if (is_using_gpu_rasterization_ == is_using_gpu_rasterization) |
108 return; | 559 return; |
109 | 560 is_using_gpu_rasterization_ = is_using_gpu_rasterization; |
110 // Otherwise, we have to clean up the task so that a new one can be created if | 561 |
111 // we need to decode the image again. | 562 base::AutoLock lock(lock_); |
112 LayerImageTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id); | 563 |
113 if (layer_it == image_decode_tasks_.end()) | 564 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); |
| 565 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); |
| 566 DCHECK(std::find_if(decoded_images_.begin(), decoded_images_.end(), |
| 567 [](const AnnotatedDecodedImage& image) { |
| 568 return image.second->is_locked(); |
| 569 }) == decoded_images_.end()); |
| 570 DCHECK(std::find_if(at_raster_decoded_images_.begin(), |
| 571 at_raster_decoded_images_.end(), |
| 572 [](const AnnotatedDecodedImage& image) { |
| 573 return image.second->is_locked(); |
| 574 }) == at_raster_decoded_images_.end()); |
| 575 decoded_images_.clear(); |
| 576 at_raster_decoded_images_.clear(); |
| 577 } |
| 578 |
| 579 void ImageDecodeController::SanityCheckState(int line, bool lock_acquired) { |
| 580 #if DCHECK_IS_ON() |
| 581 if (!lock_acquired) { |
| 582 base::AutoLock lock(lock_); |
| 583 SanityCheckState(line, true); |
114 return; | 584 return; |
115 | 585 } |
116 ImageTaskMap& image_tasks = layer_it->second; | 586 |
117 ImageTaskMap::iterator task_it = image_tasks.find(image->uniqueID()); | 587 MemoryBudget budget(kLockedMemoryLimitBytes); |
118 if (task_it == image_tasks.end()) | 588 for (const auto& annotated_image : decoded_images_) { |
119 return; | 589 auto ref_it = decoded_images_ref_counts_.find(annotated_image.first); |
120 image_tasks.erase(task_it); | 590 if (annotated_image.second->is_locked()) { |
| 591 budget.AddUsage(annotated_image.first.target_bytes()); |
| 592 DCHECK(ref_it != decoded_images_ref_counts_.end()) << line; |
| 593 } else { |
| 594 DCHECK(ref_it == decoded_images_ref_counts_.end() || |
| 595 pending_image_tasks_.find(annotated_image.first) != |
| 596 pending_image_tasks_.end()) |
| 597 << line; |
| 598 } |
| 599 } |
| 600 DCHECK_GE(budget.AvailableMemoryBytes(), |
| 601 locked_images_budget_.AvailableMemoryBytes()) |
| 602 << line; |
| 603 #endif // DCHECK_IS_ON() |
| 604 } |
| 605 |
| 606 // ImageDecodeControllerKey |
| 607 ImageDecodeControllerKey ImageDecodeControllerKey::FromDrawImage( |
| 608 const DrawImage& image) { |
| 609 const SkSize& scale = image.scale(); |
| 610 gfx::Size target_size( |
| 611 SkScalarRoundToInt(std::abs(image.src_rect().width() * scale.width())), |
| 612 SkScalarRoundToInt(std::abs(image.src_rect().height() * scale.height()))); |
| 613 |
| 614 // Start with the quality that was requested. |
| 615 SkFilterQuality quality = image.filter_quality(); |
| 616 |
| 617 // Drop down immediately to low quality if this is a negative scale (Skia |
| 618 // doesn't handle this right now). |
| 619 // TODO(vmpstr): We should be able to handle this in the same way that we |
| 620 // handle positive scale, except just flipped around. crbug.com/576389. |
| 621 if (scale.width() < 0.f || scale.height() < 0.f) |
| 622 quality = std::min(quality, kLow_SkFilterQuality); |
| 623 |
| 624 // If we're not going to do a scale, we can use low filter quality. Note that |
| 625 // checking if the sizes are the same is better than checking if scale is 1.f, |
| 626 // because even non-1 scale can result in the same (rounded) width/height. |
| 627 if (target_size.width() == image.src_rect().width() && |
| 628 target_size.height() == image.src_rect().height()) { |
| 629 quality = std::min(quality, kLow_SkFilterQuality); |
| 630 } |
| 631 |
| 632 // Drop from high to medium if the image has perspective applied, the matrix |
| 633 // we applied wasn't decomposable, or if the scaled image will be too large. |
| 634 if (quality == kHigh_SkFilterQuality) { |
| 635 if (image.matrix_has_perspective() || !image.matrix_is_decomposable()) { |
| 636 quality = kMedium_SkFilterQuality; |
| 637 } else { |
| 638 base::CheckedNumeric<size_t> size = 4u; |
| 639 size *= target_size.width(); |
| 640 size *= target_size.height(); |
| 641 if (size.ValueOrDefault(std::numeric_limits<size_t>::max()) > |
| 642 kMaxHighQualityImageSizeBytes) { |
| 643 quality = kMedium_SkFilterQuality; |
| 644 } |
| 645 } |
| 646 } |
| 647 |
| 648 // Drop from medium to low if the matrix we applied wasn't decomposable or if |
| 649 // we're enlarging the image in both dimensions. |
| 650 if (quality == kMedium_SkFilterQuality) { |
| 651 if (!image.matrix_is_decomposable() || |
| 652 (scale.width() >= 1.f && scale.height() >= 1.f)) { |
| 653 quality = kLow_SkFilterQuality; |
| 654 } |
| 655 } |
| 656 |
| 657 return ImageDecodeControllerKey(image.image()->uniqueID(), |
| 658 gfx::SkIRectToRect(image.src_rect()), |
| 659 target_size, quality); |
| 660 } |
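
A worked example of the fallback chain above (standalone sketch, hypothetical
numbers): a 10000x10000 source drawn at half scale with high quality requested
gets demoted to medium, because the 5000x5000 N32 target needs 100MB, above the
64MB cap; and since CanHandleFilterQuality() rejects medium for now, such an
image ends up on the preroll-only path.

    #include <cstdint>
    #include <iostream>

    int main() {
      const int src_w = 10000, src_h = 10000;
      const float scale = 0.5f;
      const int target_w = static_cast<int>(src_w * scale + 0.5f);  // 5000
      const int target_h = static_cast<int>(src_h * scale + 0.5f);  // 5000
      const uint64_t bytes = 4ull * target_w * target_h;  // 100000000
      const uint64_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024;
      // 100MB > 64MB, so high drops to medium. Medium is a downscale with a
      // decomposable matrix, so it stays medium rather than dropping to low.
      std::cout << (bytes > kMaxHighQualityImageSizeBytes) << "\n";  // 1
      return 0;
    }
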
| 661 |
| 662 ImageDecodeControllerKey::ImageDecodeControllerKey( |
| 663 uint32_t image_id, |
| 664 const gfx::Rect& src_rect, |
| 665 const gfx::Size& target_size, |
| 666 SkFilterQuality filter_quality) |
| 667 : image_id_(image_id), |
| 668 src_rect_(src_rect), |
| 669 target_size_(target_size), |
| 670 filter_quality_(filter_quality) {} |
| 671 |
| 672 std::string ImageDecodeControllerKey::ToString() const { |
| 673 std::ostringstream str; |
| 674 str << "id[" << image_id_ << "] src_rect[" << src_rect_.x() << "," |
| 675 << src_rect_.y() << " " << src_rect_.width() << "x" << src_rect_.height() |
| 676 << "] target_size[" << target_size_.width() << "x" |
| 677 << target_size_.height() << "] filter_quality[" << filter_quality_ << "]"; |
| 678 return str.str(); |
| 679 } |
| 680 |
| 681 // DecodedImage |
| 682 ImageDecodeController::DecodedImage::DecodedImage( |
| 683 const SkImageInfo& info, |
| 684 scoped_ptr<base::DiscardableMemory> memory, |
| 685 const SkSize& src_rect_offset) |
| 686 : locked_(true), |
| 687 image_info_(info), |
| 688 memory_(std::move(memory)), |
| 689 src_rect_offset_(src_rect_offset) { |
| 690 image_ = skia::AdoptRef(SkImage::NewFromRaster( |
| 691 image_info_, memory_->data(), image_info_.minRowBytes(), |
| 692 [](const void* pixels, void* context) {}, nullptr)); |
| 693 } |
| 694 |
| 695 ImageDecodeController::DecodedImage::~DecodedImage() {} |
| 696 |
| 697 bool ImageDecodeController::DecodedImage::Lock() { |
| 698 DCHECK(!locked_); |
| 699 bool success = memory_->Lock(); |
| 700 if (!success) |
| 701 return false; |
| 702 locked_ = true; |
| 703 return true; |
| 704 } |
| 705 |
| 706 void ImageDecodeController::DecodedImage::Unlock() { |
| 707 DCHECK(locked_); |
| 708 memory_->Unlock(); |
| 709 locked_ = false; |
| 710 } |
| 711 |
| 712 // MemoryBudget |
| 713 ImageDecodeController::MemoryBudget::MemoryBudget(size_t limit_bytes) |
| 714 : limit_bytes_(limit_bytes), current_usage_bytes_(0u) {} |
| 715 |
| 716 size_t ImageDecodeController::MemoryBudget::AvailableMemoryBytes() const { |
| 717 size_t usage = GetCurrentUsageSafe(); |
| 718 return usage >= limit_bytes_ ? 0u : (limit_bytes_ - usage); |
| 719 } |
| 720 |
| 721 void ImageDecodeController::MemoryBudget::AddUsage(size_t usage) { |
| 722 current_usage_bytes_ += usage; |
| 723 } |
| 724 |
| 725 void ImageDecodeController::MemoryBudget::SubtractUsage(size_t usage) { |
| 726 DCHECK_GE(current_usage_bytes_.ValueOrDefault(0u), usage); |
| 727 current_usage_bytes_ -= usage; |
| 728 } |
| 729 |
| 730 void ImageDecodeController::MemoryBudget::ResetUsage() { |
| 731 current_usage_bytes_ = 0; |
| 732 } |
| 733 |
| 734 size_t ImageDecodeController::MemoryBudget::GetCurrentUsageSafe() const { |
| 735 return current_usage_bytes_.ValueOrDie(); |
121 } | 736 } |
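
Finally, a standalone sketch of how MemoryBudget above behaves at the edges,
with a plain size_t standing in for base::CheckedNumeric: as the header comment
notes, the locked limit can be breached by in-flight work, and in that case
AvailableMemoryBytes() simply clamps to zero rather than going negative.

    #include <cassert>
    #include <cstddef>

    class MemoryBudget {
     public:
      explicit MemoryBudget(size_t limit) : limit_(limit), usage_(0) {}
      size_t AvailableMemoryBytes() const {
        return usage_ >= limit_ ? 0u : limit_ - usage_;
      }
      void AddUsage(size_t n) { usage_ += n; }
      void SubtractUsage(size_t n) { assert(usage_ >= n); usage_ -= n; }

     private:
      size_t limit_;
      size_t usage_;
    };

    int main() {
      MemoryBudget budget(128u * 1024 * 1024);
      budget.AddUsage(100u * 1024 * 1024);
      // Usage may exceed the limit; available space then clamps at zero.
      budget.AddUsage(40u * 1024 * 1024);
      assert(budget.AvailableMemoryBytes() == 0u);
      return 0;
    }
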
122 | 737 |
123 } // namespace cc | 738 } // namespace cc |