Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/tiles/image_decode_controller.h" | 5 #include "cc/tiles/image_decode_controller.h" |
| 6 | 6 |
| 7 #include "base/memory/discardable_memory.h" | |
| 7 #include "cc/debug/devtools_instrumentation.h" | 8 #include "cc/debug/devtools_instrumentation.h" |
| 9 #include "third_party/skia/include/core/SkCanvas.h" | |
| 10 #include "third_party/skia/include/core/SkImage.h" | |
| 8 | 11 |
| 9 namespace cc { | 12 namespace cc { |
| 10 namespace { | 13 namespace { |
| 11 | 14 |
| 15 // The amount of memory we can lock ahead of time (128MB). This limit is only | |
| 16 // used to inform the caller of the amount of space available in the cache. The | |
| 17 // caller can still request tasks which can cause this limit to be breached. | |
| 18 const size_t kLockedMemoryLimitBytes = 128 * 1024 * 1024; | |
| 19 | |
| 20 // The largest single high quality image to try and process. Images above this | |
| 21 // size will drop down to medium quality. | |
| 22 const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024; | |
| 23 | |
| 24 // The number of entries to keep around in the cache. This limit can be breached | |
| 25 // if more items are locked. That is, locked items ignore this limit. | |
| 26 const size_t kMaxItemsInCache = 100; | |
| 27 | |
| 12 class ImageDecodeTaskImpl : public ImageDecodeTask { | 28 class ImageDecodeTaskImpl : public ImageDecodeTask { |
| 13 public: | 29 public: |
| 14 ImageDecodeTaskImpl(ImageDecodeController* controller, | 30 ImageDecodeTaskImpl(ImageDecodeController* controller, |
| 15 const SkImage* image, | 31 const ImageDecodeController::ImageKey& image_key, |
| 16 int layer_id, | 32 const DrawImage& image, |
| 17 uint64_t source_prepare_tiles_id) | 33 uint64_t source_prepare_tiles_id) |
| 18 : controller_(controller), | 34 : controller_(controller), |
| 19 image_(skia::SharePtr(image)), | 35 image_key_(image_key), |
| 20 layer_id_(layer_id), | 36 image_(image), |
| 37 image_ref_(skia::SharePtr(image.image())), | |
| 21 source_prepare_tiles_id_(source_prepare_tiles_id) {} | 38 source_prepare_tiles_id_(source_prepare_tiles_id) {} |
| 22 | 39 |
| 23 // Overridden from Task: | 40 // Overridden from Task: |
| 24 void RunOnWorkerThread() override { | 41 void RunOnWorkerThread() override { |
| 25 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", | 42 TRACE_EVENT1("cc", "ImageDecodeTaskImpl::RunOnWorkerThread", |
| 26 "source_prepare_tiles_id", source_prepare_tiles_id_); | 43 "source_prepare_tiles_id", source_prepare_tiles_id_); |
| 27 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( | 44 devtools_instrumentation::ScopedImageDecodeTask image_decode_task( |
| 28 image_.get()); | 45 image_ref_.get()); |
| 29 controller_->DecodeImage(image_.get()); | 46 controller_->DecodeImage(image_key_, image_); |
| 30 | |
| 31 // Release the reference after decoding image to ensure that it is not kept | |
| 32 // alive unless needed. | |
| 33 image_.clear(); | |
| 34 } | 47 } |
| 35 | 48 |
| 36 // Overridden from TileTask: | 49 // Overridden from TileTask: |
| 37 void ScheduleOnOriginThread(TileTaskClient* client) override {} | 50 void ScheduleOnOriginThread(TileTaskClient* client) override {} |
| 38 void CompleteOnOriginThread(TileTaskClient* client) override { | 51 void CompleteOnOriginThread(TileTaskClient* client) override { |
| 39 controller_->OnImageDecodeTaskCompleted(layer_id_, image_.get(), | 52 controller_->RemovePendingTask(image_key_); |
| 40 !HasFinishedRunning()); | |
| 41 } | 53 } |
| 42 | 54 |
| 43 protected: | 55 protected: |
| 44 ~ImageDecodeTaskImpl() override {} | 56 ~ImageDecodeTaskImpl() override {} |
| 45 | 57 |
| 46 private: | 58 private: |
| 47 ImageDecodeController* controller_; | 59 ImageDecodeController* controller_; |
| 48 skia::RefPtr<const SkImage> image_; | 60 ImageDecodeController::ImageKey image_key_; |
| 49 int layer_id_; | 61 DrawImage image_; |
| 62 skia::RefPtr<const SkImage> image_ref_; | |
| 50 uint64_t source_prepare_tiles_id_; | 63 uint64_t source_prepare_tiles_id_; |
| 51 | 64 |
| 52 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); | 65 DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl); |
| 53 }; | 66 }; |
| 54 | 67 |
| 68 template <typename Type> | |
| 69 typename std::deque<Type>::iterator FindImage( | |
| 70 std::deque<Type>* collection, | |
| 71 const ImageDecodeControllerKey& key) { | |
| 72 return std::find_if(collection->begin(), collection->end(), | |
| 73 [key](const Type& image) { return image.first == key; }); | |
| 74 } | |
| 75 | |
| 76 SkSize GetScaleAdjustment(const ImageDecodeControllerKey& key, | |
| 77 const SkImage* original_image) { | |
| 78 float x_scale = | |
| 79 key.target_size().width() / static_cast<float>(original_image->width()); | |
| 80 float y_scale = | |
| 81 key.target_size().height() / static_cast<float>(original_image->height()); | |
| 82 return SkSize::Make(x_scale, y_scale); | |
| 83 } | |
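
Because the key's target size is produced by rounding the scaled dimensions to whole pixels (see `ImageDecodeControllerKey::FromDrawImage` further down), the effective scale of the decoded image can differ slightly from the scale the caller requested; `GetScaleAdjustment` recovers that effective scale so the rasterizer draws the pre-scaled image at the right size. A minimal standalone sketch of the arithmetic (the 333-pixel width and 0.5 scale are illustrative assumptions, not values from this patch):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const int original_width = 333;   // hypothetical original image width
  const float requested_scale = 0.5f;
  // FromDrawImage rounds the scaled dimension to an integer...
  const int target_width =
      static_cast<int>(std::lround(original_width * requested_scale));
  // ...so GetScaleAdjustment reports the scale that was actually baked in.
  const float effective_scale =
      target_width / static_cast<float>(original_width);
  std::printf("target=%d effective=%.4f\n", target_width, effective_scale);
  // Prints: target=167 effective=0.5015 (not exactly 0.5).
}
```
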
| 84 | |
| 55 } // namespace | 85 } // namespace |
| 56 | 86 |
| 57 ImageDecodeController::ImageDecodeController() {} | 87 ImageDecodeController::ImageDecodeController() |
| 88 : is_using_gpu_rasterization_(false), | |
| 89 locked_images_budget_(kLockedMemoryLimitBytes) {} | |
| 58 | 90 |
| 59 ImageDecodeController::~ImageDecodeController() {} | 91 ImageDecodeController::~ImageDecodeController() {} |
| 60 | 92 |
| 61 scoped_refptr<ImageDecodeTask> ImageDecodeController::GetTaskForImage( | 93 bool ImageDecodeController::GetTaskForImageAndRef( |
| 62 const DrawImage& image, | 94 const DrawImage& image, |
| 63 int layer_id, | 95 uint64_t prepare_tiles_id, |
| 64 uint64_t prepare_tiles_id) { | 96 scoped_refptr<ImageDecodeTask>* task) { |
| 65 uint32_t generation_id = image.image()->uniqueID(); | 97 // If the image already exists or if we're going to create a task for it, then |
| enne (OOO) 2015/12/02 23:33:25: exists | |
| vmpstr 2015/12/03 21:20:23: Done. | |
| 66 scoped_refptr<ImageDecodeTask>& decode_task = | 98 // we need to ref this image. That means the image is or will be in the cache. |
| 67 image_decode_tasks_[layer_id][generation_id]; | 99 // When the ref goes to 0, it will be unpinned but will remain in the cache. |
| 68 if (!decode_task) | 100 // If the image does not fit into the budget, then we don't ref this image, |
| 69 decode_task = CreateTaskForImage(image.image(), layer_id, prepare_tiles_id); | 101 // since it will be decoded at raster time, which is when it will be |
| 70 return decode_task; | 102 // temporarily put in the cache. |
| 71 } | 103 ImageKey key = ImageKey::FromDrawImage(image); |
| 72 | 104 TRACE_EVENT1("cc", "ImageDecodeController::GetTaskForImageAndRef", "key", |
| 73 scoped_refptr<ImageDecodeTask> ImageDecodeController::CreateTaskForImage( | 105 key.ToString()); |
| 74 const SkImage* image, | 106 // If we're not going to do a scale, we will just create a task to preroll the |
| 75 int layer_id, | 107 // image the first time we see it. This doesn't need to account for memory. |
| enne (OOO) 2015/12/02 23:33:25: Should we preroll once per frame per image to fix… | |
| vmpstr 2015/12/03 21:20:23: I don't think that would work for GPU raster, because… | |
| 76 uint64_t prepare_tiles_id) { | 108 // TODO(vmpstr): We can also lock the original sized image, in which case it |
| 109 // does require memory bookkeeping. | |
| 110 if (!ShouldDecodeAndScaleImage(key, image)) { | |
| 111 base::AutoLock lock(lock_); | |
| 112 if (prerolled_images_.count(key.image_id()) == 0) { | |
| 113 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; | |
| 114 if (!existing_task) { | |
| 115 existing_task = make_scoped_refptr( | |
| 116 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); | |
| 117 } | |
| 118 *task = existing_task; | |
| 119 } else { | |
| 120 *task = nullptr; | |
| 121 } | |
| 122 return false; | |
| 123 } | |
| 124 | |
| 125 base::AutoLock lock(lock_); | |
| 126 | |
| 127 // If we already have the image in cache, then we can return it. | |
| 128 auto decoded_it = FindImage(&decoded_images_, key); | |
| 129 bool new_image_fits_in_memory = | |
| 130 locked_images_budget_.AvailableMemoryBytes() >= key.target_bytes(); | |
| 131 if (decoded_it != decoded_images_.end()) { | |
| 132 if (decoded_it->second->is_locked() || | |
| 133 (new_image_fits_in_memory && decoded_it->second->Lock())) { | |
| 134 RefImage(key); | |
| 135 *task = nullptr; | |
| 136 SanityCheckState(__LINE__, true); | |
| 137 return true; | |
| 138 } | |
| 139 // If the image fits in memory, then we at least tried to lock it and | |
| 140 // failed. This means that it's not valid anymore. | |
| 141 if (new_image_fits_in_memory) | |
| 142 decoded_images_.erase(decoded_it); | |
| 143 } | |
| 144 | |
| 145 // If the task exists, return it. | |
| 146 scoped_refptr<ImageDecodeTask>& existing_task = pending_image_tasks_[key]; | |
| 147 if (existing_task) { | |
| 148 RefImage(key); | |
| 149 *task = existing_task; | |
| 150 SanityCheckState(__LINE__, true); | |
| 151 return true; | |
| 152 } | |
| 153 | |
| 154 // At this point, we have to create a new image/task, so we need to abort if | |
| 155 // it doesn't fit into memory. | |
| 156 if (!new_image_fits_in_memory) { | |
| 157 *task = nullptr; | |
| 158 SanityCheckState(__LINE__, true); | |
| 159 return false; | |
| 160 } | |
| 161 | |
| 162 // Actually create the task. RefImage will account for memory on the first | |
| enne (OOO) 2015/12/02 23:33:25: create the...task? | |
| vmpstr 2015/12/03 21:20:23: Yes. Thanks! Done. | |
| 163 // ref, so verify that this is going to be the first ref. | |
| 164 DCHECK(decoded_images_ref_counts_.find(key) == | |
| 165 decoded_images_ref_counts_.end()); | |
| 166 | |
| 167 RefImage(key); | |
| 168 existing_task = make_scoped_refptr( | |
| 169 new ImageDecodeTaskImpl(this, key, image, prepare_tiles_id)); | |
| 170 *task = existing_task; | |
| 171 SanityCheckState(__LINE__, true); | |
| 172 return true; | |
| 173 } | |
| 174 | |
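
For context, a sketch of how a caller is expected to use this contract (the call site below is an assumption, not part of this patch): a `true` return means the controller has taken a ref that the caller must balance, whether or not a task was handed back; a `false` return means no ref was taken and the image will be decoded at raster time.

```cpp
// Hypothetical call site; assumes a ScheduleTask() helper exists.
void PrepareTileImage(ImageDecodeController* controller,
                      const DrawImage& draw_image,
                      uint64_t prepare_tiles_id) {
  scoped_refptr<ImageDecodeTask> task;
  bool reffed =
      controller->GetTaskForImageAndRef(draw_image, prepare_tiles_id, &task);
  if (task)
    ScheduleTask(task);  // |task| may be null even when |reffed| is true
  // ... raster the tile using the decoded image ...
  if (reffed)
    controller->UnrefImage(draw_image);  // balance the ref taken above
}
```
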
| 175 void ImageDecodeController::RefImage(const ImageKey& key) { | |
| 176 TRACE_EVENT1("cc", "ImageDecodeController::RefImage", "key", key.ToString()); | |
| 177 lock_.AssertAcquired(); | |
| 178 int ref = ++decoded_images_ref_counts_[key]; | |
| 179 if (ref == 1) { | |
| 180 DCHECK_GE(locked_images_budget_.AvailableMemoryBytes(), key.target_bytes()); | |
| 181 locked_images_budget_.AddUsage(key.target_bytes()); | |
| 182 } | |
| 183 } | |
| 184 | |
| 185 void ImageDecodeController::UnrefImage(const DrawImage& image) { | |
| 186 // When we unref the image, there are several situations we need to consider: | |
| 187 // 1. The ref did not reach 0, which means we have to keep the image locked. | |
| 188 // 2. The ref reached 0, we should unlock it. | |
| 189 // 2a. The image isn't in the locked cache because we didn't get to decode | |
| 190 // it yet. | |
| 191 // 2b. The image is in the cache: unlock it but keep it in the list. | |
| 192 const ImageKey& key = ImageKey::FromDrawImage(image); | |
| 193 DCHECK(ShouldDecodeAndScaleImage(key, image)); | |
| 194 TRACE_EVENT1("cc", "ImageDecodeController::UnrefImage", "key", | |
| 195 key.ToString()); | |
| 196 | |
| 197 base::AutoLock lock(lock_); | |
| 198 auto ref_count_it = decoded_images_ref_counts_.find(key); | |
| 199 DCHECK(ref_count_it != decoded_images_ref_counts_.end()); | |
| 200 | |
| 201 --ref_count_it->second; | |
| 202 if (ref_count_it->second == 0) { | |
| 203 decoded_images_ref_counts_.erase(ref_count_it); | |
| 204 locked_images_budget_.SubtractUsage(key.target_bytes()); | |
| 205 | |
| 206 auto decoded_image_it = FindImage(&decoded_images_, key); | |
| 207 // If we've never decoded the image before the ref reached 0, then we wouldn't | |
| 208 // have it in our cache. This would happen if we canceled tasks. | |
| 209 if (decoded_image_it == decoded_images_.end()) { | |
| 210 SanityCheckState(__LINE__, true); | |
| 211 return; | |
| 212 } | |
| 213 DCHECK(decoded_image_it->second->is_locked()); | |
| 214 decoded_image_it->second->Unlock(); | |
| 215 } | |
| 216 SanityCheckState(__LINE__, true); | |
| 217 } | |
| 218 | |
| 219 void ImageDecodeController::DecodeImage(const ImageKey& key, | |
| 220 const DrawImage& image) { | |
| 221 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImage", "key", | |
| 222 key.ToString()); | |
| 223 if (!ShouldDecodeAndScaleImage(key, image)) { | |
| 224 image.image()->preroll(); | |
| 225 | |
| 226 base::AutoLock lock(lock_); | |
| 227 prerolled_images_.insert(key.image_id()); | |
| 228 // Erase the pending task from the queue, since the task won't be doing | |
| 229 // anything useful after this function terminates. Since we don't preroll | |
| 230 // images twice, this is actually not necessary but it behaves similar to | |
| 231 // the other code path: when this function finishes, the task isn't in the | |
| 232 // pending_image_tasks_ list. | |
| 233 pending_image_tasks_.erase(key); | |
| 234 return; | |
| 235 } | |
| 236 | |
| 237 base::AutoLock lock(lock_); | |
| 238 | |
| 239 auto image_it = FindImage(&decoded_images_, key); | |
| 240 if (image_it != decoded_images_.end()) { | |
| 241 if (image_it->second->is_locked() || image_it->second->Lock()) { | |
| 242 pending_image_tasks_.erase(key); | |
| 243 return; | |
| 244 } | |
| 245 decoded_images_.erase(image_it); | |
| 246 } | |
| 247 | |
| 248 scoped_refptr<DecodedImage> decoded_image; | |
| 249 { | |
| 250 base::AutoUnlock unlock(lock_); | |
| 251 decoded_image = DecodeImageInternal(key, image.image()); | |
| 252 } | |
| 253 | |
| 254 // Erase the pending task from the queue, since the task won't be doing | |
| 255 // anything useful after this function terminates. That is, if this image | |
| 256 // needs to be decoded again, we have to create a new task. | |
| 257 pending_image_tasks_.erase(key); | |
| 258 | |
| 259 // We could have finished all of the raster tasks (cancelled) while this image | |
| 260 // decode task was running, which means that we now have a locked image but no | |
| 261 // ref counts. Unlock it immediately in this case. | |
| 262 if (decoded_images_ref_counts_.find(key) == | |
| 263 decoded_images_ref_counts_.end()) { | |
| 264 decoded_image->Unlock(); | |
| 265 } | |
| 266 | |
| 267 // At this point, it could have been the case that this image was decoded in | |
| 268 // place by an already running raster task from a previous schedule. If that's | |
| 269 // the case, then it would have already been placed into the cache (possibly | |
| 270 // locked). Reuse it if it is still locked; otherwise remove the stale entry. | |
| 271 image_it = FindImage(&decoded_images_, key); | |
| 272 if (image_it != decoded_images_.end()) { | |
| 273 if (image_it->second->is_locked() || image_it->second->Lock()) { | |
| 274 pending_image_tasks_.erase(key); | |
| 275 return; | |
| 276 } | |
| 277 decoded_images_.erase(image_it); | |
| 278 } | |
| 279 decoded_images_.push_back(AnnotatedDecodedImage(key, decoded_image)); | |
| 280 SanityCheckState(__LINE__, true); | |
| 281 } | |
| 282 | |
| 283 scoped_refptr<ImageDecodeController::DecodedImage> | |
| 284 ImageDecodeController::DecodeImageInternal(const ImageKey& key, | |
| 285 const SkImage* image) { | |
| 286 TRACE_EVENT1("cc", "ImageDecodeController::DecodeImageInternal", "key", | |
| 287 key.ToString()); | |
| 288 | |
| 289 // Get the decoded image first (at the original scale). | |
| 290 SkImageInfo decoded_info = | |
| 291 SkImageInfo::MakeN32Premul(image->width(), image->height()); | |
| 292 scoped_ptr<uint8_t[]> decoded_pixels( | |
| 293 new uint8_t[decoded_info.minRowBytes() * decoded_info.height()]); | |
| 294 bool result = image->readPixels(decoded_info, decoded_pixels.get(), | |
| 295 decoded_info.minRowBytes(), 0, 0, | |
| 296 SkImage::kAllow_CachingHint); | |
| 297 DCHECK(result); | |
| 298 | |
| 299 skia::RefPtr<SkImage> decoded_image = skia::AdoptRef(SkImage::NewFromRaster( | |
| 300 decoded_info, decoded_pixels.get(), decoded_info.minRowBytes(), | |
| 301 [](const void* pixels, void* context) {}, nullptr)); | |
| 302 | |
| 303 // Now scale the pixels into the destination size. | |
| 304 SkImageInfo scaled_info = SkImageInfo::MakeN32Premul( | |
| 305 key.target_size().width(), key.target_size().height()); | |
| 306 scoped_ptr<base::DiscardableMemory> scaled_pixels = | |
| 307 base::DiscardableMemoryAllocator::GetInstance() | |
| 308 ->AllocateLockedDiscardableMemory(scaled_info.minRowBytes() * | |
| 309 scaled_info.height()); | |
| 310 SkPixmap scaled_pixmap(scaled_info, scaled_pixels->data(), | |
| 311 scaled_info.minRowBytes()); | |
| 312 // TODO(vmpstr): Start handling more than just high filter quality. | |
| 313 DCHECK_EQ(kHigh_SkFilterQuality, key.filter_quality()); | |
| 314 result = decoded_image->scalePixels(scaled_pixmap, kHigh_SkFilterQuality, | |
| 315 SkImage::kDisallow_CachingHint); | |
| 316 DCHECK(result); | |
| 77 return make_scoped_refptr( | 317 return make_scoped_refptr( |
| 78 new ImageDecodeTaskImpl(this, image, layer_id, prepare_tiles_id)); | 318 new DecodedImage(scaled_info, std::move(scaled_pixels))); |
| 79 } | 319 } |
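
One detail worth calling out in `DecodeImageInternal`: `SkImage::NewFromRaster` does not copy the pixels, and the empty release procedure passed here means the `SkImage` never frees them either, so the backing allocation must outlive the image. That holds above because `decoded_pixels` only needs to survive the `scalePixels` call, and the discardable `scaled_pixels` is moved into the `DecodedImage` that owns the final `SkImage`. A reduced sketch of the same idiom (the function name and `pixels` buffer are assumptions for illustration):

```cpp
// Sketch using the same Skia call as the patch; |pixels| is a caller-owned
// buffer. The empty release proc tells Skia the caller keeps ownership, so
// the returned image is only valid while |pixels| stays alive and locked.
SkImage* WrapCallerOwnedPixels(const SkImageInfo& info, void* pixels) {
  return SkImage::NewFromRaster(
      info, pixels, info.minRowBytes(),
      [](const void* pixels, void* context) {}, nullptr);
}
```
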
| 80 | 320 |
| 81 void ImageDecodeController::DecodeImage(const SkImage* image) { | 321 DecodedDrawImage ImageDecodeController::GetDecodedImageForDraw( |
| 82 image->preroll(); | 322 const DrawImage& draw_image) { |
| 83 } | 323 ImageKey key = ImageKey::FromDrawImage(draw_image); |
| 84 | 324 TRACE_EVENT1("cc", "ImageDecodeController::GetDecodedImageAndRef", "key", |
| 85 void ImageDecodeController::AddLayerUsedCount(int layer_id) { | 325 key.ToString()); |
| 86 ++used_layer_counts_[layer_id]; | 326 if (!ShouldDecodeAndScaleImage(key, draw_image)) { |
| 87 } | 327 return DecodedDrawImage(draw_image.image(), SkSize::Make(1.f, 1.f), |
| 88 | 328 draw_image.filter_quality()); |
| 89 void ImageDecodeController::SubtractLayerUsedCount(int layer_id) { | 329 } |
| 90 if (--used_layer_counts_[layer_id]) | 330 |
| 331 base::AutoLock lock(lock_); | |
| 332 auto decoded_images_it = FindImage(&decoded_images_, key); | |
| 333 // If we found the image and it's locked, then return it. If it's not locked, | |
| 334 // erase it from the cache since it might be put into the at-raster cache. | |
| 335 scoped_refptr<DecodedImage> decoded_image; | |
| 336 if (decoded_images_it != decoded_images_.end()) { | |
| 337 decoded_image = decoded_images_it->second; | |
| 338 if (decoded_image->is_locked()) { | |
| 339 RefImage(key); | |
| 340 SanityCheckState(__LINE__, true); | |
| 341 return DecodedDrawImage(decoded_image->image(), | |
| 342 GetScaleAdjustment(key, draw_image.image()), | |
| 343 kLow_SkFilterQuality); | |
| 344 } else { | |
| 345 decoded_images_.erase(decoded_images_it); | |
| 346 } | |
| 347 } | |
| 348 | |
| 349 // See if another thread already decoded this image at raster time. If so, we | |
| 350 // can just use that result directly. | |
| 351 auto at_raster_images_it = FindImage(&at_raster_decoded_images_, key); | |
| 352 if (at_raster_images_it != at_raster_decoded_images_.end()) { | |
| 353 DCHECK(at_raster_images_it->second->is_locked()); | |
| 354 RefAtRasterImage(key); | |
| 355 SanityCheckState(__LINE__, true); | |
| 356 auto decoded_draw_image = DecodedDrawImage( | |
| 357 at_raster_images_it->second->image(), | |
| 358 GetScaleAdjustment(key, draw_image.image()), kLow_SkFilterQuality); | |
| 359 decoded_draw_image.set_at_raster_decode(true); | |
| 360 return decoded_draw_image; | |
| 361 } | |
| 362 | |
| 363 // Now we know that we don't have a locked image, and we seem to be the first | |
| 364 // thread encountering this image (that might not be true, since other threads | |
| 365 // might be decoding it already). This means that we need to decode the image | |
| 366 // assuming we can't lock the one we found in the cache. | |
| 367 bool check_at_raster_cache = false; | |
| 368 if (!decoded_image || !decoded_image->Lock()) { | |
| 369 // Note that we have to release the lock, since this lock is also accessed | |
| 370 // on the compositor thread. This means holding on to the lock might stall | |
| 371 // the compositor thread for the duration of the decode! | |
| 372 base::AutoUnlock unlock(lock_); | |
| 373 decoded_image = DecodeImageInternal(key, draw_image.image()); | |
| 374 check_at_raster_cache = true; | |
| 375 } | |
| 376 | |
| 377 // While the lock was released, another thread could have decoded this | |
| 378 // image already and put it in the at-raster cache. Look it up | |
| 379 // first. | |
| 380 bool need_to_add_image_to_cache = true; | |
| 381 if (check_at_raster_cache) { | |
| 382 at_raster_images_it = FindImage(&at_raster_decoded_images_, key); | |
| 383 if (at_raster_images_it != at_raster_decoded_images_.end()) { | |
| 384 // We have to drop our decode, since the one in the cache is being used by | |
| 385 // another thread. | |
| 386 decoded_image->Unlock(); | |
| 387 decoded_image = at_raster_images_it->second; | |
| 388 need_to_add_image_to_cache = false; | |
| 389 } | |
| 390 } | |
| 391 | |
| 392 // If we really are the first ones, or if the other thread already unlocked | |
| 393 // the image, then put our work into the at-raster cache. | |
| 394 if (need_to_add_image_to_cache) { | |
| 395 at_raster_decoded_images_.push_back( | |
| 396 AnnotatedDecodedImage(key, decoded_image)); | |
| 397 } | |
| 398 | |
| 399 DCHECK(decoded_image); | |
| 400 DCHECK(decoded_image->is_locked()); | |
| 401 RefAtRasterImage(key); | |
| 402 SanityCheckState(__LINE__, true); | |
| 403 auto decoded_draw_image = DecodedDrawImage( | |
| 404 decoded_image->image(), GetScaleAdjustment(key, draw_image.image()), | |
| 405 kLow_SkFilterQuality); | |
| 406 decoded_draw_image.set_at_raster_decode(true); | |
| 407 return decoded_draw_image; | |
| 408 } | |
| 409 | |
| 410 void ImageDecodeController::DrawWithImageFinished( | |
| 411 const DrawImage& image, | |
| 412 const DecodedDrawImage& decoded_image) { | |
| 413 TRACE_EVENT1("cc", "ImageDecodeController::DrawWithImageFinished", "key", | |
| 414 ImageKey::FromDrawImage(image).ToString()); | |
| 415 ImageKey key = ImageKey::FromDrawImage(image); | |
| 416 if (!ShouldDecodeAndScaleImage(key, image)) | |
| 91 return; | 417 return; |
| 92 | 418 |
| 93 // Clean up decode tasks once a layer is no longer used. | 419 if (decoded_image.is_at_raster_decode()) |
| 94 used_layer_counts_.erase(layer_id); | 420 UnrefAtRasterImage(key); |
| 95 image_decode_tasks_.erase(layer_id); | 421 else |
| 96 } | 422 UnrefImage(image); |
| 97 | 423 SanityCheckState(__LINE__, false); |
| 98 void ImageDecodeController::OnImageDecodeTaskCompleted(int layer_id, | 424 } |
| 99 const SkImage* image, | 425 |
| 100 bool was_canceled) { | 426 void ImageDecodeController::RefAtRasterImage(const ImageKey& key) { |
| 101 // If the task has successfully finished, then keep the task until the layer | 427 TRACE_EVENT1("cc", "ImageDecodeController::RefAtRasterImage", "key", |
| 102 // is no longer in use. This ensures that we only decode an image once. | 428 key.ToString()); |
| 103 // TODO(vmpstr): Remove this when decode lifetime is controlled by cc. | 429 DCHECK(FindImage(&at_raster_decoded_images_, key) != |
| 104 if (!was_canceled) | 430 at_raster_decoded_images_.end()); |
| 431 ++at_raster_decoded_images_ref_counts_[key]; | |
| 432 } | |
| 433 | |
| 434 void ImageDecodeController::UnrefAtRasterImage(const ImageKey& key) { | |
| 435 TRACE_EVENT1("cc", "ImageDecodeController::UnrefAtRasterImage", "key", | |
| 436 key.ToString()); | |
| 437 base::AutoLock lock(lock_); | |
| 438 | |
| 439 auto ref_it = at_raster_decoded_images_ref_counts_.find(key); | |
| 440 DCHECK(ref_it != at_raster_decoded_images_ref_counts_.end()); | |
| 441 --ref_it->second; | |
| 442 if (ref_it->second == 0) { | |
| 443 at_raster_decoded_images_ref_counts_.erase(ref_it); | |
| 444 auto at_raster_image_it = FindImage(&at_raster_decoded_images_, key); | |
| 445 DCHECK(at_raster_image_it != at_raster_decoded_images_.end()); | |
| 446 | |
| 447 // The ref for our image reached 0 and it's still locked. We need to figure | |
| 448 // out the best thing to do with the image. There are several | |
| 449 // situations: | |
| 450 // 1. The image is not in the main cache and... | |
| 451 // 1a. ... its ref count is 0: unlock our image and put it in the main | |
| 452 // cache. | |
| 453 // 1b. ... ref count is not 0: keep the image locked and put it in the | |
| 454 // main cache. | |
| 455 // 2. The image is in the main cache... | |
| 456 // 2a. ... and is locked: unlock our image and discard it | |
| 457 // 2b. ... and is unlocked and... | |
| 458 // 2b1. ... its ref count is 0: unlock our image and replace the | |
| 459 // existing one with ours. | |
| 460 // 2b2. ... its ref count is not 0: keep our image locked and replace | |
| enne (OOO) 2015/12/02 23:33:25: Anything that gets into 2b2 outline form scares me | |
| vmpstr 2015/12/03 21:20:23: I've added a bunch of tests that I think cover all… | |
| 461 // the existing one with ours. | |
| 462 auto image_it = FindImage(&decoded_images_, key); | |
| 463 if (image_it == decoded_images_.end()) { | |
| 464 if (decoded_images_ref_counts_.find(key) == | |
| 465 decoded_images_ref_counts_.end()) { | |
| 466 at_raster_image_it->second->Unlock(); | |
| 467 } | |
| 468 decoded_images_.push_back(*at_raster_image_it); | |
| 469 } else if (image_it->second->is_locked()) { | |
| 470 at_raster_image_it->second->Unlock(); | |
| 471 } else { | |
| 472 if (decoded_images_ref_counts_.find(key) == | |
| 473 decoded_images_ref_counts_.end()) { | |
| 474 at_raster_image_it->second->Unlock(); | |
| 475 } | |
| 476 decoded_images_.erase(image_it); | |
| 477 decoded_images_.push_back(*at_raster_image_it); | |
| 478 } | |
| 479 at_raster_decoded_images_.erase(at_raster_image_it); | |
| 480 } | |
| 481 } | |
| 482 | |
| 483 bool ImageDecodeController::ShouldDecodeAndScaleImage(const ImageKey& key, | |
| 484 const DrawImage& image) { | |
| 485 // TODO(vmpstr): Handle GPU rasterization. | |
| 486 if (is_using_gpu_rasterization_) | |
| 487 return false; | |
| 488 | |
| 489 if (!CanHandleFilterQuality(key.filter_quality())) | |
| 490 return false; | |
| 491 | |
| 492 // TODO(vmpstr): When we can lock SkImages directly, this condition should be | |
| 493 // modified. However, note that code needs to be added elsewhere to special | |
| 494 // case this. | |
| 495 if (key.target_size().width() == image.image()->width() && | |
| 496 key.target_size().height() == image.image()->height()) { | |
| 497 return false; | |
| 498 } | |
| 499 return true; | |
| 500 } | |
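
Putting the checks together, here is a standalone restatement of the predicate with plain values (the enum and function names below are illustrative, not the patch's):

```cpp
#include <cstdio>

enum FilterQuality { kNone, kLow, kMedium, kHigh };

// Mirrors the logic above: only software raster, high filter quality, and a
// real scale change qualify for an ahead-of-time decode-and-scale.
bool ShouldDecodeAndScale(bool gpu_raster, FilterQuality quality,
                          int target_w, int target_h,
                          int image_w, int image_h) {
  if (gpu_raster)
    return false;  // GPU rasterization is a TODO in the patch
  if (quality != kHigh)
    return false;  // low/none are cheap; medium needs mipmaps (also a TODO)
  if (target_w == image_w && target_h == image_h)
    return false;  // identity scale: the image is only prerolled
  return true;
}

int main() {
  std::printf("%d\n", ShouldDecodeAndScale(false, kHigh, 50, 50, 100, 100));
  // Prints 1: a high-quality downscale on the software path is cached.
}
```
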
| 501 | |
| 502 bool ImageDecodeController::CanHandleFilterQuality( | |
| 503 SkFilterQuality filter_quality) { | |
| 504 // We don't need to handle low quality filters. | |
| 505 if (filter_quality == kLow_SkFilterQuality || | |
| 506 filter_quality == kNone_SkFilterQuality) { | |
| 507 return false; | |
| 508 } | |
| 509 | |
| 510 // TODO(vmpstr): We need to start caching mipmaps for medium quality and | |
| 511 // caching the interpolated values from those. For now, we don't have this. | |
| 512 if (filter_quality == kMedium_SkFilterQuality) | |
| 513 return false; | |
| 514 DCHECK(filter_quality == kHigh_SkFilterQuality); | |
| 515 return true; | |
| 516 } | |
| 517 | |
| 518 void ImageDecodeController::ReduceCacheUsage() { | |
| 519 TRACE_EVENT0("cc", "ImageDecodeController::ReduceCacheUsage"); | |
| 520 base::AutoLock lock(lock_); | |
| 521 size_t num_to_remove = (decoded_images_.size() > kMaxItemsInCache) | |
| 522 ? (decoded_images_.size() - kMaxItemsInCache) | |
| 523 : 0; | |
| 524 for (auto it = decoded_images_.begin(); | |
| 525 num_to_remove != 0 && it != decoded_images_.end();) { | |
| 526 if (it->second->is_locked()) { | |
| 527 ++it; | |
| 528 continue; | |
| 529 } | |
| 530 | |
| 531 it = decoded_images_.erase(it); | |
| 532 --num_to_remove; | |
| 533 } | |
| 534 } | |
| 535 | |
| 536 void ImageDecodeController::RemovePendingTask(const ImageKey& key) { | |
| 537 base::AutoLock lock(lock_); | |
| 538 pending_image_tasks_.erase(key); | |
| 539 } | |
| 540 | |
| 541 void ImageDecodeController::SetIsUsingGpuRasterization( | |
| 542 bool is_using_gpu_rasterization) { | |
| 543 if (is_using_gpu_rasterization_ == is_using_gpu_rasterization) | |
| 105 return; | 544 return; |
| 106 | 545 is_using_gpu_rasterization_ = is_using_gpu_rasterization; |
| 107 // Otherwise, we have to clean up the task so that a new one can be created if | 546 |
| 108 // we need to decode the image again. | 547 base::AutoLock lock(lock_); |
| 109 LayerImageTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id); | 548 |
| 110 if (layer_it == image_decode_tasks_.end()) | 549 DCHECK_EQ(0u, decoded_images_ref_counts_.size()); |
| enne (OOO) 2015/12/02 23:33:25: This is a lot of surprising checks in a setter function… | |
| vmpstr 2015/12/03 21:20:23: Done. | |
| 111 return; | 550 DCHECK_EQ(0u, at_raster_decoded_images_ref_counts_.size()); |
| 112 | 551 DCHECK(std::find_if(decoded_images_.begin(), decoded_images_.end(), |
| 113 ImageTaskMap& image_tasks = layer_it->second; | 552 [](const AnnotatedDecodedImage& image) { |
| 114 ImageTaskMap::iterator task_it = image_tasks.find(image->uniqueID()); | 553 return image.second->is_locked(); |
| 115 if (task_it == image_tasks.end()) | 554 }) == decoded_images_.end()); |
| 116 return; | 555 DCHECK(std::find_if(at_raster_decoded_images_.begin(), |
| 117 image_tasks.erase(task_it); | 556 at_raster_decoded_images_.end(), |
| 557 [](const AnnotatedDecodedImage& image) { | |
| 558 return image.second->is_locked(); | |
| 559 }) == at_raster_decoded_images_.end()); | |
| 560 decoded_images_.clear(); | |
| 561 at_raster_decoded_images_.clear(); | |
| 562 } | |
| 563 | |
| 564 size_t ImageDecodeController::SanityCheckState(int line, bool lock_acquired) { | |
| 565 #if DCHECK_IS_ON() | |
| 566 if (!lock_acquired) { | |
| 567 base::AutoLock lock(lock_); | |
| 568 return SanityCheckState(line, true); | |
| 569 } | |
| 570 | |
| 571 MemoryBudget budget(kLockedMemoryLimitBytes); | |
| 572 for (const auto& annotated_image : decoded_images_) { | |
| 573 auto ref_it = decoded_images_ref_counts_.find(annotated_image.first); | |
| 574 if (annotated_image.second->is_locked()) { | |
| 575 budget.AddUsage(annotated_image.first.target_bytes()); | |
| 576 DCHECK(ref_it != decoded_images_ref_counts_.end()) << line; | |
| 577 } else { | |
| 578 DCHECK(ref_it == decoded_images_ref_counts_.end() || | |
| 579 pending_image_tasks_.find(annotated_image.first) != | |
| 580 pending_image_tasks_.end()) | |
| 581 << line; | |
| 582 } | |
| 583 } | |
| 584 DCHECK_GE(budget.AvailableMemoryBytes(), | |
| 585 locked_images_budget_.AvailableMemoryBytes()) | |
| 586 << line; | |
| 587 return budget.AvailableMemoryBytes(); | |
| 588 #else | |
| 589 return 0u; | |
| 590 #endif // DCHECK_IS_ON() | |
| 591 } | |
| 592 | |
| 593 static bool ScaleIsNearlyIdentity(const SkSize& scale) { | |
| 594 return std::abs(scale.width() - 1.f) < FLT_EPSILON && | |
| 595 std::abs(scale.height() - 1.f) < FLT_EPSILON; | |
| 596 } | |
| 597 | |
| 598 // ImageDecodeControllerKey | |
| 599 ImageDecodeControllerKey ImageDecodeControllerKey::FromDrawImage( | |
| 600 const DrawImage& image) { | |
| 601 SkSize scale = image.scale(); | |
| 602 SkFilterQuality quality = image.filter_quality(); | |
| 603 if (ScaleIsNearlyIdentity(scale)) { | |
| 604 scale.set(1.f, 1.f); | |
| 605 quality = kLow_SkFilterQuality; | |
| 606 } | |
| 607 | |
| 608 gfx::Size target_size( | |
| 609 std::abs(SkScalarRoundToInt(image.image()->width() * scale.width())), | |
| 610 std::abs(SkScalarRoundToInt(image.image()->height() * scale.height()))); | |
| 611 | |
| 612 if (quality == kHigh_SkFilterQuality) { | |
| 613 if (image.matrix_has_perspective() || !image.matrix_is_decomposable()) { | |
| 614 quality = kMedium_SkFilterQuality; | |
| 615 } else { | |
| 616 base::CheckedNumeric<size_t> size = 4; | |
| 617 size *= target_size.width(); | |
| 618 size *= target_size.height(); | |
| 619 if (size.ValueOrDefault(std::numeric_limits<size_t>::max()) > | |
| 620 kMaxHighQualityImageSizeBytes) { | |
| 621 quality = kMedium_SkFilterQuality; | |
| 622 } | |
| 623 } | |
| 624 } | |
| 625 | |
| 626 if (quality == kMedium_SkFilterQuality) { | |
| 627 if (!image.matrix_is_decomposable() || | |
| 628 (scale.width() >= 1.f && scale.height() >= 1.f)) { | |
| 629 quality = kLow_SkFilterQuality; | |
| 630 } | |
| 631 } | |
| 632 | |
| 633 return ImageDecodeControllerKey(image.image()->uniqueID(), target_size, | |
| 634 quality); | |
| 635 } | |
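
A worked example of the demotion chain (the 5000x4000 target size is an illustrative assumption): at high quality the prospective decode is 4 bytes/pixel x 5000 x 4000 = 80,000,000 bytes, which exceeds kMaxHighQualityImageSizeBytes (64 MiB = 67,108,864 bytes), so the key drops to medium; and since medium is not yet cacheable here, CanHandleFilterQuality then rejects it. A quick check of the size math:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kMaxHighQualityImageSizeBytes = 64 * 1024 * 1024;  // 64 MiB
  const size_t width = 5000, height = 4000;  // hypothetical target size
  const size_t bytes = 4 * width * height;   // N32 premul: 4 bytes per pixel
  std::printf("%zu bytes -> %s quality\n", bytes,
              bytes > kMaxHighQualityImageSizeBytes ? "medium" : "high");
  // Prints: 80000000 bytes -> medium quality (80 MB exceeds the 64 MiB cap).
}
```
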
| 636 | |
| 637 ImageDecodeControllerKey::ImageDecodeControllerKey( | |
| 638 uint32_t image_id, | |
| 639 const gfx::Size& size, | |
| 640 SkFilterQuality filter_quality) | |
| 641 : image_id_(image_id), size_(size), filter_quality_(filter_quality) {} | |
| 642 | |
| 643 std::string ImageDecodeControllerKey::ToString() const { | |
| 644 std::ostringstream str; | |
| 645 str << "id[" << image_id_ << "] size[" << size_.width() << "x" | |
| 646 << size_.height() << "] filter_quality[" << filter_quality_ << "]"; | |
| 647 return str.str(); | |
| 648 } | |
| 649 | |
| 650 // DecodedImage | |
| 651 ImageDecodeController::DecodedImage::DecodedImage( | |
| 652 const SkImageInfo& info, | |
| 653 scoped_ptr<base::DiscardableMemory> memory) | |
| 654 : locked_(true), image_info_(info), memory_(memory.Pass()) { | |
| 655 image_ = skia::AdoptRef(SkImage::NewFromRaster( | |
| 656 image_info_, memory_->data(), image_info_.minRowBytes(), | |
| 657 [](const void* pixels, void* context) {}, nullptr)); | |
| 658 } | |
| 659 | |
| 660 ImageDecodeController::DecodedImage::~DecodedImage() {} | |
| 661 | |
| 662 bool ImageDecodeController::DecodedImage::Lock() { | |
| 663 DCHECK(!locked_); | |
| 664 bool success = memory_->Lock(); | |
| 665 if (!success) | |
| 666 return false; | |
| 667 locked_ = true; | |
| 668 return true; | |
| 669 } | |
| 670 | |
| 671 void ImageDecodeController::DecodedImage::Unlock() { | |
| 672 DCHECK(locked_); | |
| 673 memory_->Unlock(); | |
| 674 locked_ = false; | |
| 675 } | |
| 676 | |
| 677 // MemoryBudget | |
| 678 ImageDecodeController::MemoryBudget::MemoryBudget(size_t limit_bytes) | |
| 679 : limit_bytes_(limit_bytes), current_usage_bytes_(0u) {} | |
| 680 | |
| 681 size_t ImageDecodeController::MemoryBudget::AvailableMemoryBytes() const { | |
| 682 size_t usage = GetCurrentUsageSafe(); | |
| 683 return usage >= limit_bytes_ ? 0u : (limit_bytes_ - usage); | |
| 684 } | |
| 685 | |
| 686 void ImageDecodeController::MemoryBudget::AddUsage(size_t usage) { | |
| 687 current_usage_bytes_ += usage; | |
| 688 } | |
| 689 | |
| 690 void ImageDecodeController::MemoryBudget::SubtractUsage(size_t usage) { | |
| 691 DCHECK_GE(current_usage_bytes_.ValueOrDefault(0u), usage); | |
| 692 current_usage_bytes_ -= usage; | |
| 693 } | |
| 694 | |
| 695 void ImageDecodeController::MemoryBudget::ResetUsage() { | |
| 696 current_usage_bytes_ = 0; | |
| 697 } | |
| 698 | |
| 699 size_t ImageDecodeController::MemoryBudget::GetCurrentUsageSafe() const { | |
| 700 return current_usage_bytes_.ValueOrDie(); | |
| 118 } | 701 } |
| 119 | 702 |
| 120 } // namespace cc | 703 } // namespace cc |