| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/tiles/gpu_image_decode_cache.h" | 5 #include "cc/tiles/gpu_image_decode_cache.h" |
| 6 | 6 |
| 7 #include <inttypes.h> | 7 #include <inttypes.h> |
| 8 | 8 |
| 9 #include "base/auto_reset.h" | 9 #include "base/auto_reset.h" |
| 10 #include "base/debug/alias.h" | 10 #include "base/debug/alias.h" |
| (...skipping 324 matching lines...) |
| 335 DCHECK_EQ(0u, upload.ref_count); | 335 DCHECK_EQ(0u, upload.ref_count); |
| 336 DCHECK_EQ(0u, decode.ref_count); | 336 DCHECK_EQ(0u, decode.ref_count); |
| 337 DCHECK_EQ(false, decode.is_locked()); | 337 DCHECK_EQ(false, decode.is_locked()); |
| 338 // This should always be cleaned up before deleting the image, as it needs to | 338 // This should always be cleaned up before deleting the image, as it needs to |
| 339 // be freed with the GL context lock held. | 339 // be freed with the GL context lock held. |
| 340 DCHECK(!upload.image()); | 340 DCHECK(!upload.image()); |
| 341 } | 341 } |
| 342 | 342 |
| 343 GpuImageDecodeCache::GpuImageDecodeCache(ContextProvider* context, | 343 GpuImageDecodeCache::GpuImageDecodeCache(ContextProvider* context, |
| 344 ResourceFormat decode_format, | 344 ResourceFormat decode_format, |
| 345 size_t max_gpu_image_bytes) | 345 size_t max_working_set_bytes, |
| 346 size_t max_cache_bytes) |
| 346 : format_(decode_format), | 347 : format_(decode_format), |
| 347 context_(context), | 348 context_(context), |
| 348 persistent_cache_(PersistentCache::NO_AUTO_EVICT), | 349 persistent_cache_(PersistentCache::NO_AUTO_EVICT), |
| 349 normal_max_gpu_image_bytes_(max_gpu_image_bytes) { | 350 max_working_set_bytes_(max_working_set_bytes), |
| 351 normal_max_cache_bytes_(max_cache_bytes) { |
| 352 DCHECK_GE(max_working_set_bytes_, normal_max_cache_bytes_); |
| 353 |
| 350 // Acquire the context_lock so that we can safely retrieve the | 354 // Acquire the context_lock so that we can safely retrieve the |
| 351 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. | 355 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. |
| 352 { | 356 { |
| 353 ContextProvider::ScopedContextLock context_lock(context_); | 357 ContextProvider::ScopedContextLock context_lock(context_); |
| 354 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( | 358 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( |
| 355 context->GrContext()->threadSafeProxy()); | 359 context->GrContext()->threadSafeProxy()); |
| 356 } | 360 } |
| 357 | 361 |
| 358 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). | 362 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). |
| 359 // Don't register a dump provider in these cases. | 363 // Don't register a dump provider in these cases. |
| (...skipping 209 matching lines...) |
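Note on the constructor change above: the single `max_gpu_image_bytes` budget is split into a hard working-set limit and a softer cache limit, with the `DCHECK_GE` requiring working set >= cache. A minimal usage sketch, assuming an already-initialized `context_provider`; the byte values and the `RGBA_8888` format choice are illustrative assumptions, not Chromium's defaults:

```cpp
#include "cc/tiles/gpu_image_decode_cache.h"

// Assumed, illustrative budgets; the working-set budget must be at least as
// large as the cache budget to satisfy the constructor's DCHECK_GE.
constexpr size_t kAssumedWorkingSetBytes = 96 * 1024 * 1024;
constexpr size_t kAssumedCacheBytes = 64 * 1024 * 1024;

cc::GpuImageDecodeCache cache(context_provider, cc::RGBA_8888,
                              kAssumedWorkingSetBytes, kAssumedCacheBytes);
```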
| 569 // We want to keep as little in our cache as possible. Set our memory limit | 573 // We want to keep as little in our cache as possible. Set our memory limit |
| 570 // to zero and call EnsureCapacity to clean up memory. | 574 // to zero and call EnsureCapacity to clean up memory. |
| 571 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes; | 575 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes; |
| 572 EnsureCapacity(0); | 576 EnsureCapacity(0); |
| 573 | 577 |
| 574 // We are holding the context lock, so finish cleaning up deleted images | 578 // We are holding the context lock, so finish cleaning up deleted images |
| 575 // now. | 579 // now. |
| 576 DeletePendingImages(); | 580 DeletePendingImages(); |
| 577 } else { | 581 } else { |
| 578 base::AutoLock lock(lock_); | 582 base::AutoLock lock(lock_); |
| 579 cached_bytes_limit_ = normal_max_gpu_image_bytes_; | 583 cached_bytes_limit_ = normal_max_cache_bytes_; |
| 580 } | 584 } |
| 581 } | 585 } |
| 582 | 586 |
| 583 bool GpuImageDecodeCache::OnMemoryDump( | 587 bool GpuImageDecodeCache::OnMemoryDump( |
| 584 const base::trace_event::MemoryDumpArgs& args, | 588 const base::trace_event::MemoryDumpArgs& args, |
| 585 base::trace_event::ProcessMemoryDump* pmd) { | 589 base::trace_event::ProcessMemoryDump* pmd) { |
| 586 using base::trace_event::MemoryAllocatorDump; | 590 using base::trace_event::MemoryAllocatorDump; |
| 587 using base::trace_event::MemoryAllocatorDumpGuid; | 591 using base::trace_event::MemoryAllocatorDumpGuid; |
| 588 using base::trace_event::MemoryDumpLevelOfDetail; | 592 using base::trace_event::MemoryDumpLevelOfDetail; |
| 589 | 593 |
| (...skipping 265 matching lines...) |
| 855 // re-locking discardable (rather than requiring a full upload like GPU | 859 // re-locking discardable (rather than requiring a full upload like GPU |
| 856 // images). | 860 // images). |
| 857 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { | 861 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { |
| 858 images_pending_deletion_.push_back(image_data->upload.image()); | 862 images_pending_deletion_.push_back(image_data->upload.image()); |
| 859 image_data->upload.SetImage(nullptr); | 863 image_data->upload.SetImage(nullptr); |
| 860 } | 864 } |
| 861 | 865 |
| 862 if (image_data->is_at_raster && !has_any_refs) { | 866 if (image_data->is_at_raster && !has_any_refs) { |
| 863 // We have an at-raster image which has reached zero refs. If it won't fit | 867 // We have an at-raster image which has reached zero refs. If it won't fit |
| 864 // in our cache, delete the image to allow it to fit. | 868 // in our cache, delete the image to allow it to fit. |
| 865 if (image_data->upload.image() && !CanFitSize(image_data->size)) { | 869 if (image_data->upload.image() && !CanFitInCache(image_data->size)) { |
| 866 images_pending_deletion_.push_back(image_data->upload.image()); | 870 images_pending_deletion_.push_back(image_data->upload.image()); |
| 867 image_data->upload.SetImage(nullptr); | 871 image_data->upload.SetImage(nullptr); |
| 868 } | 872 } |
| 869 | 873 |
| 870 // We now have an at-raster image which will fit in our cache. Convert it | 874 // We now have an at-raster image which will fit in our cache. Convert it |
| 871 // to not-at-raster. | 875 // to not-at-raster. |
| 872 image_data->is_at_raster = false; | 876 image_data->is_at_raster = false; |
| 873 if (image_data->upload.image()) { | 877 if (image_data->upload.image()) { |
| 874 bytes_used_ += image_data->size; | 878 bytes_used_ += image_data->size; |
| 875 image_data->upload.budgeted = true; | 879 image_data->upload.budgeted = true; |
| 876 } | 880 } |
| 877 } | 881 } |
| 878 | 882 |
| 879 // If we have image refs on a non-at-raster image, it must be budgeted, as it | 883 // If we have image refs on a non-at-raster image, it must be budgeted, as it |
| 880 // is either uploaded or pending upload. | 884 // is either uploaded or pending upload. |
| 881 if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted && | 885 if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted && |
| 882 !image_data->is_at_raster) { | 886 !image_data->is_at_raster) { |
| 883 // We should only be taking non-at-raster refs on images that fit in cache. | 887 // We should only be taking non-at-raster refs on images that fit in cache. |
| 884 DCHECK(CanFitSize(image_data->size)); | 888 DCHECK(CanFitInWorkingSet(image_data->size)); |
| 885 | 889 |
| 886 bytes_used_ += image_data->size; | 890 bytes_used_ += image_data->size; |
| 887 image_data->upload.budgeted = true; | 891 image_data->upload.budgeted = true; |
| 888 } | 892 } |
| 889 | 893 |
| 890 // If we have no image refs on an image, it should only be budgeted if it has | 894 // If we have no image refs on an image, it should only be budgeted if it has |
| 891 // an uploaded image. If no image exists (upload was cancelled), we should | 895 // an uploaded image. If no image exists (upload was cancelled), we should |
| 892 // un-budget the image. | 896 // un-budget the image. |
| 893 if (image_data->upload.ref_count == 0 && image_data->upload.budgeted && | 897 if (image_data->upload.ref_count == 0 && image_data->upload.budgeted && |
| 894 !image_data->upload.image()) { | 898 !image_data->upload.image()) { |
| 895 DCHECK_GE(bytes_used_, image_data->size); | 899 DCHECK_GE(bytes_used_, image_data->size); |
| 896 bytes_used_ -= image_data->size; | 900 bytes_used_ -= image_data->size; |
| 897 image_data->upload.budgeted = false; | 901 image_data->upload.budgeted = false; |
| 898 } | 902 } |
| 899 | 903 |
| 900 // We should unlock the discardable memory for the image in two cases: | 904 // We should unlock the discardable memory for the image in two cases: |
| 901 // 1) The image is no longer being used (no decode or upload refs). | 905 // 1) The image is no longer being used (no decode or upload refs). |
| 902 // 2) This is a GPU backed image that has already been uploaded (no decode | 906 // 2) This is a GPU backed image that has already been uploaded (no decode |
| 903 // refs, and we actually already have an image). | 907 // refs, and we actually already have an image). |
| 904 bool should_unlock_discardable = | 908 bool should_unlock_discardable = |
| 905 !has_any_refs || | 909 !has_any_refs || |
| 906 (image_data->mode == DecodedDataMode::GPU && | 910 (image_data->mode == DecodedDataMode::GPU && |
| 907 !image_data->decode.ref_count && image_data->upload.image()); | 911 !image_data->decode.ref_count && image_data->upload.image()); |
| 908 | 912 |
| 909 if (should_unlock_discardable && image_data->decode.is_locked()) { | 913 if (should_unlock_discardable && image_data->decode.is_locked()) { |
| 910 DCHECK(image_data->decode.data()); | 914 DCHECK(image_data->decode.data()); |
| 911 image_data->decode.Unlock(); | 915 image_data->decode.Unlock(); |
| 912 } | 916 } |
| 913 | 917 |
| 918 // Call EnsureCapacity to make sure we are under our cache limits. |
| 919 EnsureCapacity(0); |
| 920 |
| 914 #if DCHECK_IS_ON() | 921 #if DCHECK_IS_ON() |
| 915 // Sanity check the above logic. | 922 // Sanity check the above logic. |
| 916 if (image_data->upload.image()) { | 923 if (image_data->upload.image()) { |
| 917 DCHECK(image_data->is_at_raster || image_data->upload.budgeted); | 924 DCHECK(image_data->is_at_raster || image_data->upload.budgeted); |
| 918 if (image_data->mode == DecodedDataMode::CPU) | 925 if (image_data->mode == DecodedDataMode::CPU) |
| 919 DCHECK(image_data->decode.is_locked()); | 926 DCHECK(image_data->decode.is_locked()); |
| 920 } else { | 927 } else { |
| 921 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0); | 928 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0); |
| 922 } | 929 } |
| 923 #endif | 930 #endif |
| 924 } | 931 } |
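The `#if DCHECK_IS_ON()` block above encodes the invariants the ref-count and budget bookkeeping must maintain. A standalone, illustrative restatement over a simplified entry type (the field names are assumptions mirroring the `ImageData` fields used above, not Chromium's actual type):

```cpp
#include <cassert>

// Simplified stand-in for ImageData; illustrative only.
struct EntrySketch {
  bool has_uploaded_image;
  bool is_at_raster;
  bool budgeted;
  bool is_cpu_mode;
  bool decode_locked;
  int upload_ref_count;
};

void CheckOwnershipInvariants(const EntrySketch& e) {
  if (e.has_uploaded_image) {
    // Any uploaded image is either at-raster or counted in bytes_used_.
    assert(e.is_at_raster || e.budgeted);
    // CPU-mode images draw directly from the locked decode data.
    if (e.is_cpu_mode)
      assert(e.decode_locked);
  } else {
    // Without an uploaded image, budget is held only for a pending upload.
    assert(!e.budgeted || e.upload_ref_count > 0);
  }
}
```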
| 925 | 932 |
| 926 // Ensures that we can fit a new image of size |required_size| in our cache. In | 933 // Ensures that we can fit a new image of size |required_size| in our working |
| 927 // doing so, this function will free unreferenced image data as necessary to | 934 // set. In doing so, this function will free unreferenced image data as |
| 928 // create room. | 935 // necessary to create room. |
| 929 bool GpuImageDecodeCache::EnsureCapacity(size_t required_size) { | 936 bool GpuImageDecodeCache::EnsureCapacity(size_t required_size) { |
| 930 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), | 937 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), |
| 931 "GpuImageDecodeCache::EnsureCapacity"); | 938 "GpuImageDecodeCache::EnsureCapacity"); |
| 932 lock_.AssertAcquired(); | 939 lock_.AssertAcquired(); |
| 933 | 940 |
| 934 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | 941 // While we only care whether |required_size| fits in our working set, we |
| 942 // also want to keep our cache under budget where possible. Because the |
| 943 // working set retains referenced images that cannot be evicted, its size |
| 944 // can exceed the cache budget, so the cache budget is best-effort. |
| 945 if (CanFitInCache(required_size) && !ExceedsPreferredCount()) |
| 935 return true; | 946 return true; |
| 936 | 947 |
| 937 // While we are over memory or preferred item capacity, we iterate through | 948 // While we are over memory or preferred item capacity, we iterate through |
| 938 // our set of cached image data in LRU order. For each image, we can do two | 949 // our set of cached image data in LRU order. For each image, we can do two |
| 939 // things: 1) We can free the uploaded image, reducing the memory usage of | 950 // things: 1) We can free the uploaded image, reducing the memory usage of |
| 940 // the cache and 2) we can remove the entry entirely, reducing the count of | 951 // the cache and 2) we can remove the entry entirely, reducing the count of |
| 941 // elements in the cache. | 952 // elements in the cache. |
| 942 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) { | 953 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) { |
| 943 if (it->second->decode.ref_count != 0 || | 954 if (it->second->decode.ref_count != 0 || |
| 944 it->second->upload.ref_count != 0) { | 955 it->second->upload.ref_count != 0) { |
| (...skipping 18 matching lines...) |
| 963 it->second->upload.budgeted = false; | 974 it->second->upload.budgeted = false; |
| 964 } | 975 } |
| 965 | 976 |
| 966 // Free the entire entry if necessary. | 977 // Free the entire entry if necessary. |
| 967 if (ExceedsPreferredCount()) { | 978 if (ExceedsPreferredCount()) { |
| 968 it = persistent_cache_.Erase(it); | 979 it = persistent_cache_.Erase(it); |
| 969 } else { | 980 } else { |
| 970 ++it; | 981 ++it; |
| 971 } | 982 } |
| 972 | 983 |
| 973 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | 984 if (CanFitInCache(required_size) && !ExceedsPreferredCount()) |
| 974 return true; | 985 return true; |
| 975 } | 986 } |
| 976 | 987 |
| 977 // Preferred count is only used as a guideline when trimming the cache. Allow | 988 return CanFitInWorkingSet(required_size); |
| 978 // new elements to be added as long as we are below our size limit. | |
| 979 return CanFitSize(required_size); | |
| 980 } | 989 } |
| 981 | 990 |
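A condensed sketch of the eviction strategy above, using a plain `std::list` in LRU order instead of Chromium's `persistent_cache_`; all names and the container choice are illustrative, and freeing the image versus erasing the entry are collapsed into one step:

```cpp
#include <cstddef>
#include <list>

struct EntryLru { size_t size; int ref_count; };

bool EnsureCapacitySketch(std::list<EntryLru>& lru_oldest_first,
                          size_t& bytes_used, size_t required_size,
                          size_t cache_limit, size_t working_set_limit,
                          size_t preferred_count) {
  for (auto it = lru_oldest_first.begin(); it != lru_oldest_first.end();) {
    if (bytes_used + required_size <= cache_limit &&
        lru_oldest_first.size() <= preferred_count) {
      return true;  // under the cache budget and the preferred item count
    }
    if (it->ref_count != 0) {
      ++it;  // referenced entries are pinned; skip them
      continue;
    }
    bytes_used -= it->size;           // free the uploaded image's bytes
    it = lru_oldest_first.erase(it);  // and drop the entry itself
  }
  // The cache budget can be unattainable while pinned entries remain; admit
  // new work as long as it still fits in the (larger) working-set budget.
  return bytes_used + required_size <= working_set_limit;
}
```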
| 982 bool GpuImageDecodeCache::CanFitSize(size_t size) const { | 991 bool GpuImageDecodeCache::CanFitInCache(size_t size) const { |
| 983 lock_.AssertAcquired(); | 992 lock_.AssertAcquired(); |
| 984 | 993 |
| 985 size_t bytes_limit; | 994 size_t bytes_limit; |
| 986 if (memory_state_ == base::MemoryState::NORMAL) { | 995 if (memory_state_ == base::MemoryState::NORMAL) { |
| 987 bytes_limit = cached_bytes_limit_; | 996 bytes_limit = cached_bytes_limit_; |
| 988 } else if (memory_state_ == base::MemoryState::THROTTLED) { | 997 } else if (memory_state_ == base::MemoryState::THROTTLED) { |
| 989 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor; | 998 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor; |
| 990 } else { | 999 } else { |
| 991 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); | 1000 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); |
| 992 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes; | 1001 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes; |
| 993 } | 1002 } |
| 994 | 1003 |
| 995 base::CheckedNumeric<uint32_t> new_size(bytes_used_); | 1004 base::CheckedNumeric<uint32_t> new_size(bytes_used_); |
| 996 new_size += size; | 1005 new_size += size; |
| 997 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit; | 1006 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit; |
| 998 } | 1007 } |
| 999 | 1008 |
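The effective cache budget therefore depends on the renderer's memory state. A worked sketch of the limit selection; the reduction factor and suspended limit are assumed placeholder values standing in for `kThrottledCacheSizeReductionFactor` and `kSuspendedOrInvisibleMaxGpuImageBytes`, which are defined earlier in this file:

```cpp
#include <cstddef>

enum class StateSketch { kNormal, kThrottled, kSuspended };

// Assumed values, for illustration only.
constexpr size_t kAssumedThrottledReductionFactor = 2;
constexpr size_t kAssumedSuspendedLimitBytes = 0;

size_t EffectiveCacheLimit(StateSketch state, size_t cached_bytes_limit) {
  switch (state) {
    case StateSketch::kNormal:
      return cached_bytes_limit;
    case StateSketch::kThrottled:
      return cached_bytes_limit / kAssumedThrottledReductionFactor;
    case StateSketch::kSuspended:
      return kAssumedSuspendedLimitBytes;
  }
  return 0;  // unreachable
}
// E.g., with a 64 MiB cached_bytes_limit: NORMAL -> 64 MiB,
// THROTTLED -> 32 MiB, SUSPENDED -> 0 (only unevictable bytes remain).
```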
| 1009 bool GpuImageDecodeCache::CanFitInWorkingSet(size_t size) const { |
| 1010 lock_.AssertAcquired(); |
| 1011 |
| 1012 base::CheckedNumeric<uint32_t> new_size(bytes_used_); |
| 1013 new_size += size; |
| 1014 return new_size.IsValid() && new_size.ValueOrDie() <= max_working_set_bytes_; |
| 1015 } |
| 1016 |
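Both fit checks route the addition through `base::CheckedNumeric` so an overflowing `bytes_used_ + size` fails closed rather than wrapping. An equivalent standard-C++ sketch of that guard, ignoring the narrowing to `uint32_t` in the code above:

```cpp
#include <cstddef>
#include <limits>

// Overflow-safe "does bytes_used + size fit under limit" check.
bool CanFitSketch(size_t bytes_used, size_t size, size_t limit) {
  if (size > std::numeric_limits<size_t>::max() - bytes_used)
    return false;  // bytes_used + size would wrap around
  return bytes_used + size <= limit;
}
```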
| 1000 bool GpuImageDecodeCache::ExceedsPreferredCount() const { | 1017 bool GpuImageDecodeCache::ExceedsPreferredCount() const { |
| 1001 lock_.AssertAcquired(); | 1018 lock_.AssertAcquired(); |
| 1002 | 1019 |
| 1003 size_t items_limit; | 1020 size_t items_limit; |
| 1004 if (memory_state_ == base::MemoryState::NORMAL) { | 1021 if (memory_state_ == base::MemoryState::NORMAL) { |
| 1005 items_limit = kNormalMaxItemsInCache; | 1022 items_limit = kNormalMaxItemsInCache; |
| 1006 } else if (memory_state_ == base::MemoryState::THROTTLED) { | 1023 } else if (memory_state_ == base::MemoryState::THROTTLED) { |
| 1007 items_limit = kThrottledMaxItemsInCache; | 1024 items_limit = kThrottledMaxItemsInCache; |
| 1008 } else { | 1025 } else { |
| 1009 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); | 1026 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); |
| (...skipping 262 matching lines...) |
| 1272 | 1289 |
| 1273 void GpuImageDecodeCache::OnPurgeMemory() { | 1290 void GpuImageDecodeCache::OnPurgeMemory() { |
| 1274 base::AutoLock lock(lock_); | 1291 base::AutoLock lock(lock_); |
| 1275 // Temporarily changes |memory_state_| to free up as much cache as possible. | 1292 // Temporarily changes |memory_state_| to free up as much cache as possible. |
| 1276 base::AutoReset<base::MemoryState> reset(&memory_state_, | 1293 base::AutoReset<base::MemoryState> reset(&memory_state_, |
| 1277 base::MemoryState::SUSPENDED); | 1294 base::MemoryState::SUSPENDED); |
| 1278 EnsureCapacity(0); | 1295 EnsureCapacity(0); |
| 1279 } | 1296 } |
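`base::AutoReset` restores `memory_state_` when `reset` leaves scope, so the SUSPENDED limit applies only for the duration of the purge. A generic, self-contained restatement of the idiom (illustrative; not Chromium's implementation):

```cpp
#include <utility>

// RAII value swap: install |new_value| now, restore the old value on scope
// exit. Mirrors the base::AutoReset idiom used in OnPurgeMemory().
template <typename T>
class ScopedResetSketch {
 public:
  ScopedResetSketch(T* var, T new_value)
      : var_(var), old_value_(std::move(*var)) {
    *var_ = std::move(new_value);
  }
  ~ScopedResetSketch() { *var_ = std::move(old_value_); }

  ScopedResetSketch(const ScopedResetSketch&) = delete;
  ScopedResetSketch& operator=(const ScopedResetSketch&) = delete;

 private:
  T* var_;
  T old_value_;
};
```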
| 1280 | 1297 |
| 1281 } // namespace cc | 1298 } // namespace cc |