OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/tiles/gpu_image_decode_cache.h" | 5 #include "cc/tiles/gpu_image_decode_cache.h" |
6 | 6 |
7 #include <inttypes.h> | 7 #include <inttypes.h> |
8 | 8 |
9 #include "base/auto_reset.h" | 9 #include "base/auto_reset.h" |
10 #include "base/debug/alias.h" | 10 #include "base/debug/alias.h" |
(...skipping 324 matching lines...) | |
335 DCHECK_EQ(0u, upload.ref_count); | 335 DCHECK_EQ(0u, upload.ref_count); |
336 DCHECK_EQ(0u, decode.ref_count); | 336 DCHECK_EQ(0u, decode.ref_count); |
337 DCHECK_EQ(false, decode.is_locked()); | 337 DCHECK_EQ(false, decode.is_locked()); |
338 // This should always be cleaned up before deleting the image, as it needs to | 338 // This should always be cleaned up before deleting the image, as it needs to |
339 // be freed with the GL context lock held. | 339 // be freed with the GL context lock held. |
340 DCHECK(!upload.image()); | 340 DCHECK(!upload.image()); |
341 } | 341 } |
342 | 342 |
343 GpuImageDecodeCache::GpuImageDecodeCache(ContextProvider* context, | 343 GpuImageDecodeCache::GpuImageDecodeCache(ContextProvider* context, |
344 ResourceFormat decode_format, | 344 ResourceFormat decode_format, |
345 size_t max_gpu_image_bytes) | 345 size_t max_working_set_bytes, |
346 size_t max_cache_bytes) | |
346 : format_(decode_format), | 347 : format_(decode_format), |
347 context_(context), | 348 context_(context), |
348 persistent_cache_(PersistentCache::NO_AUTO_EVICT), | 349 persistent_cache_(PersistentCache::NO_AUTO_EVICT), |
349 normal_max_gpu_image_bytes_(max_gpu_image_bytes) { | 350 max_working_set_bytes_(max_working_set_bytes), |
351 normal_max_cache_bytes_(max_cache_bytes) { | |
352 DCHECK_GT(max_working_set_bytes_, normal_max_cache_bytes_); | |
353 | |
350 // Acquire the context_lock so that we can safely retrieve the | 354 // Acquire the context_lock so that we can safely retrieve the |
351 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. | 355 // GrContextThreadSafeProxy. This proxy can then be used with no lock held. |
352 { | 356 { |
353 ContextProvider::ScopedContextLock context_lock(context_); | 357 ContextProvider::ScopedContextLock context_lock(context_); |
354 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( | 358 context_threadsafe_proxy_ = sk_sp<GrContextThreadSafeProxy>( |
355 context->GrContext()->threadSafeProxy()); | 359 context->GrContext()->threadSafeProxy()); |
356 } | 360 } |
357 | 361 |
358 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). | 362 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). |
359 // Don't register a dump provider in these cases. | 363 // Don't register a dump provider in these cases. |
(...skipping 209 matching lines...) | |
569 // We want to keep as little in our cache as possible. Set our memory limit | 573 // We want to keep as little in our cache as possible. Set our memory limit |
570 // to zero and run EnsureCapacity to clean up memory. | 574 // to zero and run EnsureCapacity to clean up memory. |
571 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes; | 575 cached_bytes_limit_ = kSuspendedOrInvisibleMaxGpuImageBytes; |
572 EnsureCapacity(0); | 576 EnsureCapacity(0); |
573 | 577 |
574 // We are holding the context lock, so finish cleaning up deleted images | 578 // We are holding the context lock, so finish cleaning up deleted images |
575 // now. | 579 // now. |
576 DeletePendingImages(); | 580 DeletePendingImages(); |
577 } else { | 581 } else { |
578 base::AutoLock lock(lock_); | 582 base::AutoLock lock(lock_); |
579 cached_bytes_limit_ = normal_max_gpu_image_bytes_; | 583 cached_bytes_limit_ = normal_max_cache_bytes_; |
580 } | 584 } |
581 } | 585 } |
582 | 586 |
583 bool GpuImageDecodeCache::OnMemoryDump( | 587 bool GpuImageDecodeCache::OnMemoryDump( |
584 const base::trace_event::MemoryDumpArgs& args, | 588 const base::trace_event::MemoryDumpArgs& args, |
585 base::trace_event::ProcessMemoryDump* pmd) { | 589 base::trace_event::ProcessMemoryDump* pmd) { |
586 using base::trace_event::MemoryAllocatorDump; | 590 using base::trace_event::MemoryAllocatorDump; |
587 using base::trace_event::MemoryAllocatorDumpGuid; | 591 using base::trace_event::MemoryAllocatorDumpGuid; |
588 using base::trace_event::MemoryDumpLevelOfDetail; | 592 using base::trace_event::MemoryDumpLevelOfDetail; |
589 | 593 |
(...skipping 265 matching lines...) | |
855 // re-locking discardable (rather than requiring a full upload like GPU | 859 // re-locking discardable (rather than requiring a full upload like GPU |
856 // images). | 860 // images). |
857 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { | 861 if (image_data->mode == DecodedDataMode::CPU && !has_any_refs) { |
858 images_pending_deletion_.push_back(image_data->upload.image()); | 862 images_pending_deletion_.push_back(image_data->upload.image()); |
859 image_data->upload.SetImage(nullptr); | 863 image_data->upload.SetImage(nullptr); |
860 } | 864 } |
861 | 865 |
862 if (image_data->is_at_raster && !has_any_refs) { | 866 if (image_data->is_at_raster && !has_any_refs) { |
863 // We have an at-raster image which has reached zero refs. If it won't fit | 867 // We have an at-raster image which has reached zero refs. If it won't fit |
864 // in our cache, delete the image to allow it to fit. | 868 // in our cache, delete the image to allow it to fit. |
865 if (image_data->upload.image() && !CanFitSize(image_data->size)) { | 869 if (image_data->upload.image() && !CanFitInCache(image_data->size)) { |
866 images_pending_deletion_.push_back(image_data->upload.image()); | 870 images_pending_deletion_.push_back(image_data->upload.image()); |
867 image_data->upload.SetImage(nullptr); | 871 image_data->upload.SetImage(nullptr); |
868 } | 872 } |
869 | 873 |
870 // We now have an at-raster image which will fit in our cache. Convert it | 874 // We now have an at-raster image which will fit in our cache. Convert it |
871 // to not-at-raster. | 875 // to not-at-raster. |
872 image_data->is_at_raster = false; | 876 image_data->is_at_raster = false; |
873 if (image_data->upload.image()) { | 877 if (image_data->upload.image()) { |
874 bytes_used_ += image_data->size; | 878 bytes_used_ += image_data->size; |
875 image_data->upload.budgeted = true; | 879 image_data->upload.budgeted = true; |
876 } | 880 } |
877 } | 881 } |
878 | 882 |
879 // If we have image refs on a non-at-raster image, it must be budgeted, as it | 883 // If we have image refs on a non-at-raster image, it must be budgeted, as it |
880 // is either uploaded or pending upload. | 884 // is either uploaded or pending upload. |
881 if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted && | 885 if (image_data->upload.ref_count > 0 && !image_data->upload.budgeted && |
882 !image_data->is_at_raster) { | 886 !image_data->is_at_raster) { |
883 // We should only be taking non-at-raster refs on images that fit in cache. | 887 // We should only be taking non-at-raster refs on images that fit in cache. |
884 DCHECK(CanFitSize(image_data->size)); | 888 DCHECK(CanFitInWorkingSet(image_data->size)); |
885 | 889 |
886 bytes_used_ += image_data->size; | 890 bytes_used_ += image_data->size; |
887 image_data->upload.budgeted = true; | 891 image_data->upload.budgeted = true; |
888 } | 892 } |
889 | 893 |
890 // If we have no image refs on an image, it should only be budgeted if it has | 894 // If we have no image refs on an image, it should only be budgeted if it has |
891 // an uploaded image. If no image exists (upload was cancelled), we should | 895 // an uploaded image. If no image exists (upload was cancelled), we should |
892 // un-budget the image. | 896 // un-budget the image. |
893 if (image_data->upload.ref_count == 0 && image_data->upload.budgeted && | 897 if (image_data->upload.ref_count == 0 && image_data->upload.budgeted && |
894 !image_data->upload.image()) { | 898 !image_data->upload.image()) { |
895 DCHECK_GE(bytes_used_, image_data->size); | 899 DCHECK_GE(bytes_used_, image_data->size); |
896 bytes_used_ -= image_data->size; | 900 bytes_used_ -= image_data->size; |
897 image_data->upload.budgeted = false; | 901 image_data->upload.budgeted = false; |
898 } | 902 } |
899 | 903 |
900 // We should unlock the discardable memory for the image in two cases: | 904 // We should unlock the discardable memory for the image in two cases: |
901 // 1) The image is no longer being used (no decode or upload refs). | 905 // 1) The image is no longer being used (no decode or upload refs). |
902 // 2) This is a GPU backed image that has already been uploaded (no decode | 906 // 2) This is a GPU backed image that has already been uploaded (no decode |
903 // refs, and we actually already have an image). | 907 // refs, and we actually already have an image). |
904 bool should_unlock_discardable = | 908 bool should_unlock_discardable = |
905 !has_any_refs || | 909 !has_any_refs || |
906 (image_data->mode == DecodedDataMode::GPU && | 910 (image_data->mode == DecodedDataMode::GPU && |
907 !image_data->decode.ref_count && image_data->upload.image()); | 911 !image_data->decode.ref_count && image_data->upload.image()); |
908 | 912 |
909 if (should_unlock_discardable && image_data->decode.is_locked()) { | 913 if (should_unlock_discardable && image_data->decode.is_locked()) { |
910 DCHECK(image_data->decode.data()); | 914 DCHECK(image_data->decode.data()); |
911 image_data->decode.Unlock(); | 915 image_data->decode.Unlock(); |
912 } | 916 } |
913 | 917 |
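Note: the should_unlock_discardable logic above collapses to a small pure predicate, which makes the two cases in the comment easy to unit-test. A minimal standalone sketch (hypothetical names, not part of this CL):

    #include <cstdint>

    enum class Mode { kCpu, kGpu };

    // True when the discardable backing can be unlocked:
    // 1) nothing references the image at all, or
    // 2) the image is GPU-backed, already uploaded, and only upload refs
    //    remain (the CPU-side pixels are no longer needed).
    bool ShouldUnlockDiscardable(Mode mode,
                                 uint32_t decode_refs,
                                 uint32_t upload_refs,
                                 bool has_uploaded_image) {
      const bool has_any_refs = decode_refs > 0 || upload_refs > 0;
      if (!has_any_refs)
        return true;
      return mode == Mode::kGpu && decode_refs == 0 && has_uploaded_image;
    }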
918 // Call EnsureCapacity to make sure we are under our cache limits. | |
919 EnsureCapacity(0); | |
920 | |
914 #if DCHECK_IS_ON() | 921 #if DCHECK_IS_ON() |
915 // Sanity check the above logic. | 922 // Sanity check the above logic. |
916 if (image_data->upload.image()) { | 923 if (image_data->upload.image()) { |
917 DCHECK(image_data->is_at_raster || image_data->upload.budgeted); | 924 DCHECK(image_data->is_at_raster || image_data->upload.budgeted); |
918 if (image_data->mode == DecodedDataMode::CPU) | 925 if (image_data->mode == DecodedDataMode::CPU) |
919 DCHECK(image_data->decode.is_locked()); | 926 DCHECK(image_data->decode.is_locked()); |
920 } else { | 927 } else { |
921 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0); | 928 DCHECK(!image_data->upload.budgeted || image_data->upload.ref_count > 0); |
922 } | 929 } |
923 #endif | 930 #endif |
924 } | 931 } |
925 | 932 |
926 // Ensures that we can fit a new image of size |required_size| in our cache. In | 933 // Ensures that we can fit a new image of size |required_size| in our cache. In |
vmpstr 2017/03/28 18:41:31: in our working set?
ericrk 2017/03/28 20:10:10: Done.
927 // doing so, this function will free unreferenced image data as necessary to | 934 // doing so, this function will free unreferenced image data as necessary to |
928 // create room. | 935 // create room. |
929 bool GpuImageDecodeCache::EnsureCapacity(size_t required_size) { | 936 bool GpuImageDecodeCache::EnsureCapacity(size_t required_size) { |
930 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), | 937 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), |
931 "GpuImageDecodeCache::EnsureCapacity"); | 938 "GpuImageDecodeCache::EnsureCapacity"); |
932 lock_.AssertAcquired(); | 939 lock_.AssertAcquired(); |
933 | 940 |
934 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | 941 if (CanFitInCache(required_size) && !ExceedsPreferredCount()) |
vmpstr 2017/03/28 18:41:30: This could use a comment about why you're checking
ericrk 2017/03/28 20:10:10: Done.
935 return true; | 942 return true; |
936 | 943 |
937 // While we are over memory or preferred item capacity, we iterate through | 944 // While we are over memory or preferred item capacity, we iterate through |
938 // our set of cached image data in LRU order. For each image, we can do two | 945 // our set of cached image data in LRU order. For each image, we can do two |
939 // things: 1) We can free the uploaded image, reducing the memory usage of | 946 // things: 1) We can free the uploaded image, reducing the memory usage of |
940 // the cache and 2) we can remove the entry entirely, reducing the count of | 947 // the cache and 2) we can remove the entry entirely, reducing the count of |
941 // elements in the cache. | 948 // elements in the cache. |
942 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) { | 949 for (auto it = persistent_cache_.rbegin(); it != persistent_cache_.rend();) { |
943 if (it->second->decode.ref_count != 0 || | 950 if (it->second->decode.ref_count != 0 || |
944 it->second->upload.ref_count != 0) { | 951 it->second->upload.ref_count != 0) { |
(...skipping 18 matching lines...) | |
963 it->second->upload.budgeted = false; | 970 it->second->upload.budgeted = false; |
964 } | 971 } |
965 | 972 |
966 // Free the entire entry if necessary. | 973 // Free the entire entry if necessary. |
967 if (ExceedsPreferredCount()) { | 974 if (ExceedsPreferredCount()) { |
968 it = persistent_cache_.Erase(it); | 975 it = persistent_cache_.Erase(it); |
969 } else { | 976 } else { |
970 ++it; | 977 ++it; |
971 } | 978 } |
972 | 979 |
973 if (CanFitSize(required_size) && !ExceedsPreferredCount()) | 980 if (CanFitInCache(required_size) && !ExceedsPreferredCount()) |
974 return true; | 981 return true; |
975 } | 982 } |
976 | 983 |
977 // Preferred count is only used as a guideline when trimming the cache. Allow | 984 // While we always try to bring the cache size within bounds above, we only |
vmpstr 2017/03/28 18:41:31: This comment can probably migrate to the top of th
ericrk 2017/03/28 20:10:10: Done.
978 // new elements to be added as long as we are below our size limit. | 985 // really care about whether the new element can fit in our working set. |
979 return CanFitSize(required_size); | 986 return CanFitInWorkingSet(required_size); |
980 } | 987 } |
981 | 988 |
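For readers skimming the diff: the loop above frees bytes and trims entry count in LRU order, never touching entries that still hold refs, and since this CL it admits a new image as long as the (larger) working-set limit has room, even if the cache limit does not. A self-contained sketch of that shape, with hypothetical types standing in for the persistent cache:

    #include <cstddef>
    #include <list>

    struct Entry {
      size_t size = 0;
      int ref_count = 0;        // decode + upload refs
      bool has_upload = false;  // an uploaded image is holding |size| bytes
    };

    struct CacheModel {
      std::list<Entry> entries;  // front = least recently used
      size_t bytes_used = 0;
      size_t cache_byte_limit = 0;
      size_t working_set_byte_limit = 0;  // larger than cache_byte_limit
      size_t count_limit = 0;

      bool FitsInCache(size_t extra) const {
        return bytes_used + extra <= cache_byte_limit;
      }

      bool EnsureCapacity(size_t required) {
        for (auto it = entries.begin(); it != entries.end();) {
          if (FitsInCache(required) && entries.size() <= count_limit)
            return true;
          if (it->ref_count != 0) {  // still referenced; leave untouched
            ++it;
            continue;
          }
          if (it->has_upload) {  // free the uploaded image's bytes
            bytes_used -= it->size;
            it->has_upload = false;
          }
          // Drop the entry entirely only if we hold too many items.
          if (entries.size() > count_limit)
            it = entries.erase(it);
          else
            ++it;
        }
        // Trimming aims at the cache limit, but admitting a new image only
        // requires room in the working set.
        return bytes_used + required <= working_set_byte_limit;
      }
    };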
982 bool GpuImageDecodeCache::CanFitSize(size_t size) const { | 989 bool GpuImageDecodeCache::CanFitInCache(size_t size) const { |
983 lock_.AssertAcquired(); | 990 lock_.AssertAcquired(); |
984 | 991 |
985 size_t bytes_limit; | 992 size_t bytes_limit; |
986 if (memory_state_ == base::MemoryState::NORMAL) { | 993 if (memory_state_ == base::MemoryState::NORMAL) { |
987 bytes_limit = cached_bytes_limit_; | 994 bytes_limit = cached_bytes_limit_; |
988 } else if (memory_state_ == base::MemoryState::THROTTLED) { | 995 } else if (memory_state_ == base::MemoryState::THROTTLED) { |
989 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor; | 996 bytes_limit = cached_bytes_limit_ / kThrottledCacheSizeReductionFactor; |
990 } else { | 997 } else { |
991 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); | 998 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); |
992 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes; | 999 bytes_limit = kSuspendedOrInvisibleMaxGpuImageBytes; |
993 } | 1000 } |
994 | 1001 |
995 base::CheckedNumeric<uint32_t> new_size(bytes_used_); | 1002 base::CheckedNumeric<uint32_t> new_size(bytes_used_); |
996 new_size += size; | 1003 new_size += size; |
997 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit; | 1004 return new_size.IsValid() && new_size.ValueOrDie() <= bytes_limit; |
998 } | 1005 } |
999 | 1006 |
1007 bool GpuImageDecodeCache::CanFitInWorkingSet(size_t size) const { | |
1008 lock_.AssertAcquired(); | |
1009 | |
1010 base::CheckedNumeric<uint32_t> new_size(bytes_used_); | |
1011 new_size += size; | |
1012 return new_size.IsValid() && new_size.ValueOrDie() <= max_working_set_bytes_; | |
1013 } | |
1014 | |
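Taken together with the constructor's DCHECK_GT, the design is: the cache limit (scaled by memory state) is a soft target that eviction drives toward, while the larger working-set limit is the hard bound on what raster may reference at once. A small sketch of the byte math; the limit values are illustrative assumptions, not this CL's defaults:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    constexpr size_t kMaxWorkingSetBytes = 256 * 1024 * 1024;  // assumed
    constexpr size_t kMaxCacheBytes = 96 * 1024 * 1024;        // assumed
    static_assert(kMaxWorkingSetBytes > kMaxCacheBytes,
                  "mirrors DCHECK_GT(max_working_set_bytes_, "
                  "normal_max_cache_bytes_)");

    // Overflow-safe |used + size <= limit|, the same guarantee the
    // CheckedNumeric<uint32_t> pattern above provides.
    bool FitsUnderLimit(uint32_t used, size_t size, size_t limit) {
      if (size > std::numeric_limits<uint32_t>::max() - used)
        return false;  // |used + size| would overflow; cannot fit
      return used + size <= limit;
    }

An image that fails FitsUnderLimit against kMaxCacheBytes but passes against kMaxWorkingSetBytes is exactly the case EnsureCapacity now admits.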
1000 bool GpuImageDecodeCache::ExceedsPreferredCount() const { | 1015 bool GpuImageDecodeCache::ExceedsPreferredCount() const { |
1001 lock_.AssertAcquired(); | 1016 lock_.AssertAcquired(); |
1002 | 1017 |
1003 size_t items_limit; | 1018 size_t items_limit; |
1004 if (memory_state_ == base::MemoryState::NORMAL) { | 1019 if (memory_state_ == base::MemoryState::NORMAL) { |
1005 items_limit = kNormalMaxItemsInCache; | 1020 items_limit = kNormalMaxItemsInCache; |
1006 } else if (memory_state_ == base::MemoryState::THROTTLED) { | 1021 } else if (memory_state_ == base::MemoryState::THROTTLED) { |
1007 items_limit = kThrottledMaxItemsInCache; | 1022 items_limit = kThrottledMaxItemsInCache; |
1008 } else { | 1023 } else { |
1009 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); | 1024 DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_); |
(...skipping 262 matching lines...) | |
1272 | 1287 |
1273 void GpuImageDecodeCache::OnPurgeMemory() { | 1288 void GpuImageDecodeCache::OnPurgeMemory() { |
1274 base::AutoLock lock(lock_); | 1289 base::AutoLock lock(lock_); |
1275 // Temporarily changes |memory_state_| to free up as much cache as possible. | 1290 // Temporarily changes |memory_state_| to free up as much cache as possible. |
1276 base::AutoReset<base::MemoryState> reset(&memory_state_, | 1291 base::AutoReset<base::MemoryState> reset(&memory_state_, |
1277 base::MemoryState::SUSPENDED); | 1292 base::MemoryState::SUSPENDED); |
1278 EnsureCapacity(0); | 1293 EnsureCapacity(0); |
1279 } | 1294 } |
1280 | 1295 |
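OnPurgeMemory uses base::AutoReset so the aggressive purge is self-restoring even on early return. A standalone stand-in with the same shape (a sketch; the real class lives in base/auto_reset.h):

    // Saves *var, overwrites it with |new_value|, and restores the saved
    // value when the scope exits.
    template <typename T>
    class ScopedReset {
     public:
      ScopedReset(T* var, T new_value) : var_(var), old_value_(*var) {
        *var_ = new_value;
      }
      ~ScopedReset() { *var_ = old_value_; }

      ScopedReset(const ScopedReset&) = delete;
      ScopedReset& operator=(const ScopedReset&) = delete;

     private:
      T* var_;
      T old_value_;
    };

Here the cache temporarily pretends to be SUSPENDED so EnsureCapacity(0) evaluates the minimum limits, and |memory_state_| snaps back when OnPurgeMemory returns.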
1281 } // namespace cc | 1296 } // namespace cc |