Index: cc/resources/resource_pool.cc |
diff --git a/cc/resources/resource_pool.cc b/cc/resources/resource_pool.cc |
index 5a6654e001ba687af6621f37933e8c70a496a9d8..3bc810826a92255fc1b0df4f74b51e7bf7b328aa 100644 |
--- a/cc/resources/resource_pool.cc |
+++ b/cc/resources/resource_pool.cc |
@@ -16,6 +16,11 @@ |
namespace cc { |
+namespace { |
reveman 2015/08/20 21:43:37: nit: move this blank line below l.19 instead
ericrk 2015/08/21 12:48:22: I *think* I did what you meant? Tried to match oth
|
+// Delay before a resource is considered expired. |
+const int kResourceExpirationDelayMs = 1000; |
+} // namespace |
reveman 2015/08/20 21:43:37: nit: blank line above this
ericrk 2015/08/21 12:48:22: Done.
|
+ |
void ResourcePool::PoolResource::OnMemoryDump( |
base::trace_event::ProcessMemoryDump* pmd, |
const ResourceProvider* resource_provider, |
@@ -46,18 +51,9 @@ void ResourcePool::PoolResource::OnMemoryDump( |
total_bytes); |
} |
} |
+ |
ResourcePool::ResourcePool(ResourceProvider* resource_provider) |
- : resource_provider_(resource_provider), |
- target_(0), |
- max_memory_usage_bytes_(0), |
- max_unused_memory_usage_bytes_(0), |
- max_resource_count_(0), |
- memory_usage_bytes_(0), |
- unused_memory_usage_bytes_(0), |
- resource_count_(0) { |
- base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( |
- this, base::ThreadTaskRunnerHandle::Get()); |
-} |
+ : ResourcePool(resource_provider, 0) {} |
ResourcePool::ResourcePool(ResourceProvider* resource_provider, GLenum target) |
: resource_provider_(resource_provider), |
@@ -67,8 +63,13 @@ ResourcePool::ResourcePool(ResourceProvider* resource_provider, GLenum target) |
max_resource_count_(0), |
memory_usage_bytes_(0), |
unused_memory_usage_bytes_(0), |
- resource_count_(0) { |
- DCHECK_NE(0u, target); |
+ resource_count_(0), |
+ evict_expired_resources_pending_(false), |
+ resource_expiration_delay_( |
+ base::TimeDelta::FromMilliseconds(kResourceExpirationDelayMs)), |
+ weak_ptr_factory_(this) { |
+ evict_expired_resources_callback_ = base::Bind( |
reveman 2015/08/20 21:43:37: Unlike one-copy where worker threads are in play,
ericrk 2015/08/21 12:48:22: ok, makes sense.
|
+ &ResourcePool::EvictExpiredResources, weak_ptr_factory_.GetWeakPtr()); |
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( |
this, base::ThreadTaskRunnerHandle::Get()); |
} |
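For reference, the eviction callback created in the constructor above follows the usual weak-pointer-bound-callback pattern: because the closure holds a WeakPtr rather than a raw |this|, a delayed task that fires after the pool has been destroyed simply does nothing instead of touching freed memory. A minimal standalone sketch of the pattern (class and member names here are illustrative, not part of the patch; assumes the usual base headers for Bind, WeakPtrFactory and ThreadTaskRunnerHandle):

class PoolWithDelayedEviction {
 public:
  PoolWithDelayedEviction() : weak_ptr_factory_(this) {
    // Bound through a weak pointer so a pending task is invalidated when
    // this object goes away.
    evict_callback_ = base::Bind(&PoolWithDelayedEviction::Evict,
                                 weak_ptr_factory_.GetWeakPtr());
  }

  void ScheduleEvict(base::TimeDelta delay) {
    base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
        FROM_HERE, evict_callback_, delay);
  }

 private:
  void Evict() {}  // Would free expired resources.

  base::Closure evict_callback_;
  // Chromium style: the factory is declared last so its weak pointers are
  // invalidated before any other member is destroyed.
  base::WeakPtrFactory<PoolWithDelayedEviction> weak_ptr_factory_;
};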
@@ -154,9 +155,13 @@ void ResourcePool::ReleaseResource(Resource* resource, uint64_t content_id) { |
PoolResource* pool_resource = it->second; |
pool_resource->set_content_id(content_id); |
+ pool_resource->set_last_usage(base::TimeTicks::Now()); |
reveman 2015/08/20 21:43:37: Can we avoid calling Now() two times in this case.
ericrk 2015/08/21 12:48:22: Ok, refactored so we now only call Now() once in t
|
// Transfer resource to |busy_resources_|. |
busy_resources_.push_back(in_use_resources_.take_and_erase(it)); |
+ |
+ // Now that we have evictable resources, schedule cleanup if necessary. |
+ ScheduleEvictExpiredResources(); |
} |
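As discussed in the comment above, base::TimeTicks::Now() otherwise gets sampled both here and again inside ScheduleEvictExpiredResources(). One way the refactor could look, sketched with a hypothetical overload that takes the already-sampled time (the landed change may differ):

  // Sample the clock once and reuse the value for both the resource
  // timestamp and the eviction scheduling.
  base::TimeTicks now = base::TimeTicks::Now();
  pool_resource->set_last_usage(now);

  // Transfer resource to |busy_resources_|.
  busy_resources_.push_back(in_use_resources_.take_and_erase(it));

  // Hypothetical variant of ScheduleEvictExpiredResources() that reuses the
  // time sampled above instead of calling base::TimeTicks::Now() again.
  ScheduleEvictExpiredResources(now);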
void ResourcePool::SetResourceUsageLimits(size_t max_memory_usage_bytes, |
@@ -181,10 +186,7 @@ void ResourcePool::ReduceResourceUsage() { |
// can't be locked for write might also not be truly free-able. |
// We can free the resource here but it doesn't mean that the |
// memory is necessarily returned to the OS. |
- scoped_ptr<PoolResource> resource = unused_resources_.take_front(); |
- unused_memory_usage_bytes_ -= ResourceUtil::UncheckedSizeInBytes<size_t>( |
- resource->size(), resource->format()); |
- DeleteResource(resource.Pass()); |
+ DeleteUnusedResource(unused_resources_.take_front()); |
} |
} |
@@ -205,6 +207,12 @@ void ResourcePool::DeleteResource(scoped_ptr<PoolResource> resource) { |
--resource_count_; |
} |
+void ResourcePool::DeleteUnusedResource(scoped_ptr<PoolResource> resource) { |
+ unused_memory_usage_bytes_ -= ResourceUtil::UncheckedSizeInBytes<size_t>( |
+ resource->size(), resource->format()); |
+ DeleteResource(resource.Pass()); |
+} |
+ |
void ResourcePool::CheckBusyResources() { |
for (size_t i = 0; i < busy_resources_.size();) { |
ResourceDeque::iterator it(busy_resources_.begin() + i); |
@@ -227,6 +235,77 @@ void ResourcePool::DidFinishUsingResource(scoped_ptr<PoolResource> resource) { |
unused_resources_.push_back(resource.Pass()); |
} |
+void ResourcePool::ScheduleEvictExpiredResources() { |
+ if (evict_expired_resources_pending_) |
+ return; |
+ |
+ if (!HasEvictableResources()) |
+ return; |
+ |
+ evict_expired_resources_pending_ = true; |
+ |
+ // Schedule a call to EvictExpiredResources at the time when the LRU buffer |
+ // expires. |
+ base::TimeTicks reduce_resource_usage_time = |
+ GetUsageTimeForLRUResource() + resource_expiration_delay_; |
+ base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( |
reveman 2015/08/20 21:43:37: Should this task runner be passed to the ctor inst
ericrk 2015/08/21 12:48:22: Sure, we can do that.
|
+ FROM_HERE, evict_expired_resources_callback_, |
+ reduce_resource_usage_time - base::TimeTicks::Now()); |
+} |
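A rough sketch of the constructor-injected task runner reveman suggests above; |task_runner_| here is a hypothetical scoped_refptr<base::SingleThreadTaskRunner> member handed to the constructor (the exact signature is illustrative only). Injecting it lets unit tests substitute a test task runner and fast-forward the delayed eviction:

void ResourcePool::ScheduleEvictExpiredResources() {
  if (evict_expired_resources_pending_ || !HasEvictableResources())
    return;

  evict_expired_resources_pending_ = true;

  // Schedule EvictExpiredResources for the time at which the LRU resource
  // expires, using the injected runner instead of looking one up here.
  base::TimeTicks expiration_time =
      GetUsageTimeForLRUResource() + resource_expiration_delay_;
  task_runner_->PostDelayedTask(FROM_HERE, evict_expired_resources_callback_,
                                expiration_time - base::TimeTicks::Now());
}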
+ |
+void ResourcePool::EvictExpiredResources() { |
+ evict_expired_resources_pending_ = false; |
+ |
+ if (!HasEvictableResources()) |
+ return; |
+ |
+ EvictResourcesNotUsedSince(base::TimeTicks::Now() - |
+ resource_expiration_delay_); |
+ |
+ // Re-schedule this function if necessary. |
+ ScheduleEvictExpiredResources(); |
+} |
+ |
+void ResourcePool::EvictResourcesNotUsedSince(base::TimeTicks time_limit) { |
+ while (!unused_resources_.empty()) { |
+ // |unused_resources_| is not strictly ordered with regards to last_usage, |
+ // as this may not exactly line up with the time a resource became non-busy. |
+ // However, this should be roughly ordered, and will only introduce slight |
+ // delays in freeing expired resources. |
+ if (unused_resources_.front()->last_usage() > time_limit) |
+ return; |
+ |
+ DeleteUnusedResource(unused_resources_.take_front()); |
+ } |
+ |
+ // Also free busy resources older than the delay. With a sufficiently large |
+ // delay, such as the 1 second used here, any "busy" resources which have |
+ // expired are not likely to be busy. Additionally, freeing a "busy" resource |
+ // has no downside other than incorrect accounting. |
+ while (!busy_resources_.empty()) { |
+ if (busy_resources_.front()->last_usage() > time_limit) |
+ return; |
+ |
+ DeleteResource(busy_resources_.take_front()); |
+ } |
+} |
+ |
+bool ResourcePool::HasEvictableResources() const { |
+ return !unused_resources_.empty() || !busy_resources_.empty(); |
+} |
+ |
+base::TimeTicks ResourcePool::GetUsageTimeForLRUResource() const { |
+ if (!unused_resources_.empty()) { |
+ return unused_resources_.front()->last_usage(); |
+ } |
+ |
+ if (!busy_resources_.empty()) { |
reveman 2015/08/20 21:43:37: DCHECK(!busy_resources_.empty()) instead as we sho
ericrk 2015/08/21 12:48:22: sounds good
|
+ return busy_resources_.front()->last_usage(); |
+ } |
+ |
+ return base::TimeTicks(); |
+} |
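A sketch of the DCHECK form reveman asks for above, assuming this helper is only reached from ScheduleEvictExpiredResources() after HasEvictableResources() has returned true:

base::TimeTicks ResourcePool::GetUsageTimeForLRUResource() const {
  if (!unused_resources_.empty())
    return unused_resources_.front()->last_usage();

  // Only called when HasEvictableResources() is true, so if there are no
  // unused resources there must be busy ones.
  DCHECK(!busy_resources_.empty());
  return busy_resources_.front()->last_usage();
}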
+ |
bool ResourcePool::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args, |
base::trace_event::ProcessMemoryDump* pmd) { |
for (const auto& resource : unused_resources_) { |