Index: base/memory/discardable_memory_android.cc |
diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc |
index 3850439940e86105b28e95e6b7846b3810242275..0a3ef659ca2bc7371e29a4dd4feb804df011e39f 100644 |
--- a/base/memory/discardable_memory_android.cc |
+++ b/base/memory/discardable_memory_android.cc |
@@ -2,9 +2,11 @@ |
// Use of this source code is governed by a BSD-style license that can be |
// found in the LICENSE file. |
-#include "base/memory/discardable_memory.h" |
+#include "base/memory/discardable_memory_android.h" |
#include <sys/mman.h> |
+#include <sys/resource.h> |
+#include <sys/time.h> |
#include <unistd.h> |
#include "base/basictypes.h" |
@@ -12,6 +14,8 @@ |
#include "base/file_util.h" |
#include "base/lazy_instance.h" |
#include "base/logging.h" |
+#include "base/memory/discardable_memory.h" |
+#include "base/memory/discardable_memory_allocator_android.h" |
#include "base/posix/eintr_wrapper.h" |
#include "base/synchronization/lock.h" |
#include "third_party/ashmem/ashmem.h" |
@@ -19,23 +23,138 @@ |
namespace base { |
namespace { |
-// Protects |g_num_discardable_memory| below. |
-base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock = |
- LAZY_INSTANCE_INITIALIZER; |
+const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; |
+ |
+struct GlobalContext { |
+ GlobalContext() |
+ : ashmem_fd_limit(GetSoftFDLimit()), |
+ allocator(kAshmemAllocatorName), |
+ ashmem_fd_count_(0) { |
+ } |
+ |
+ const int ashmem_fd_limit; |
+ internal::DiscardableMemoryAllocator allocator; |
+ Lock lock; |
+ |
+ int ashmem_fd_count() const { |
+ lock.AssertAcquired(); |
+ return ashmem_fd_count_; |
+ } |
+ |
+ void decrement_ashmem_fd_count() { |
+ lock.AssertAcquired(); |
+ --ashmem_fd_count_; |
+ } |
-// Total number of discardable memory in the process. |
-int g_num_discardable_memory = 0; |
+ void increment_ashmem_fd_count() { |
+ lock.AssertAcquired(); |
+ ++ashmem_fd_count_; |
+ } |
-// Upper limit on the number of discardable memory to avoid hitting file |
-// descriptor limit. |
-const int kDiscardableMemoryNumLimit = 128; |
+ private: |
+ static int GetSoftFDLimit() { |
+ struct rlimit limit_info; |
+ if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0) |
+ return 128; |
+ // Allow 25% of file descriptor capacity for ashmem. |
+ return limit_info.rlim_cur / 4; |
+ } |
+ |
+ int ashmem_fd_count_; |
+}; |
+ |
+LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; |
+ |
+// This is the default implementation of DiscardableMemory on Android which is |
+// used when file descriptor usage is under the soft limit. When file descriptor |
+// usage gets too high the discardable memory allocator is used instead. See |
+// ShouldUseAllocator() below for more details. |
+class DiscardableMemoryAndroidSimple : public DiscardableMemory { |
+ public: |
+ DiscardableMemoryAndroidSimple(int fd, void* address, size_t size) |
+ : fd_(fd), |
+ memory_(address), |
+ size_(size) { |
+ DCHECK_GE(fd_, 0); |
+ DCHECK(memory_); |
+ } |
+ |
+ virtual ~DiscardableMemoryAndroidSimple() { |
+ internal::CloseAshmemRegion(fd_, size_, memory_); |
+ } |
+ |
+ // DiscardableMemory: |
+ virtual LockDiscardableMemoryStatus Lock() OVERRIDE { |
+ return internal::LockAshmemRegion(fd_, 0, size_, memory_); |
+ } |
+ |
+ virtual void Unlock() OVERRIDE { |
+ internal::UnlockAshmemRegion(fd_, 0, size_, memory_); |
+ } |
+ |
+ virtual void* Memory() const OVERRIDE { |
+ return memory_; |
+ } |
+ |
+ private: |
+ const int fd_; |
+ void* const memory_; |
+ const size_t size_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple); |
+}; |
+ |
+int GetCurrentNumberOfAshmemFDs() { |
+ AutoLock lock(g_context.Get().lock); |
+ return g_context.Get().ashmem_fd_count(); |
+} |
+ |
+// Allocation can happen in two ways: |
+// - Each client-requested allocation is backed by an individual ashmem region. |
+// This allows deleting ashmem regions individually by closing the ashmem file |
willchan no longer on Chromium
2013/11/28 22:51:55
grammar nit: s/to delete/deleting/
Philippe
2013/11/29 12:41:05
Oops, thanks :)
|
+// descriptor. This is the default path that is taken when file descriptor usage |
+// allows us to do so. |
willchan no longer on Chromium
2013/11/28 22:51:55
Or when the allocation size would require an entir
Philippe
2013/11/29 12:41:05
Done.
|
+// - Allocations are performed by the global allocator when file descriptor |
+// usage gets too high. This still allows unpinning but does not allow deleting |
+// (i.e. releasing the physical pages backing) individual regions. |
willchan no longer on Chromium
2013/11/28 22:51:55
spelling nits:
* s/physycal/physical/
* s/individi
willchan no longer on Chromium
2013/11/28 22:51:55
spelling nits:
s/physycal/physical/
s/individiual/
Philippe
2013/11/29 12:41:05
Oops, I told you I was retarded :)
|
+bool ShouldUseAllocator(size_t size) { |
+ const float kMaxFDUsageRateForNormalAllocations = 0.9; |
+ const float kMaxFDUsageRateForVeryLargeAllocations = 0.98; |
+ const int current_ashmem_fd_count = GetCurrentNumberOfAshmemFDs(); |
+ const int ashmem_fd_limit = g_context.Get().ashmem_fd_limit; |
+ if (current_ashmem_fd_count > |
+ kMaxFDUsageRateForVeryLargeAllocations * ashmem_fd_limit) { |
+ // FD usage is too high no matter how big the requested size is. |
willchan no longer on Chromium
2013/11/28 22:51:55
I don't fully understand this algorithm. The alloc
Philippe
2013/11/29 12:41:05
You're right, the allocator would very likely crea
|
+ return true; |
+ } |
+ // TODO(pliard): consider tuning the size threshold below. For instance we |
+ // might want to make it a fraction of kMinAshmemRegionSize and also |
+ // systematically have small allocations go through the allocator to allow big |
+// allocations to systematically go through individual ashmem regions. |
willchan no longer on Chromium
2013/11/28 22:51:55
spelling nit: s/individial/individual/
Philippe
2013/11/29 12:41:05
Done.
Philippe
2013/11/29 12:41:05
Done.
|
+ if (size > internal::DiscardableMemoryAllocator::kMinAshmemRegionSize) |
willchan no longer on Chromium
2013/11/28 22:51:55
size >= ... perhaps? Even more precise would be si
Philippe
2013/11/29 12:41:05
Agreed for the layering violation and also page al
|
+ return false; |
+ |
+ return current_ashmem_fd_count > |
+ kMaxFDUsageRateForNormalAllocations * ashmem_fd_limit; |
+} |
+ |
+} // namespace |
+ |
+namespace internal { |
+ |
+size_t AlignToNextPage(size_t size) { |
+ const size_t kPageSize = 4096; |
+ DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); |
+ const size_t mask = ~(kPageSize - 1); |
+ return (size + kPageSize - 1) & mask; |
willchan no longer on Chromium
2013/11/28 22:51:55
I'm not sure, but this might be a security issue d
Philippe
2013/11/29 12:41:05
Great catch!
|
+} |
bool CreateAshmemRegion(const char* name, |
size_t size, |
int* out_fd, |
void** out_address) { |
- base::AutoLock lock(g_discardable_memory_lock.Get()); |
- if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit) |
+ AutoLock lock(g_context.Get().lock); |
+ if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit) |
return false; |
int fd = ashmem_create_region(name, size); |
if (fd < 0) { |
@@ -61,15 +180,15 @@ bool CreateAshmemRegion(const char* name, |
} |
ignore_result(fd_closer.release()); |
- ++g_num_discardable_memory; |
+ g_context.Get().increment_ashmem_fd_count(); |
*out_fd = fd; |
*out_address = address; |
return true; |
} |
-bool DeleteAshmemRegion(int fd, size_t size, void* address) { |
- base::AutoLock lock(g_discardable_memory_lock.Get()); |
- --g_num_discardable_memory; |
+bool CloseAshmemRegion(int fd, size_t size, void* address) { |
+ AutoLock lock(g_context.Get().lock); |
+ g_context.Get().decrement_ashmem_fd_count(); |
if (munmap(address, size) == -1) { |
DPLOG(ERROR) << "Failed to unmap memory."; |
close(fd); |
@@ -97,42 +216,7 @@ bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
return !failed; |
} |
-class DiscardableMemoryAndroid : public DiscardableMemory { |
- public: |
- DiscardableMemoryAndroid(int fd, void* address, size_t size) |
- : fd_(fd), |
- memory_(address), |
- size_(size) { |
- DCHECK_GE(fd_, 0); |
- DCHECK(memory_); |
- } |
- |
- virtual ~DiscardableMemoryAndroid() { |
- DeleteAshmemRegion(fd_, size_, memory_); |
- } |
- |
- // DiscardableMemory: |
- virtual LockDiscardableMemoryStatus Lock() OVERRIDE { |
- return LockAshmemRegion(fd_, 0, size_, memory_); |
- } |
- |
- virtual void Unlock() OVERRIDE { |
- UnlockAshmemRegion(fd_, 0, size_, memory_); |
- } |
- |
- virtual void* Memory() const OVERRIDE { |
- return memory_; |
- } |
- |
- private: |
- const int fd_; |
- void* const memory_; |
- const size_t size_; |
- |
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid); |
-}; |
- |
-} // namespace |
+} // namespace internal |
// static |
bool DiscardableMemory::SupportedNatively() { |
@@ -142,17 +226,20 @@ bool DiscardableMemory::SupportedNatively() { |
// static |
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( |
size_t size) { |
+ GlobalContext* const global_context = g_context.Pointer(); |
+ if (ShouldUseAllocator(size)) |
willchan no longer on Chromium
2013/11/28 22:51:55
It occurs to me now that all this checking of the
Philippe
2013/11/29 12:41:05
Yeah this is indeed slightly racy. I think it shou
|
+ return global_context->allocator.Allocate(size); |
// Pinning & unpinning works with page granularity therefore align the size |
// upfront. |
- const size_t kPageSize = 4096; |
- const size_t mask = ~(kPageSize - 1); |
- size = (size + kPageSize - 1) & mask; |
+ const size_t aligned_size = internal::AlignToNextPage(size); |
int fd; |
void* address; |
- if (!CreateAshmemRegion("", size, &fd, &address)) |
- return scoped_ptr<DiscardableMemory>(); |
+ if (!internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { |
+    // Fall back to the allocator, which might be more likely to succeed. |
+ return global_context->allocator.Allocate(size); |
+ } |
return scoped_ptr<DiscardableMemory>( |
- new DiscardableMemoryAndroid(fd, address, size)); |
+ new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); |
} |
// static |