Chromium Code Reviews

Unified Diff: base/memory/discardable_memory_android.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix Clang build Created 7 years ago
Index: base/memory/discardable_memory_android.cc
diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc
index 8fee53e1d6cd388426a7e2542c839b5e46cc458d..7e84967055f1ba9d35f8c6efaeb84b12833586ad 100644
--- a/base/memory/discardable_memory_android.cc
+++ b/base/memory/discardable_memory_android.cc
@@ -2,39 +2,139 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_android.h"
#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
#include <unistd.h>
+#include <limits>
+
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_allocator_android.h"
#include "base/synchronization/lock.h"
#include "third_party/ashmem/ashmem.h"
namespace base {
namespace {
-// Protects |g_num_discardable_memory| below.
-base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock =
- LAZY_INSTANCE_INITIALIZER;
+const size_t kPageSize = 4096;
+
+const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator";
+
+struct GlobalContext {
+ GlobalContext()
+ : ashmem_fd_limit(GetSoftFDLimit()),
+ allocator(kAshmemAllocatorName),
+ ashmem_fd_count_(0) {
+ }
+
+ const int ashmem_fd_limit;
+ internal::DiscardableMemoryAllocator allocator;
+ Lock lock;
+
+ int ashmem_fd_count() const {
+ lock.AssertAcquired();
+ return ashmem_fd_count_;
+ }
+
+ void decrement_ashmem_fd_count() {
+ lock.AssertAcquired();
+ --ashmem_fd_count_;
+ }
+
+ void increment_ashmem_fd_count() {
+ lock.AssertAcquired();
+ ++ashmem_fd_count_;
+ }
+
+ private:
+ static int GetSoftFDLimit() {
+ struct rlimit limit_info;
+ if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0)
+ return 128;
+ // Allow 25% of file descriptor capacity for ashmem.
+ return limit_info.rlim_cur / 4;
+ }
+
+ int ashmem_fd_count_;
+};
+
+LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER;
+
+// This is the default implementation of DiscardableMemory on Android which is
+// used when file descriptor usage is under the soft limit. When file descriptor
+// usage gets too high, the discardable memory allocator is used instead. See
+// ShouldUseAllocator() below for more details.
+class DiscardableMemoryAndroidSimple : public DiscardableMemory {
+ public:
+ DiscardableMemoryAndroidSimple(int fd, void* address, size_t size)
+ : fd_(fd),
+ memory_(address),
+ size_(size) {
+ DCHECK_GE(fd_, 0);
+ DCHECK(memory_);
+ }
+
+ virtual ~DiscardableMemoryAndroidSimple() {
+ internal::CloseAshmemRegion(fd_, size_, memory_);
+ }
+
+ // DiscardableMemory:
+ virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
+ return internal::LockAshmemRegion(fd_, 0, size_, memory_);
+ }
+
+ virtual void Unlock() OVERRIDE {
+ internal::UnlockAshmemRegion(fd_, 0, size_, memory_);
+ }
+
+ virtual void* Memory() const OVERRIDE {
+ return memory_;
+ }
+
+ private:
+ const int fd_;
+ void* const memory_;
+ const size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple);
+};
+
+int GetCurrentNumberOfAshmemFDs() {
+ AutoLock lock(g_context.Get().lock);
+ return g_context.Get().ashmem_fd_count();
+}
-// Total number of discardable memory in the process.
-int g_num_discardable_memory = 0;
+// Returns whether the provided size can be safely page-aligned (without causing
+// an overflow).
+bool CheckSizeCanBeAlignedToNextPage(size_t size) {
+ return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
+}
-// Upper limit on the number of discardable memory to avoid hitting file
-// descriptor limit.
-const int kDiscardableMemoryNumLimit = 128;
+} // namespace
+
+namespace internal {
+
+size_t AlignToNextPage(size_t size) {
+ DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
+ DCHECK(CheckSizeCanBeAlignedToNextPage(size));
+ const size_t mask = ~(kPageSize - 1);
+ return (size + kPageSize - 1) & mask;
+}
bool CreateAshmemRegion(const char* name,
size_t size,
int* out_fd,
void** out_address) {
- base::AutoLock lock(g_discardable_memory_lock.Get());
- if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit)
+ AutoLock lock(g_context.Get().lock);
+ if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit)
return false;
int fd = ashmem_create_region(name, size);
if (fd < 0) {
@@ -60,15 +160,15 @@ bool CreateAshmemRegion(const char* name,
}
ignore_result(fd_closer.release());
- ++g_num_discardable_memory;
+ g_context.Get().increment_ashmem_fd_count();
*out_fd = fd;
*out_address = address;
return true;
}
-bool DeleteAshmemRegion(int fd, size_t size, void* address) {
- base::AutoLock lock(g_discardable_memory_lock.Get());
- --g_num_discardable_memory;
+bool CloseAshmemRegion(int fd, size_t size, void* address) {
+ AutoLock lock(g_context.Get().lock);
+ g_context.Get().decrement_ashmem_fd_count();
if (munmap(address, size) == -1) {
DPLOG(ERROR) << "Failed to unmap memory.";
close(fd);
@@ -96,62 +196,54 @@ bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) {
return !failed;
}
-class DiscardableMemoryAndroid : public DiscardableMemory {
- public:
- DiscardableMemoryAndroid(int fd, void* address, size_t size)
- : fd_(fd),
- memory_(address),
- size_(size) {
- DCHECK_GE(fd_, 0);
- DCHECK(memory_);
- }
-
- virtual ~DiscardableMemoryAndroid() {
- DeleteAshmemRegion(fd_, size_, memory_);
- }
-
- // DiscardableMemory:
- virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
- return LockAshmemRegion(fd_, 0, size_, memory_);
- }
-
- virtual void Unlock() OVERRIDE {
- UnlockAshmemRegion(fd_, 0, size_, memory_);
- }
-
- virtual void* Memory() const OVERRIDE {
- return memory_;
- }
-
- private:
- const int fd_;
- void* const memory_;
- const size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid);
-};
-
-} // namespace
+} // namespace internal
// static
bool DiscardableMemory::SupportedNatively() {
return true;
}
+// Allocation can happen in two ways:
+// - Each client-requested allocation is backed by an individual ashmem region.
+// This allows deleting ashmem regions individually by closing the ashmem file
+// descriptor. This is the default path that is taken when file descriptor usage
+// allows us to do so or when the allocation size would require an entire
+// ashmem region.
+// - Allocations are performed by the global allocator when file descriptor
+// usage gets too high. This still allows unpinning but does not allow deleting
+// (i.e. releasing the physical pages backing) individual regions.
+//
+// TODO(pliard): consider tuning the size threshold used below. For instance we
+// might want to make it a fraction of kMinAshmemRegionSize and have small
+// allocations systematically go through the allocator while big allocations
+// go through individual ashmem regions.
+//
// static
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory(
size_t size) {
+ if (!CheckSizeCanBeAlignedToNextPage(size))
+ return scoped_ptr<DiscardableMemory>();
// Pinning & unpinning works with page granularity therefore align the size
// upfront.
- const size_t kPageSize = 4096;
- const size_t mask = ~(kPageSize - 1);
- size = (size + kPageSize - 1) & mask;
- int fd;
- void* address;
- if (!CreateAshmemRegion("", size, &fd, &address))
- return scoped_ptr<DiscardableMemory>();
- return scoped_ptr<DiscardableMemory>(
- new DiscardableMemoryAndroid(fd, address, size));
+ const size_t aligned_size = internal::AlignToNextPage(size);
+ // Note that the following code is slightly racy. The worst that can happen in
+ // practice though is making the wrong decision (e.g. using the allocator
+ // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock
+ // acquired for the whole allocation would cause a deadlock when the allocator
+ // tries to create an ashmem region.
+ const size_t kAllocatorRegionSize =
+ internal::DiscardableMemoryAllocator::kMinAshmemRegionSize;
+ GlobalContext* const global_context = g_context.Pointer();
+ if (aligned_size >= kAllocatorRegionSize ||
+ GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) {
+ int fd;
+ void* address;
+ if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) {
+ return scoped_ptr<DiscardableMemory>(
+ new DiscardableMemoryAndroidSimple(fd, address, aligned_size));
+ }
+ }
+ return global_context->allocator.Allocate(size);
}
// static

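For reference, the file-descriptor budget that GlobalContext computes and the way CreateLockedMemory() consults it: a quarter of the soft RLIMIT_NOFILE value is reserved for ashmem (128 if the limit cannot be read), and an allocation only gets its own ashmem region while fewer than 90% of those descriptors are in use, or when it is at least one allocator region in size. Below is a minimal standalone sketch of that bookkeeping; the 8 MB allocator region size is an assumed placeholder, since kMinAshmemRegionSize is defined in discardable_memory_allocator_android.h rather than in this file.

#include <sys/resource.h>

#include <cstddef>
#include <cstdio>

namespace {

// Mirrors GetSoftFDLimit(): reserve 25% of the soft RLIMIT_NOFILE value for
// ashmem file descriptors, falling back to 128 if the query fails.
int AshmemFDLimit() {
  struct rlimit limit_info;
  if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0)
    return 128;
  return static_cast<int>(limit_info.rlim_cur / 4);
}

// Mirrors the decision in CreateLockedMemory(): a dedicated ashmem region is
// used when the (page-aligned) size fills at least one allocator region, or
// while ashmem FD usage is below 90% of the reserved budget.
bool ShouldUseOwnAshmemRegion(size_t aligned_size,
                              size_t allocator_region_size,
                              int open_ashmem_fds,
                              int fd_limit) {
  return aligned_size >= allocator_region_size ||
         open_ashmem_fds < 0.9 * fd_limit;
}

}  // namespace

int main() {
  const int fd_limit = AshmemFDLimit();
  const size_t kAssumedAllocatorRegionSize = 8 * 1024 * 1024;  // Placeholder.
  std::printf("ashmem fd budget: %d\n", fd_limit);
  std::printf("64 KB alloc, no FDs open -> own region? %d\n",
              ShouldUseOwnAshmemRegion(64 * 1024, kAssumedAllocatorRegionSize,
                                       0, fd_limit));
  std::printf("64 KB alloc, budget nearly exhausted -> own region? %d\n",
              ShouldUseOwnAshmemRegion(64 * 1024, kAssumedAllocatorRegionSize,
                                       fd_limit - 1, fd_limit));
  return 0;
}
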
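AlignToNextPage() rounds the requested size up to a 4096-byte boundary with a mask, and CheckSizeCanBeAlignedToNextPage() rejects sizes for which size + kPageSize - 1 would wrap around. The same arithmetic as a self-contained sketch with a few illustrative values:

#include <cassert>
#include <cstddef>
#include <limits>

namespace {

const size_t kPageSize = 4096;

// True when rounding up cannot overflow, i.e. size + kPageSize - 1 <= SIZE_MAX.
bool CanBeAlignedToNextPage(size_t size) {
  return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
}

// Rounds |size| up to the next multiple of kPageSize.
size_t AlignToNextPage(size_t size) {
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}

}  // namespace

int main() {
  assert(AlignToNextPage(1) == 4096);      // Partial pages round up.
  assert(AlignToNextPage(4096) == 4096);   // Exact multiples are unchanged.
  assert(AlignToNextPage(4097) == 8192);
  assert(CanBeAlignedToNextPage(std::numeric_limits<size_t>::max() - 4095));
  assert(!CanBeAlignedToNextPage(std::numeric_limits<size_t>::max()));
  return 0;
}
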
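The bodies of LockAshmemRegion() and UnlockAshmemRegion() fall outside this hunk; assuming they wrap ashmem_pin_region() and ashmem_unpin_region() from third_party/ashmem (an assumption, not something this diff shows), a bare pin/unpin round trip on a single region looks roughly like the sketch below. ashmem_create_region() is the call the patch itself uses in CreateAshmemRegion().

#include <sys/mman.h>
#include <unistd.h>

#include <cstdio>

#include "third_party/ashmem/ashmem.h"

int main() {
  const size_t kSize = 4096;
  // Create and map one anonymous shared memory region (as CreateAshmemRegion
  // does, minus the error handling and FD accounting).
  int fd = ashmem_create_region("sketch", kSize);
  if (fd < 0)
    return 1;
  void* address =
      mmap(NULL, kSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (address == MAP_FAILED) {
    close(fd);
    return 1;
  }
  // While a range is unpinned the kernel may discard its pages under memory
  // pressure; re-pinning reports whether that happened (assumed to be what
  // Unlock()/Lock() build on).
  ashmem_unpin_region(fd, 0, kSize);
  const int pin_result = ashmem_pin_region(fd, 0, kSize);
  std::printf("pin result: %d\n", pin_result);
  munmap(address, kSize);
  close(fd);
  return 0;
}
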
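Finally, the caller-visible contract is unchanged by the patch: CreateLockedMemory() hands back memory that is already locked, Unlock() makes it purgeable while idle, and Lock() must succeed before the contents are touched again. A hedged usage sketch; the DISCARDABLE_MEMORY_SUCCESS enumerator is assumed from the LockDiscardableMemoryStatus return type and should be checked against base/memory/discardable_memory.h.

#include <cstring>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"

const size_t kBufferSize = 64 * 1024;

// Illustrative only: allocate, use, unlock while idle, then try to reuse.
void UseDiscardableBuffer() {
  scoped_ptr<base::DiscardableMemory> memory(
      base::DiscardableMemory::CreateLockedMemory(kBufferSize));
  if (!memory)
    return;  // Allocation failed (e.g. no ashmem region could be created).

  // The buffer comes back locked, so it can be written immediately.
  memset(memory->Memory(), 0, kBufferSize);

  // Unlock while the data is not needed; the kernel may purge the pages.
  memory->Unlock();

  // Before reuse, Lock() reports whether the previous contents survived.
  const base::LockDiscardableMemoryStatus status = memory->Lock();
  if (status == base::DISCARDABLE_MEMORY_SUCCESS) {
    // Contents survived; safe to read what was written before Unlock().
  } else {
    // Purged (regenerate the data) or failed (give up). Enumerator names
    // other than the assumed success value are intentionally not spelled out.
  }
}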