Chromium Code Reviews

Unified Diff: base/memory/discardable_memory_android.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix comment (created 7 years ago)
--- a/base/memory/discardable_memory_android.cc
+++ b/base/memory/discardable_memory_android.cc
@@ -1,168 +1,260 @@
 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_android.h"
 
 #include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
 #include <unistd.h>
 
+#include <limits>
+
 #include "base/basictypes.h"
 #include "base/compiler_specific.h"
 #include "base/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_allocator_android.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/synchronization/lock.h"
 #include "third_party/ashmem/ashmem.h"
 
 namespace base {
 namespace {
 
-// Protects |g_num_discardable_memory| below.
-base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock =
-    LAZY_INSTANCE_INITIALIZER;
+const size_t kPageSize = 4096;
 
-// Total number of discardable memory in the process.
-int g_num_discardable_memory = 0;
+const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator";
 
-// Upper limit on the number of discardable memory to avoid hitting file
-// descriptor limit.
-const int kDiscardableMemoryNumLimit = 128;
+struct GlobalContext {
+  GlobalContext()
+      : ashmem_fd_limit(GetSoftFDLimit()),
+        allocator(kAshmemAllocatorName),
+        ashmem_fd_count_(0) {
+  }
+
+  const int ashmem_fd_limit;
+  internal::DiscardableMemoryAllocator allocator;
+  Lock lock;
+
+  int ashmem_fd_count() const {
+    lock.AssertAcquired();
+    return ashmem_fd_count_;
+  }
+
+  void decrement_ashmem_fd_count() {
+    lock.AssertAcquired();
+    --ashmem_fd_count_;
+  }
+
+  void increment_ashmem_fd_count() {
+    lock.AssertAcquired();
+    ++ashmem_fd_count_;
+  }
+
+ private:
+  static int GetSoftFDLimit() {
+    struct rlimit limit_info;
+    if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0)
+      return 128;
+    // Allow 25% of file descriptor capacity for ashmem.
+    return limit_info.rlim_cur / 4;
+  }
+
+  int ashmem_fd_count_;
+};
+
+LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER;
+
+// This is the default implementation of DiscardableMemory on Android which is
+// used when file descriptor usage is under the soft limit. When file descriptor
+// usage gets too high the discardable memory allocator is used instead. See
+// the allocation logic in CreateLockedMemory() below for more details.
+class DiscardableMemoryAndroidSimple : public DiscardableMemory {
+ public:
+  DiscardableMemoryAndroidSimple(int fd, void* address, size_t size)
+      : fd_(fd),
+        memory_(address),
+        size_(size) {
+    DCHECK_GE(fd_, 0);
+    DCHECK(memory_);
+  }
+
+  virtual ~DiscardableMemoryAndroidSimple() {
+    internal::CloseAshmemRegion(fd_, size_, memory_);
+  }
+
+  // DiscardableMemory:
+  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
+    return internal::LockAshmemRegion(fd_, 0, size_, memory_);
+  }
+
+  virtual void Unlock() OVERRIDE {
+    internal::UnlockAshmemRegion(fd_, 0, size_, memory_);
+  }
+
+  virtual void* Memory() const OVERRIDE {
+    return memory_;
+  }
+
+ private:
+  const int fd_;
+  void* const memory_;
+  const size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple);
+};
+
+int GetCurrentNumberOfAshmemFDs() {
+  AutoLock lock(g_context.Get().lock);
+  return g_context.Get().ashmem_fd_count();
+}
+
+// Returns whether the provided size can be safely page-aligned (without causing
+// an overflow).
+bool CheckSizeCanBeAlignedToNextPage(size_t size) {
+  return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
+}
+
+}  // namespace
+
+namespace internal {
+
+size_t AlignToNextPage(size_t size) {
+  DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
+  DCHECK(CheckSizeCanBeAlignedToNextPage(size));
+  const size_t mask = ~(kPageSize - 1);
+  return (size + kPageSize - 1) & mask;
+}
 
 bool CreateAshmemRegion(const char* name,
                         size_t size,
                         int* out_fd,
                         void** out_address) {
-  base::AutoLock lock(g_discardable_memory_lock.Get());
-  if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit)
+  AutoLock lock(g_context.Get().lock);
+  if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit)
     return false;
   int fd = ashmem_create_region(name, size);
   if (fd < 0) {
    DLOG(ERROR) << "ashmem_create_region() failed";
     return false;
   }
   file_util::ScopedFD fd_closer(&fd);
 
   const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
   if (err < 0) {
     DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
     return false;
   }
 
   // There is a problem using MAP_PRIVATE here. As we are constantly calling
   // Lock() and Unlock(), data could get lost if they are not written to the
   // underlying file when Unlock() gets called.
   void* const address = mmap(
       NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (address == MAP_FAILED) {
     DPLOG(ERROR) << "Failed to map memory.";
     return false;
   }
 
   ignore_result(fd_closer.release());
-  ++g_num_discardable_memory;
+  g_context.Get().increment_ashmem_fd_count();
   *out_fd = fd;
   *out_address = address;
   return true;
 }
 
-bool DeleteAshmemRegion(int fd, size_t size, void* address) {
-  base::AutoLock lock(g_discardable_memory_lock.Get());
-  --g_num_discardable_memory;
+bool CloseAshmemRegion(int fd, size_t size, void* address) {
+  AutoLock lock(g_context.Get().lock);
+  g_context.Get().decrement_ashmem_fd_count();
   if (munmap(address, size) == -1) {
     DPLOG(ERROR) << "Failed to unmap memory.";
     close(fd);
     return false;
   }
   return close(fd) == 0;
 }
 
 LockDiscardableMemoryStatus LockAshmemRegion(int fd,
                                              size_t off,
                                              size_t size,
                                              const void* address) {
   const int result = ashmem_pin_region(fd, off, size);
   DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE));
   return result == ASHMEM_WAS_PURGED ?
       DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS;
 }
 
 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) {
   const int failed = ashmem_unpin_region(fd, off, size);
   if (failed)
     DLOG(ERROR) << "Failed to unpin memory.";
   // This allows us to catch accesses to unlocked memory.
   DCHECK_EQ(0, mprotect(address, size, PROT_NONE));
   return !failed;
 }
 
-class DiscardableMemoryAndroid : public DiscardableMemory {
- public:
-  DiscardableMemoryAndroid(int fd, void* address, size_t size)
-      : fd_(fd),
-        memory_(address),
-        size_(size) {
-    DCHECK_GE(fd_, 0);
-    DCHECK(memory_);
-  }
-
-  virtual ~DiscardableMemoryAndroid() {
-    DeleteAshmemRegion(fd_, size_, memory_);
-  }
-
-  // DiscardableMemory:
-  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
-    return LockAshmemRegion(fd_, 0, size_, memory_);
-  }
-
-  virtual void Unlock() OVERRIDE {
-    UnlockAshmemRegion(fd_, 0, size_, memory_);
-  }
-
-  virtual void* Memory() const OVERRIDE {
-    return memory_;
-  }
-
- private:
-  const int fd_;
-  void* const memory_;
-  const size_t size_;
-
-  DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid);
-};
-
-}  // namespace
+}  // namespace internal
 
 // static
 bool DiscardableMemory::SupportedNatively() {
   return true;
 }
 
+// Allocation can happen in two ways:
+// - Each client-requested allocation is backed by an individual ashmem region.
+// This allows deleting ashmem regions individually by closing the ashmem file
+// descriptor. This is the default path that is taken when file descriptor usage
+// allows us to do so or when the allocation size would require an entire
+// ashmem region.
+// - Allocations are performed by the global allocator when file descriptor
+// usage gets too high. This still allows unpinning but does not allow deleting
+// (i.e. releasing the physical pages backing) individual regions.
+//
+// TODO(pliard): consider tuning the size threshold used below. For instance we
+// might want to make it a fraction of kMinAshmemRegionSize and also
+// systematically have small allocations go through the allocator, letting big
+// allocations always use individual ashmem regions.
+//
 // static
 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory(
     size_t size) {
+  if (!CheckSizeCanBeAlignedToNextPage(size))
+    return scoped_ptr<DiscardableMemory>();
   // Pinning & unpinning works with page granularity therefore align the size
   // upfront.
-  const size_t kPageSize = 4096;
-  const size_t mask = ~(kPageSize - 1);
-  size = (size + kPageSize - 1) & mask;
-  int fd;
-  void* address;
-  if (!CreateAshmemRegion("", size, &fd, &address))
-    return scoped_ptr<DiscardableMemory>();
-  return scoped_ptr<DiscardableMemory>(
-      new DiscardableMemoryAndroid(fd, address, size));
+  const size_t aligned_size = internal::AlignToNextPage(size);
+  // Note that the following code is slightly racy. The worst that can happen in
+  // practice though is taking the wrong decision (e.g. using the allocator
+  // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock
+  // acquired for the whole allocation would cause a deadlock when the allocator
+  // tries to create an ashmem region.
+  const size_t kAllocatorRegionSize =
+      internal::DiscardableMemoryAllocator::kMinAshmemRegionSize;
+  GlobalContext* const global_context = g_context.Pointer();
+  if (aligned_size >= kAllocatorRegionSize ||
+      GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) {
+    int fd;
+    void* address;
+    if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) {
+      return scoped_ptr<DiscardableMemory>(
+          new DiscardableMemoryAndroidSimple(fd, address, aligned_size));
+    }
+  }
+  return global_context->allocator.Allocate(size);
 }
 
 // static
 bool DiscardableMemory::PurgeForTestingSupported() {
   return false;
 }
 
 // static
 void DiscardableMemory::PurgeForTesting() {
   NOTIMPLEMENTED();
 }
 
 }  // namespace base
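Reviewer note: the patch replaces the inline rounding in CreateLockedMemory() with internal::AlignToNextPage() plus an overflow guard. The following standalone sketch restates that arithmetic outside Chromium; kPageSize is hard-coded to 4096 here, as in the patch, rather than checked against getpagesize(). It is an illustration of the rounding logic, not part of the CL.

// Standalone restatement of the patch's page-alignment arithmetic.
#include <cstddef>
#include <cstdio>
#include <limits>

namespace {

const size_t kPageSize = 4096;

// True when |size| can be rounded up to a page multiple without overflowing.
bool CheckSizeCanBeAlignedToNextPage(size_t size) {
  return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
}

// Rounds |size| up to the next multiple of kPageSize.
size_t AlignToNextPage(size_t size) {
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}

}  // namespace

int main() {
  // 1 byte and 4096 bytes both round to one page; 4097 bytes needs two pages.
  std::printf("%zu %zu %zu\n",
              AlignToNextPage(1), AlignToNextPage(4096), AlignToNextPage(4097));
  // Sizes within kPageSize - 1 of SIZE_MAX cannot be aligned safely.
  std::printf("%d\n", CheckSizeCanBeAlignedToNextPage(
                          std::numeric_limits<size_t>::max()));
  return 0;
}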
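Reviewer note: GetSoftFDLimit() budgets 25% of the process soft RLIMIT_NOFILE for ashmem file descriptors, and CreateLockedMemory() only takes the one-ashmem-region-per-allocation path while the current ashmem FD count stays below 90% of that budget (or when the request is at least kMinAshmemRegionSize). Below is a standalone sketch of that budget arithmetic; ShouldUseIndividualAshmemRegion() is a hypothetical helper introduced here for illustration and deliberately ignores the large-allocation case.

// Standalone sketch of the file descriptor budget logic from the patch.
#include <sys/resource.h>
#include <sys/time.h>

#include <cstdio>

namespace {

// 25% of the process soft FD limit, with a conservative fallback of 128.
int GetSoftFDLimit() {
  struct rlimit limit_info;
  if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0)
    return 128;
  return limit_info.rlim_cur / 4;
}

// Hypothetical helper mirroring the decision in CreateLockedMemory(): prefer
// an individual ashmem region while FD usage is comfortably under the budget.
bool ShouldUseIndividualAshmemRegion(int current_ashmem_fd_count,
                                     int ashmem_fd_limit) {
  return current_ashmem_fd_count < 0.9 * ashmem_fd_limit;
}

}  // namespace

int main() {
  const int limit = GetSoftFDLimit();
  std::printf("ashmem FD budget: %d\n", limit);
  std::printf("individual region at 0 FDs: %d\n",
              ShouldUseIndividualAshmemRegion(0, limit));
  std::printf("individual region at the budget: %d\n",
              ShouldUseIndividualAshmemRegion(limit, limit));
  return 0;
}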
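Reviewer note: for context on how the two allocation paths look from the caller's side, here is a hypothetical usage sketch. It assumes Chromium's base library from the era of this patch (scoped_ptr, base::DiscardableMemory) and is not part of the CL; it relies only on the CreateLockedMemory()/Lock()/Unlock()/Memory() contract visible in the diff.

// Hypothetical caller: both allocation paths (individual ashmem region or the
// global DiscardableMemoryAllocator) hand back the same DiscardableMemory
// interface, so callers do not need to know which one served the request.
#include <cstddef>
#include <cstring>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"

bool UseDiscardableScratchBuffer(size_t size) {
  scoped_ptr<base::DiscardableMemory> memory(
      base::DiscardableMemory::CreateLockedMemory(size));
  if (!memory.get())
    return false;  // Neither an ashmem region nor the allocator could serve it.

  // The region comes back locked (pinned); it is safe to touch the bytes now.
  memset(memory->Memory(), 0, size);

  // Unlock (unpin) while idle so the kernel may discard the pages if needed.
  memory->Unlock();

  // Re-lock before the next use; the contents may have been purged meanwhile.
  if (memory->Lock() == base::DISCARDABLE_MEMORY_PURGED) {
    // Purged: the memory is locked again but must be re-initialized.
    memset(memory->Memory(), 0, size);
  }
  memory->Unlock();
  return true;
}  // |memory| goes out of scope here, releasing the region.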
