OLD | NEW |
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory_android.h" | 5 #include "base/memory/discardable_memory.h" |
6 | 6 |
7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
8 #include <sys/resource.h> | |
9 #include <sys/time.h> | |
10 #include <unistd.h> | 8 #include <unistd.h> |
11 | 9 |
12 #include <limits> | |
13 | |
14 #include "base/basictypes.h" | 10 #include "base/basictypes.h" |
15 #include "base/compiler_specific.h" | 11 #include "base/compiler_specific.h" |
16 #include "base/file_util.h" | 12 #include "base/file_util.h" |
17 #include "base/lazy_instance.h" | 13 #include "base/lazy_instance.h" |
18 #include "base/logging.h" | 14 #include "base/logging.h" |
19 #include "base/memory/discardable_memory.h" | |
20 #include "base/memory/discardable_memory_allocator_android.h" | |
21 #include "base/synchronization/lock.h" | 15 #include "base/synchronization/lock.h" |
22 #include "third_party/ashmem/ashmem.h" | 16 #include "third_party/ashmem/ashmem.h" |
23 | 17 |
24 namespace base { | 18 namespace base { |
25 namespace { | 19 namespace { |
26 | 20 |
27 const size_t kPageSize = 4096; | 21 // Protects |g_num_discardable_memory| below. |
| 22 base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock = |
| 23 LAZY_INSTANCE_INITIALIZER; |
28 | 24 |
29 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; | 25 // Total number of discardable memory instances in the process. |
| 26 int g_num_discardable_memory = 0; |
30 | 27 |
31 struct GlobalContext { | 28 // Upper limit on the number of discardable memory instances to avoid |
32 GlobalContext() | 29 // hitting the file descriptor limit. |
33 : ashmem_fd_limit(GetSoftFDLimit()), | 30 const int kDiscardableMemoryNumLimit = 128; |
34 allocator(kAshmemAllocatorName), | |
35 ashmem_fd_count_(0) { | |
36 } | |
37 | |
38 const int ashmem_fd_limit; | |
39 internal::DiscardableMemoryAllocator allocator; | |
40 Lock lock; | |
41 | |
42 int ashmem_fd_count() const { | |
43 lock.AssertAcquired(); | |
44 return ashmem_fd_count_; | |
45 } | |
46 | |
47 void decrement_ashmem_fd_count() { | |
48 lock.AssertAcquired(); | |
49 --ashmem_fd_count_; | |
50 } | |
51 | |
52 void increment_ashmem_fd_count() { | |
53 lock.AssertAcquired(); | |
54 ++ashmem_fd_count_; | |
55 } | |
56 | |
57 private: | |
58 static int GetSoftFDLimit() { | |
59 struct rlimit limit_info; | |
60 if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0) | |
61 return 128; | |
62 // Allow 25% of file descriptor capacity for ashmem. | |
63 return limit_info.rlim_cur / 4; | |
64 } | |
65 | |
66 int ashmem_fd_count_; | |
67 }; | |
68 | |
69 LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; | |
70 | |
71 // This is the default implementation of DiscardableMemory on Android which is | |
72 // used when file descriptor usage is under the soft limit. When file descriptor | |
73 // usage gets too high the discardable memory allocator is used instead. See | |
74 // ShouldUseAllocator() below for more details. | |
75 class DiscardableMemoryAndroidSimple : public DiscardableMemory { | |
76 public: | |
77 DiscardableMemoryAndroidSimple(int fd, void* address, size_t size) | |
78 : fd_(fd), | |
79 memory_(address), | |
80 size_(size) { | |
81 DCHECK_GE(fd_, 0); | |
82 DCHECK(memory_); | |
83 } | |
84 | |
85 virtual ~DiscardableMemoryAndroidSimple() { | |
86 internal::CloseAshmemRegion(fd_, size_, memory_); | |
87 } | |
88 | |
89 // DiscardableMemory: | |
90 virtual LockDiscardableMemoryStatus Lock() OVERRIDE { | |
91 return internal::LockAshmemRegion(fd_, 0, size_, memory_); | |
92 } | |
93 | |
94 virtual void Unlock() OVERRIDE { | |
95 internal::UnlockAshmemRegion(fd_, 0, size_, memory_); | |
96 } | |
97 | |
98 virtual void* Memory() const OVERRIDE { | |
99 return memory_; | |
100 } | |
101 | |
102 private: | |
103 const int fd_; | |
104 void* const memory_; | |
105 const size_t size_; | |
106 | |
107 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple); | |
108 }; | |
109 | |
110 int GetCurrentNumberOfAshmemFDs() { | |
111 AutoLock lock(g_context.Get().lock); | |
112 return g_context.Get().ashmem_fd_count(); | |
113 } | |
114 | |
115 // Returns whether the provided size can be safely page-aligned (without causing | |
116 // an overflow). | |
117 bool CheckSizeCanBeAlignedToNextPage(size_t size) { | |
118 return size <= std::numeric_limits<size_t>::max() - kPageSize + 1; | |
119 } | |
120 | |
121 } // namespace | |
122 | |
123 namespace internal { | |
124 | |
125 size_t AlignToNextPage(size_t size) { | |
126 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); | |
127 DCHECK(CheckSizeCanBeAlignedToNextPage(size)); | |
128 const size_t mask = ~(kPageSize - 1); | |
129 return (size + kPageSize - 1) & mask; | |
130 } | |
131 | 31 |
132 bool CreateAshmemRegion(const char* name, | 32 bool CreateAshmemRegion(const char* name, |
133 size_t size, | 33 size_t size, |
134 int* out_fd, | 34 int* out_fd, |
135 void** out_address) { | 35 void** out_address) { |
136 AutoLock lock(g_context.Get().lock); | 36 base::AutoLock lock(g_discardable_memory_lock.Get()); |
137 if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit) | 37 if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit) |
138 return false; | 38 return false; |
139 int fd = ashmem_create_region(name, size); | 39 int fd = ashmem_create_region(name, size); |
140 if (fd < 0) { | 40 if (fd < 0) { |
141 DLOG(ERROR) << "ashmem_create_region() failed"; | 41 DLOG(ERROR) << "ashmem_create_region() failed"; |
142 return false; | 42 return false; |
143 } | 43 } |
144 file_util::ScopedFD fd_closer(&fd); | 44 file_util::ScopedFD fd_closer(&fd); |
145 | 45 |
146 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); | 46 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); |
147 if (err < 0) { | 47 if (err < 0) { |
148 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; | 48 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; |
149 return false; | 49 return false; |
150 } | 50 } |
151 | 51 |
152 // There is a problem using MAP_PRIVATE here. As we are constantly calling | 52 // There is a problem using MAP_PRIVATE here. As we are constantly calling |
153 // Lock() and Unlock(), data could get lost if it is not written to the | 53 // Lock() and Unlock(), data could get lost if it is not written to the |
154 // underlying file when Unlock() gets called. | 54 // underlying file when Unlock() gets called. |
155 void* const address = mmap( | 55 void* const address = mmap( |
156 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); | 56 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); |
157 if (address == MAP_FAILED) { | 57 if (address == MAP_FAILED) { |
158 DPLOG(ERROR) << "Failed to map memory."; | 58 DPLOG(ERROR) << "Failed to map memory."; |
159 return false; | 59 return false; |
160 } | 60 } |
161 | 61 |
162 ignore_result(fd_closer.release()); | 62 ignore_result(fd_closer.release()); |
163 g_context.Get().increment_ashmem_fd_count(); | 63 ++g_num_discardable_memory; |
164 *out_fd = fd; | 64 *out_fd = fd; |
165 *out_address = address; | 65 *out_address = address; |
166 return true; | 66 return true; |
167 } | 67 } |
168 | 68 |
169 bool CloseAshmemRegion(int fd, size_t size, void* address) { | 69 bool DeleteAshmemRegion(int fd, size_t size, void* address) { |
170 AutoLock lock(g_context.Get().lock); | 70 base::AutoLock lock(g_discardable_memory_lock.Get()); |
171 g_context.Get().decrement_ashmem_fd_count(); | 71 --g_num_discardable_memory; |
172 if (munmap(address, size) == -1) { | 72 if (munmap(address, size) == -1) { |
173 DPLOG(ERROR) << "Failed to unmap memory."; | 73 DPLOG(ERROR) << "Failed to unmap memory."; |
174 close(fd); | 74 close(fd); |
175 return false; | 75 return false; |
176 } | 76 } |
177 return close(fd) == 0; | 77 return close(fd) == 0; |
178 } | 78 } |
179 | 79 |
180 LockDiscardableMemoryStatus LockAshmemRegion(int fd, | 80 LockDiscardableMemoryStatus LockAshmemRegion(int fd, |
181 size_t off, | 81 size_t off, |
182 size_t size, | 82 size_t size, |
183 const void* address) { | 83 const void* address) { |
184 const int result = ashmem_pin_region(fd, off, size); | 84 const int result = ashmem_pin_region(fd, off, size); |
185 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); | 85 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); |
186 return result == ASHMEM_WAS_PURGED ? | 86 return result == ASHMEM_WAS_PURGED ? |
187 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; | 87 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; |
188 } | 88 } |
189 | 89 |
190 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { | 90 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
191 const int failed = ashmem_unpin_region(fd, off, size); | 91 const int failed = ashmem_unpin_region(fd, off, size); |
192 if (failed) | 92 if (failed) |
193 DLOG(ERROR) << "Failed to unpin memory."; | 93 DLOG(ERROR) << "Failed to unpin memory."; |
194 // This allows us to catch accesses to unlocked memory. | 94 // This allows us to catch accesses to unlocked memory. |
195 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); | 95 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
196 return !failed; | 96 return !failed; |
197 } | 97 } |
198 | 98 |
199 } // namespace internal | 99 class DiscardableMemoryAndroid : public DiscardableMemory { |
| 100 public: |
| 101 DiscardableMemoryAndroid(int fd, void* address, size_t size) |
| 102 : fd_(fd), |
| 103 memory_(address), |
| 104 size_(size) { |
| 105 DCHECK_GE(fd_, 0); |
| 106 DCHECK(memory_); |
| 107 } |
| 108 |
| 109 virtual ~DiscardableMemoryAndroid() { |
| 110 DeleteAshmemRegion(fd_, size_, memory_); |
| 111 } |
| 112 |
| 113 // DiscardableMemory: |
| 114 virtual LockDiscardableMemoryStatus Lock() OVERRIDE { |
| 115 return LockAshmemRegion(fd_, 0, size_, memory_); |
| 116 } |
| 117 |
| 118 virtual void Unlock() OVERRIDE { |
| 119 UnlockAshmemRegion(fd_, 0, size_, memory_); |
| 120 } |
| 121 |
| 122 virtual void* Memory() const OVERRIDE { |
| 123 return memory_; |
| 124 } |
| 125 |
| 126 private: |
| 127 const int fd_; |
| 128 void* const memory_; |
| 129 const size_t size_; |
| 130 |
| 131 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid); |
| 132 }; |
| 133 |
| 134 } // namespace |
200 | 135 |
201 // static | 136 // static |
202 bool DiscardableMemory::SupportedNatively() { | 137 bool DiscardableMemory::SupportedNatively() { |
203 return true; | 138 return true; |
204 } | 139 } |
205 | 140 |
206 // Allocation can happen in two ways: | |
207 // - Each client-requested allocation is backed by an individual ashmem region. | |
208 // This allows deleting ashmem regions individually by closing the ashmem file | |
209 // descriptor. This is the default path that is taken when file descriptor usage | |
210 allows us to do so or when the allocation size would require an entire | |
211 // ashmem region. | |
212 // - Allocations are performed by the global allocator when file descriptor | |
213 // usage gets too high. This still allows unpinning but does not allow deleting | |
214 // (i.e. releasing the physical pages backing) individual regions. | |
215 // | |
216 // TODO(pliard): consider tuning the size threshold used below. For instance we | |
217 // might want to make it a fraction of kMinAshmemRegionSize and also | |
218 // systematically have small allocations go through the allocator to let big | |
219 // allocations systematically go through individual ashmem regions. | |
220 // | |
221 // static | 141 // static |
222 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( | 142 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( |
223 size_t size) { | 143 size_t size) { |
224 if (!CheckSizeCanBeAlignedToNextPage(size)) | |
225 return scoped_ptr<DiscardableMemory>(); | |
226 // Pinning & unpinning works with page granularity therefore align the size | 144 // Pinning & unpinning works with page granularity therefore align the size |
227 // upfront. | 145 // upfront. |
228 const size_t aligned_size = internal::AlignToNextPage(size); | 146 const size_t kPageSize = 4096; |
229 // Note that the following code is slightly racy. The worst that can happen in | 147 const size_t mask = ~(kPageSize - 1); |
230 // practice though is taking the wrong decision (e.g. using the allocator | 148 size = (size + kPageSize - 1) & mask; |
231 // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock | 149 int fd; |
232 // acquired for the whole allocation would cause a deadlock when the allocator | 150 void* address; |
233 // tries to create an ashmem region. | 151 if (!CreateAshmemRegion("", size, &fd, &address)) |
234 const size_t kAllocatorRegionSize = | 152 return scoped_ptr<DiscardableMemory>(); |
235 internal::DiscardableMemoryAllocator::kMinAshmemRegionSize; | 153 return scoped_ptr<DiscardableMemory>( |
236 GlobalContext* const global_context = g_context.Pointer(); | 154 new DiscardableMemoryAndroid(fd, address, size)); |
237 if (aligned_size >= kAllocatorRegionSize || | |
238 GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) { | |
239 int fd; | |
240 void* address; | |
241 if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { | |
242 return scoped_ptr<DiscardableMemory>( | |
243 new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); | |
244 } | |
245 } | |
246 return global_context->allocator.Allocate(size); | |
247 } | 155 } |
248 | 156 |
249 // static | 157 // static |
250 bool DiscardableMemory::PurgeForTestingSupported() { | 158 bool DiscardableMemory::PurgeForTestingSupported() { |
251 return false; | 159 return false; |
252 } | 160 } |
253 | 161 |
254 // static | 162 // static |
255 void DiscardableMemory::PurgeForTesting() { | 163 void DiscardableMemory::PurgeForTesting() { |
256 NOTIMPLEMENTED(); | 164 NOTIMPLEMENTED(); |
257 } | 165 } |
258 | 166 |
259 } // namespace base | 167 } // namespace base |
OLD | NEW |