OLD | NEW |
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory_android.h" | 5 #include "base/memory/discardable_memory_android.h" |
6 | 6 |
7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
8 #include <sys/resource.h> | 8 #include <sys/resource.h> |
9 #include <sys/time.h> | 9 #include <sys/time.h> |
10 #include <unistd.h> | 10 #include <unistd.h> |
11 | 11 |
12 #include <limits> | 12 #include <limits> |
13 | 13 |
14 #include "base/basictypes.h" | 14 #include "base/basictypes.h" |
15 #include "base/compiler_specific.h" | 15 #include "base/compiler_specific.h" |
16 #include "base/file_util.h" | 16 #include "base/file_util.h" |
17 #include "base/lazy_instance.h" | 17 #include "base/lazy_instance.h" |
18 #include "base/logging.h" | 18 #include "base/logging.h" |
19 #include "base/memory/discardable_memory.h" | 19 #include "base/memory/discardable_memory.h" |
20 #include "base/memory/discardable_memory_allocator_android.h" | 20 #include "base/memory/discardable_memory_allocator_android.h" |
| 21 #include "base/memory/discardable_memory_emulated.h" |
21 #include "base/synchronization/lock.h" | 22 #include "base/synchronization/lock.h" |
22 #include "third_party/ashmem/ashmem.h" | 23 #include "third_party/ashmem/ashmem.h" |
23 | 24 |
24 namespace base { | 25 namespace base { |
25 namespace { | 26 namespace { |
26 | 27 |
27 const size_t kPageSize = 4096; | 28 const size_t kPageSize = 4096; |
28 | 29 |
29 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; | 30 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; |
30 | 31 |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
80 size_(size) { | 81 size_(size) { |
81 DCHECK_GE(fd_, 0); | 82 DCHECK_GE(fd_, 0); |
82 DCHECK(memory_); | 83 DCHECK(memory_); |
83 } | 84 } |
84 | 85 |
  virtual ~DiscardableMemoryAndroidSimple() {
    // Releases the backing region: unmaps |memory_| and closes |fd_|
    // (see internal::CloseAshmemRegion).
    internal::CloseAshmemRegion(fd_, size_, memory_);
  }
88 | 89 |
  // DiscardableMemory:
  virtual DiscardableMemoryLockStatus Lock() OVERRIDE {
    // Pins the whole region. Returns DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
    // when the pages were discarded while the region was unpinned (i.e. the
    // underlying ashmem_pin_region() reported ASHMEM_WAS_PURGED).
    return internal::LockAshmemRegion(fd_, 0, size_, memory_);
  }
93 | 94 |
  virtual void Unlock() OVERRIDE {
    // Unpins the whole region, making its pages eligible for reclaim by the
    // ashmem driver until the next Lock().
    internal::UnlockAshmemRegion(fd_, 0, size_, memory_);
  }
97 | 98 |
  virtual void* Memory() const OVERRIDE {
    // Address of the mapped ashmem region handed to the client.
    return memory_;
  }
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
170 AutoLock lock(g_context.Get().lock); | 171 AutoLock lock(g_context.Get().lock); |
171 g_context.Get().decrement_ashmem_fd_count(); | 172 g_context.Get().decrement_ashmem_fd_count(); |
172 if (munmap(address, size) == -1) { | 173 if (munmap(address, size) == -1) { |
173 DPLOG(ERROR) << "Failed to unmap memory."; | 174 DPLOG(ERROR) << "Failed to unmap memory."; |
174 close(fd); | 175 close(fd); |
175 return false; | 176 return false; |
176 } | 177 } |
177 return close(fd) == 0; | 178 return close(fd) == 0; |
178 } | 179 } |
179 | 180 |
180 LockDiscardableMemoryStatus LockAshmemRegion(int fd, | 181 DiscardableMemoryLockStatus LockAshmemRegion(int fd, |
181 size_t off, | 182 size_t off, |
182 size_t size, | 183 size_t size, |
183 const void* address) { | 184 const void* address) { |
184 const int result = ashmem_pin_region(fd, off, size); | 185 const int result = ashmem_pin_region(fd, off, size); |
185 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); | 186 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); |
186 return result == ASHMEM_WAS_PURGED ? | 187 return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED |
187 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; | 188 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; |
188 } | 189 } |
189 | 190 |
190 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { | 191 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
191 const int failed = ashmem_unpin_region(fd, off, size); | 192 const int failed = ashmem_unpin_region(fd, off, size); |
192 if (failed) | 193 if (failed) |
193 DLOG(ERROR) << "Failed to unpin memory."; | 194 DLOG(ERROR) << "Failed to unpin memory."; |
194 // This allows us to catch accesses to unlocked memory. | 195 // This allows us to catch accesses to unlocked memory. |
195 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); | 196 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
196 return !failed; | 197 return !failed; |
197 } | 198 } |
198 | 199 |
199 } // namespace internal | 200 } // namespace internal |
200 | 201 |
201 // static | 202 // static |
202 bool DiscardableMemory::SupportedNatively() { | 203 void DiscardableMemory::GetSupportedTypes( |
203 return true; | 204 std::vector<DiscardableMemoryType>* types) { |
| 205 types->push_back(DISCARDABLE_MEMORY_TYPE_ANDROID); |
| 206 types->push_back(DISCARDABLE_MEMORY_TYPE_EMULATED); |
204 } | 207 } |
205 | 208 |
// Allocation can happen in two ways:
// - Each client-requested allocation is backed by an individual ashmem region.
// This allows deleting ashmem regions individually by closing the ashmem file
// descriptor. This is the default path that is taken when file descriptor usage
// allows us to do so or when the allocation size would require an entire
// ashmem region.
// - Allocations are performed by the global allocator when file descriptor
// usage gets too high. This still allows unpinning but does not allow deleting
// (i.e. releasing the physical pages backing) individual regions.
//
// TODO(pliard): consider tuning the size threshold used below. For instance we
// might want to make it a fraction of kMinAshmemRegionSize and also
// systematically have small allocations go through the allocator to let big
// allocations systematically go through individual ashmem regions.
//
// static
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory(
    size_t size) {
  switch (GetType()) {
    case DISCARDABLE_MEMORY_TYPE_NONE:
    case DISCARDABLE_MEMORY_TYPE_MAC:
      // Neither of these types is implemented on Android: fail by returning
      // a null scoped_ptr.
      return scoped_ptr<DiscardableMemory>();
    case DISCARDABLE_MEMORY_TYPE_ANDROID: {
      if (!CheckSizeCanBeAlignedToNextPage(size))
        return scoped_ptr<DiscardableMemory>();
      // Pinning & unpinning works with page granularity therefore align the
      // size upfront.
      const size_t aligned_size = internal::AlignToNextPage(size);
      // Note that the following code is slightly racy. The worst that can
      // happen in practice though is taking the wrong decision (e.g. using
      // the allocator rather than DiscardableMemoryAndroidSimple). Moreover
      // keeping the lock acquired for the whole allocation would cause a
      // deadlock when the allocator tries to create an ashmem region.
      const size_t kAllocatorRegionSize =
          internal::DiscardableMemoryAllocator::kMinAshmemRegionSize;
      GlobalContext* const global_context = g_context.Pointer();
      // Use a dedicated ashmem region when the request is at least an
      // allocator region in size, or while fd usage is below 90% of the
      // process limit — presumably to leave fd headroom for the rest of the
      // process; TODO confirm the 0.9 rationale.
      if (aligned_size >= kAllocatorRegionSize ||
          GetCurrentNumberOfAshmemFDs() <
              0.9 * global_context->ashmem_fd_limit) {
        int fd;
        void* address;
        if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) {
          return scoped_ptr<DiscardableMemory>(
              new DiscardableMemoryAndroidSimple(fd, address, aligned_size));
        }
      }
      // Dedicated region not taken (or its creation failed): fall back to
      // the shared global allocator. Note it receives the unaligned |size|.
      return global_context->allocator.Allocate(size);
    }
    case DISCARDABLE_MEMORY_TYPE_EMULATED: {
      scoped_ptr<internal::DiscardableMemoryEmulated> memory(
          new internal::DiscardableMemoryEmulated(size));
      if (!memory->Initialize())
        return scoped_ptr<DiscardableMemory>();

      return memory.PassAs<DiscardableMemory>();
    }
  }

  // All enum values are handled above; reaching here means GetType()
  // returned an unexpected value.
  NOTREACHED();
  return scoped_ptr<DiscardableMemory>();
}
248 | 270 |
// static
bool DiscardableMemory::PurgeForTestingSupported() {
  // On-demand purging is not implemented on Android (see PurgeForTesting()).
  return false;
}
253 | 275 |
// static
void DiscardableMemory::PurgeForTesting() {
  // Callers must check PurgeForTestingSupported() first; this path is not
  // implemented on Android.
  NOTIMPLEMENTED();
}
258 | 280 |
259 } // namespace base | 281 } // namespace base |
OLD | NEW |