| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/memory/discardable_memory.h" | 5 #include "base/memory/discardable_memory_android.h" |
| 6 | 6 |
| 7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
| 8 #include <sys/resource.h> |
| 9 #include <sys/time.h> |
| 8 #include <unistd.h> | 10 #include <unistd.h> |
| 9 | 11 |
| 12 #include <limits> |
| 13 |
| 10 #include "base/basictypes.h" | 14 #include "base/basictypes.h" |
| 11 #include "base/compiler_specific.h" | 15 #include "base/compiler_specific.h" |
| 12 #include "base/file_util.h" | 16 #include "base/file_util.h" |
| 13 #include "base/lazy_instance.h" | 17 #include "base/lazy_instance.h" |
| 14 #include "base/logging.h" | 18 #include "base/logging.h" |
| 19 #include "base/memory/discardable_memory.h" |
| 20 #include "base/memory/discardable_memory_allocator_android.h" |
| 15 #include "base/synchronization/lock.h" | 21 #include "base/synchronization/lock.h" |
| 16 #include "third_party/ashmem/ashmem.h" | 22 #include "third_party/ashmem/ashmem.h" |
| 17 | 23 |
| 18 namespace base { | 24 namespace base { |
| 19 namespace { | 25 namespace { |
| 20 | 26 |
| 21 // Protects |g_num_discardable_memory| below. | 27 const size_t kPageSize = 4096; |
| 22 base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock = | |
| 23 LAZY_INSTANCE_INITIALIZER; | |
| 24 | 28 |
| 25 // Total number of discardable memory in the process. | 29 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; |
| 26 int g_num_discardable_memory = 0; | |
| 27 | 30 |
| 28 // Upper limit on the number of discardable memory to avoid hitting file | 31 struct GlobalContext { |
| 29 // descriptor limit. | 32 GlobalContext() |
| 30 const int kDiscardableMemoryNumLimit = 128; | 33 : ashmem_fd_limit(GetSoftFDLimit()), |
| 34 allocator(kAshmemAllocatorName), |
| 35 ashmem_fd_count_(0) { |
| 36 } |
| 37 |
| 38 const int ashmem_fd_limit; |
| 39 internal::DiscardableMemoryAllocator allocator; |
| 40 Lock lock; |
| 41 |
| 42 int ashmem_fd_count() const { |
| 43 lock.AssertAcquired(); |
| 44 return ashmem_fd_count_; |
| 45 } |
| 46 |
| 47 void decrement_ashmem_fd_count() { |
| 48 lock.AssertAcquired(); |
| 49 --ashmem_fd_count_; |
| 50 } |
| 51 |
| 52 void increment_ashmem_fd_count() { |
| 53 lock.AssertAcquired(); |
| 54 ++ashmem_fd_count_; |
| 55 } |
| 56 |
| 57 private: |
| 58 static int GetSoftFDLimit() { |
| 59 struct rlimit limit_info; |
| 60 if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0) |
| 61 return 128; |
| 62 // Allow 25% of file descriptor capacity for ashmem. |
| 63 return limit_info.rlim_cur / 4; |
| 64 } |
| 65 |
| 66 int ashmem_fd_count_; |
| 67 }; |
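GetSoftFDLimit() above reads the process's soft RLIMIT_NOFILE and hands a quarter of it to ashmem, falling back to 128 descriptors when the query fails. A minimal standalone sketch of the same calculation (illustrative only; GetAshmemFDBudget() and main() are not part of the patch):

    #include <sys/resource.h>
    #include <stdio.h>

    // Mirrors GetSoftFDLimit(): 25% of the soft fd limit, or 128 on failure.
    static int GetAshmemFDBudget() {
      struct rlimit limit_info;
      if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0)
        return 128;
      return limit_info.rlim_cur / 4;
    }

    int main() {
      printf("ashmem fd budget: %d\n", GetAshmemFDBudget());
      return 0;
    }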
| 68 |
| 69 LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; |
| 70 |
| 71 // This is the default implementation of DiscardableMemory on Android, which is |
| 72 // used when file descriptor usage is under the soft limit. When file descriptor |
| 73 // usage gets too high, the discardable memory allocator is used instead. See |
| 74 // ShouldUseAllocator() below for more details. |
| 75 class DiscardableMemoryAndroidSimple : public DiscardableMemory { |
| 76 public: |
| 77 DiscardableMemoryAndroidSimple(int fd, void* address, size_t size) |
| 78 : fd_(fd), |
| 79 memory_(address), |
| 80 size_(size) { |
| 81 DCHECK_GE(fd_, 0); |
| 82 DCHECK(memory_); |
| 83 } |
| 84 |
| 85 virtual ~DiscardableMemoryAndroidSimple() { |
| 86 internal::CloseAshmemRegion(fd_, size_, memory_); |
| 87 } |
| 88 |
| 89 // DiscardableMemory: |
| 90 virtual LockDiscardableMemoryStatus Lock() OVERRIDE { |
| 91 return internal::LockAshmemRegion(fd_, 0, size_, memory_); |
| 92 } |
| 93 |
| 94 virtual void Unlock() OVERRIDE { |
| 95 internal::UnlockAshmemRegion(fd_, 0, size_, memory_); |
| 96 } |
| 97 |
| 98 virtual void* Memory() const OVERRIDE { |
| 99 return memory_; |
| 100 } |
| 101 |
| 102 private: |
| 103 const int fd_; |
| 104 void* const memory_; |
| 105 const size_t size_; |
| 106 |
| 107 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple); |
| 108 }; |
| 109 |
| 110 int GetCurrentNumberOfAshmemFDs() { |
| 111 AutoLock lock(g_context.Get().lock); |
| 112 return g_context.Get().ashmem_fd_count(); |
| 113 } |
| 114 |
| 115 // Returns whether the provided size can be safely page-aligned (without causing |
| 116 // an overflow). |
| 117 bool CheckSizeCanBeAlignedToNextPage(size_t size) { |
| 118 return size <= std::numeric_limits<size_t>::max() - kPageSize + 1; |
| 119 } |
| 120 |
| 121 } // namespace |
| 122 |
| 123 namespace internal { |
| 124 |
| 125 size_t AlignToNextPage(size_t size) { |
| 126 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); |
| 127 DCHECK(CheckSizeCanBeAlignedToNextPage(size)); |
| 128 const size_t mask = ~(kPageSize - 1); |
| 129 return (size + kPageSize - 1) & mask; |
| 130 } |
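A worked example of the two helpers above, assuming the 4096-byte page size asserted by the DCHECK (standalone sketch; CanAlign() and Align() are local restatements, not part of the patch). Sizes round up to the next page boundary, and the guard rejects exactly those sizes for which |size + kPageSize - 1| would wrap around:

    #include <assert.h>
    #include <stddef.h>
    #include <limits>

    const size_t kPageSize = 4096;

    // Same expressions as CheckSizeCanBeAlignedToNextPage()/AlignToNextPage().
    bool CanAlign(size_t size) {
      return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
    }

    size_t Align(size_t size) {
      return (size + kPageSize - 1) & ~(kPageSize - 1);
    }

    int main() {
      assert(Align(1) == 4096);
      assert(Align(4096) == 4096);
      assert(Align(4097) == 8192);
      // Largest size that still aligns without overflow...
      assert(CanAlign(std::numeric_limits<size_t>::max() - kPageSize + 1));
      // ...one past it would make |size + kPageSize - 1| wrap to 0.
      assert(!CanAlign(std::numeric_limits<size_t>::max() - kPageSize + 2));
      return 0;
    }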
| 31 | 131 |
| 32 bool CreateAshmemRegion(const char* name, | 132 bool CreateAshmemRegion(const char* name, |
| 33 size_t size, | 133 size_t size, |
| 34 int* out_fd, | 134 int* out_fd, |
| 35 void** out_address) { | 135 void** out_address) { |
| 36 base::AutoLock lock(g_discardable_memory_lock.Get()); | 136 AutoLock lock(g_context.Get().lock); |
| 37 if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit) | 137 if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit) |
| 38 return false; | 138 return false; |
| 39 int fd = ashmem_create_region(name, size); | 139 int fd = ashmem_create_region(name, size); |
| 40 if (fd < 0) { | 140 if (fd < 0) { |
| 41 DLOG(ERROR) << "ashmem_create_region() failed"; | 141 DLOG(ERROR) << "ashmem_create_region() failed"; |
| 42 return false; | 142 return false; |
| 43 } | 143 } |
| 44 file_util::ScopedFD fd_closer(&fd); | 144 file_util::ScopedFD fd_closer(&fd); |
| 45 | 145 |
| 46 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); | 146 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); |
| 47 if (err < 0) { | 147 if (err < 0) { |
| 48 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; | 148 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; |
| 49 return false; | 149 return false; |
| 50 } | 150 } |
| 51 | 151 |
| 52 // There is a problem using MAP_PRIVATE here. As we are constantly calling | 152 // There is a problem using MAP_PRIVATE here. As we are constantly calling |
| 53 // Lock() and Unlock(), data could get lost if they are not written to the | 153 // Lock() and Unlock(), data could get lost if they are not written to the |
| 54 // underlying file when Unlock() gets called. | 154 // underlying file when Unlock() gets called. |
| 55 void* const address = mmap( | 155 void* const address = mmap( |
| 56 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); | 156 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); |
| 57 if (address == MAP_FAILED) { | 157 if (address == MAP_FAILED) { |
| 58 DPLOG(ERROR) << "Failed to map memory."; | 158 DPLOG(ERROR) << "Failed to map memory."; |
| 59 return false; | 159 return false; |
| 60 } | 160 } |
| 61 | 161 |
| 62 ignore_result(fd_closer.release()); | 162 ignore_result(fd_closer.release()); |
| 63 ++g_num_discardable_memory; | 163 g_context.Get().increment_ashmem_fd_count(); |
| 64 *out_fd = fd; | 164 *out_fd = fd; |
| 65 *out_address = address; | 165 *out_address = address; |
| 66 return true; | 166 return true; |
| 67 } | 167 } |
| 68 | 168 |
| 69 bool DeleteAshmemRegion(int fd, size_t size, void* address) { | 169 bool CloseAshmemRegion(int fd, size_t size, void* address) { |
| 70 base::AutoLock lock(g_discardable_memory_lock.Get()); | 170 AutoLock lock(g_context.Get().lock); |
| 71 --g_num_discardable_memory; | 171 g_context.Get().decrement_ashmem_fd_count(); |
| 72 if (munmap(address, size) == -1) { | 172 if (munmap(address, size) == -1) { |
| 73 DPLOG(ERROR) << "Failed to unmap memory."; | 173 DPLOG(ERROR) << "Failed to unmap memory."; |
| 74 close(fd); | 174 close(fd); |
| 75 return false; | 175 return false; |
| 76 } | 176 } |
| 77 return close(fd) == 0; | 177 return close(fd) == 0; |
| 78 } | 178 } |
| 79 | 179 |
| 80 LockDiscardableMemoryStatus LockAshmemRegion(int fd, | 180 LockDiscardableMemoryStatus LockAshmemRegion(int fd, |
| 81 size_t off, | 181 size_t off, |
| 82 size_t size, | 182 size_t size, |
| 83 const void* address) { | 183 const void* address) { |
| 84 const int result = ashmem_pin_region(fd, off, size); | 184 const int result = ashmem_pin_region(fd, off, size); |
| 85 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); | 185 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); |
| 86 return result == ASHMEM_WAS_PURGED ? | 186 return result == ASHMEM_WAS_PURGED ? |
| 87 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; | 187 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; |
| 88 } | 188 } |
| 89 | 189 |
| 90 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { | 190 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
| 91 const int failed = ashmem_unpin_region(fd, off, size); | 191 const int failed = ashmem_unpin_region(fd, off, size); |
| 92 if (failed) | 192 if (failed) |
| 93 DLOG(ERROR) << "Failed to unpin memory."; | 193 DLOG(ERROR) << "Failed to unpin memory."; |
| 94 // This allows us to catch accesses to unlocked memory. | 194 // This allows us to catch accesses to unlocked memory. |
| 95 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); | 195 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
| 96 return !failed; | 196 return !failed; |
| 97 } | 197 } |
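The mprotect() calls in the Lock/Unlock helpers above are what back the "catch accesses to unlocked memory" comment: an unlocked range is remapped PROT_NONE, so a stray access faults immediately instead of silently touching data the kernel may already have purged. A standalone sketch of that protection toggling on a plain anonymous mapping (Linux, no ashmem involved; illustrative only):

    #include <sys/mman.h>
    #include <assert.h>
    #include <string.h>

    int main() {
      const size_t kSize = 4096;
      void* p = mmap(NULL, kSize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(p != MAP_FAILED);
      memset(p, 0xAB, kSize);                      // "Locked": access is fine.
      mprotect(p, kSize, PROT_NONE);               // "Unlocked": any access faults.
      mprotect(p, kSize, PROT_READ | PROT_WRITE);  // "Locked" again.
      munmap(p, kSize);
      return 0;
    }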
| 98 | 198 |
| 99 class DiscardableMemoryAndroid : public DiscardableMemory { | 199 } // namespace internal |
| 100 public: | |
| 101 DiscardableMemoryAndroid(int fd, void* address, size_t size) | |
| 102 : fd_(fd), | |
| 103 memory_(address), | |
| 104 size_(size) { | |
| 105 DCHECK_GE(fd_, 0); | |
| 106 DCHECK(memory_); | |
| 107 } | |
| 108 | |
| 109 virtual ~DiscardableMemoryAndroid() { | |
| 110 DeleteAshmemRegion(fd_, size_, memory_); | |
| 111 } | |
| 112 | |
| 113 // DiscardableMemory: | |
| 114 virtual LockDiscardableMemoryStatus Lock() OVERRIDE { | |
| 115 return LockAshmemRegion(fd_, 0, size_, memory_); | |
| 116 } | |
| 117 | |
| 118 virtual void Unlock() OVERRIDE { | |
| 119 UnlockAshmemRegion(fd_, 0, size_, memory_); | |
| 120 } | |
| 121 | |
| 122 virtual void* Memory() const OVERRIDE { | |
| 123 return memory_; | |
| 124 } | |
| 125 | |
| 126 private: | |
| 127 const int fd_; | |
| 128 void* const memory_; | |
| 129 const size_t size_; | |
| 130 | |
| 131 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid); | |
| 132 }; | |
| 133 | |
| 134 } // namespace | |
| 135 | 200 |
| 136 // static | 201 // static |
| 137 bool DiscardableMemory::SupportedNatively() { | 202 bool DiscardableMemory::SupportedNatively() { |
| 138 return true; | 203 return true; |
| 139 } | 204 } |
| 140 | 205 |
| 206 // Allocation can happen in two ways: |
| 207 // - Each client-requested allocation is backed by an individual ashmem region. |
| 208 // This allows deleting ashmem regions individually by closing the ashmem file |
| 209 // descriptor. This is the default path that is taken when file descriptor usage |
| 210 // allows us to do so or when the allocation size would require an entire |
| 211 // ashmem region. |
| 212 // - Allocations are performed by the global allocator when file descriptor |
| 213 // usage gets too high. This still allows unpinning but does not allow deleting |
| 214 // individual regions (i.e. releasing the physical pages backing them). |
| 215 // |
| 216 // TODO(pliard): consider tuning the size threshold used below. For instance we |
| 217 // might want to make it a fraction of kMinAshmemRegionSize and also have small |
| 218 // allocations always go through the allocator while big allocations always go |
| 219 // through individual ashmem regions. |
| 220 // |
| 141 // static | 221 // static |
| 142 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( | 222 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( |
| 143 size_t size) { | 223 size_t size) { |
| 224 if (!CheckSizeCanBeAlignedToNextPage(size)) |
| 225 return scoped_ptr<DiscardableMemory>(); |
| 144 // Pinning & unpinning works with page granularity therefore align the size | 226 // Pinning & unpinning works with page granularity therefore align the size |
| 145 // upfront. | 227 // upfront. |
| 146 const size_t kPageSize = 4096; | 228 const size_t aligned_size = internal::AlignToNextPage(size); |
| 147 const size_t mask = ~(kPageSize - 1); | 229 // Note that the following code is slightly racy. The worst that can happen in |
| 148 size = (size + kPageSize - 1) & mask; | 230 // practice, though, is making the wrong decision (e.g. using the allocator |
| 149 int fd; | 231 // rather than DiscardableMemoryAndroidSimple). Moreover, keeping the lock |
| 150 void* address; | 232 // acquired for the whole allocation would cause a deadlock when the allocator |
| 151 if (!CreateAshmemRegion("", size, &fd, &address)) | 233 // tries to create an ashmem region. |
| 152 return scoped_ptr<DiscardableMemory>(); | 234 const size_t kAllocatorRegionSize = |
| 153 return scoped_ptr<DiscardableMemory>( | 235 internal::DiscardableMemoryAllocator::kMinAshmemRegionSize; |
| 154 new DiscardableMemoryAndroid(fd, address, size)); | 236 GlobalContext* const global_context = g_context.Pointer(); |
| 237 if (aligned_size >= kAllocatorRegionSize || |
| 238 GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) { |
| 239 int fd; |
| 240 void* address; |
| 241 if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { |
| 242 return scoped_ptr<DiscardableMemory>( |
| 243 new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); |
| 244 } |
| 245 } |
| 246 return global_context->allocator.Allocate(size); |
| 155 } | 247 } |
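For reviewers, a sketch of the caller's side of the function above (hypothetical caller, not part of the patch). Whichever branch is taken, the returned object honors the same DiscardableMemory contract, so clients cannot tell an individual ashmem region from an allocator-backed chunk:

    #include <string.h>

    #include "base/memory/discardable_memory.h"
    #include "base/memory/scoped_ptr.h"

    void UseDiscardableBuffer() {
      const size_t kSize = 64 * 1024;
      scoped_ptr<base::DiscardableMemory> memory(
          base::DiscardableMemory::CreateLockedMemory(kSize));
      if (!memory.get())
        return;  // Both allocation paths failed.
      memset(memory->Memory(), 0, kSize);  // Memory starts out locked.
      memory->Unlock();                    // Now eligible for purging.
      if (memory->Lock() == base::DISCARDABLE_MEMORY_PURGED) {
        // Contents were discarded while unlocked; regenerate them here.
      }
      memory->Unlock();
    }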
| 156 | 248 |
| 157 // static | 249 // static |
| 158 bool DiscardableMemory::PurgeForTestingSupported() { | 250 bool DiscardableMemory::PurgeForTestingSupported() { |
| 159 return false; | 251 return false; |
| 160 } | 252 } |
| 161 | 253 |
| 162 // static | 254 // static |
| 163 void DiscardableMemory::PurgeForTesting() { | 255 void DiscardableMemory::PurgeForTesting() { |
| 164 NOTIMPLEMENTED(); | 256 NOTIMPLEMENTED(); |
| 165 } | 257 } |
| 166 | 258 |
| 167 } // namespace base | 259 } // namespace base |
| OLD | NEW |