Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/memory/discardable_memory_android.h" | 5 #include "base/memory/discardable_memory_android.h" |
| 6 | 6 |
| 7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
| 8 #include <sys/resource.h> | 8 #include <sys/resource.h> |
| 9 #include <sys/time.h> | 9 #include <sys/time.h> |
| 10 #include <unistd.h> | 10 #include <unistd.h> |
| 11 | 11 |
| 12 #include <limits> | 12 #include <limits> |
| 13 | 13 |
| 14 #include "base/basictypes.h" | 14 #include "base/basictypes.h" |
| 15 #include "base/compiler_specific.h" | 15 #include "base/compiler_specific.h" |
| 16 #include "base/file_util.h" | 16 #include "base/file_util.h" |
| 17 #include "base/lazy_instance.h" | 17 #include "base/lazy_instance.h" |
| 18 #include "base/logging.h" | 18 #include "base/logging.h" |
| 19 #include "base/memory/discardable_memory.h" | 19 #include "base/memory/discardable_memory.h" |
| 20 #include "base/memory/discardable_memory_allocator_android.h" | 20 #include "base/memory/discardable_memory_allocator_android.h" |
| 21 #include "base/memory/discardable_memory_emulated.h" | |
| 21 #include "base/synchronization/lock.h" | 22 #include "base/synchronization/lock.h" |
| 22 #include "third_party/ashmem/ashmem.h" | 23 #include "third_party/ashmem/ashmem.h" |
| 23 | 24 |
| 24 namespace base { | 25 namespace base { |
| 25 namespace { | 26 namespace { |
| 26 | 27 |
| 27 const size_t kPageSize = 4096; | 28 const size_t kPageSize = 4096; |
| 28 | 29 |
| 29 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; | 30 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; |
| 30 | 31 |
| (...skipping 80 matching lines...) | |
| 111 AutoLock lock(g_context.Get().lock); | 112 AutoLock lock(g_context.Get().lock); |
| 112 return g_context.Get().ashmem_fd_count(); | 113 return g_context.Get().ashmem_fd_count(); |
| 113 } | 114 } |
| 114 | 115 |
| 115 // Returns whether the provided size can be safely page-aligned (without causing | 116 // Returns whether the provided size can be safely page-aligned (without causing |
| 116 // an overflow). | 117 // an overflow). |
| 117 bool CheckSizeCanBeAlignedToNextPage(size_t size) { | 118 bool CheckSizeCanBeAlignedToNextPage(size_t size) { |
| 118 return size <= std::numeric_limits<size_t>::max() - kPageSize + 1; | 119 return size <= std::numeric_limits<size_t>::max() - kPageSize + 1; |
| 119 } | 120 } |
| 120 | 121 |
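A quick aside on this guard: `AlignToNextPage` below computes `size + kPageSize - 1`, so the largest input that cannot wrap is exactly `max - kPageSize + 1`. A minimal standalone sketch (not part of the CL; `kPageSize` is duplicated here for illustration):

```cpp
#include <cassert>
#include <cstddef>
#include <limits>

namespace {

const size_t kPageSize = 4096;

// Same guard as in the CL: rejects sizes for which size + kPageSize - 1
// would wrap around in AlignToNextPage().
bool CheckSizeCanBeAlignedToNextPage(size_t size) {
  return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
}

}  // namespace

int main() {
  const size_t max = std::numeric_limits<size_t>::max();
  assert(CheckSizeCanBeAlignedToNextPage(max - kPageSize + 1));   // Largest OK size.
  assert(!CheckSizeCanBeAlignedToNextPage(max - kPageSize + 2));  // Would overflow.
  return 0;
}
```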
| 122 struct SupportedTypeVector { | |
|
Philippe
2013/12/17 14:28:21
This is just a suggestion but I see that we have n
reveman
2013/12/18 08:12:38
Please have a look at the latest code. I simply ma
Philippe
2013/12/18 09:07:47
Yeah, even better!
| |
| 123 SupportedTypeVector() { | |
| 124 v.push_back(DISCARDABLE_MEMORY_ANDROID); | |
| 125 v.push_back(DISCARDABLE_MEMORY_EMULATED); | |
| 126 } | |
| 127 std::vector<DiscardableMemoryType> v; | |
| 128 }; | |
| 129 LazyInstance<SupportedTypeVector>::Leaky g_supported_types = | |
| 130 LAZY_INSTANCE_INITIALIZER; | |
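The wrapper struct here is the usual LazyInstance idiom: base::LazyInstance needs a default-constructible type, so the vector is wrapped in a struct whose constructor fills it, and the Leaky trait skips the destructor at shutdown. A rough C++11 approximation of the same construct-once, never-destroyed behavior (illustrative only; the enum is abbreviated and base::LazyInstance itself is Chromium-specific):

```cpp
#include <vector>

// Abbreviated copy of the CL's enum, for illustration only.
enum DiscardableMemoryType {
  DISCARDABLE_MEMORY_ANDROID,
  DISCARDABLE_MEMORY_EMULATED,
};

// Construct-once, intentionally leaked, like LazyInstance<...>::Leaky:
// no static-initialization-order issues and no destructor races at shutdown.
const std::vector<DiscardableMemoryType>& GetSupportedTypes() {
  static const std::vector<DiscardableMemoryType>* const types =
      new std::vector<DiscardableMemoryType>{DISCARDABLE_MEMORY_ANDROID,
                                             DISCARDABLE_MEMORY_EMULATED};
  return *types;
}
```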
| 131 | |
| 121 } // namespace | 132 } // namespace |
| 122 | 133 |
| 123 namespace internal { | 134 namespace internal { |
| 124 | 135 |
| 125 size_t AlignToNextPage(size_t size) { | 136 size_t AlignToNextPage(size_t size) { |
| 126 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); | 137 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); |
| 127 DCHECK(CheckSizeCanBeAlignedToNextPage(size)); | 138 DCHECK(CheckSizeCanBeAlignedToNextPage(size)); |
| 128 const size_t mask = ~(kPageSize - 1); | 139 const size_t mask = ~(kPageSize - 1); |
| 129 return (size + kPageSize - 1) & mask; | 140 return (size + kPageSize - 1) & mask; |
| 130 } | 141 } |
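For readers skimming the diff, the same round-up-to-page arithmetic in self-contained form, with a couple of worked values (a sketch, not CL code):

```cpp
#include <cassert>
#include <cstddef>

namespace {

const size_t kPageSize = 4096;

// Same arithmetic as AlignToNextPage() in the CL, minus the DCHECKs:
// adding kPageSize - 1 and masking the low bits rounds up to a multiple
// of kPageSize (which must be a power of two for the mask to work).
size_t AlignToNextPage(size_t size) {
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}

}  // namespace

int main() {
  assert(AlignToNextPage(0) == 0);
  assert(AlignToNextPage(1) == kPageSize);
  assert(AlignToNextPage(kPageSize) == kPageSize);          // Already aligned.
  assert(AlignToNextPage(kPageSize + 1) == 2 * kPageSize);  // Next page up.
  return 0;
}
```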
| (...skipping 61 matching lines...) | |
| 192 if (failed) | 203 if (failed) |
| 193 DLOG(ERROR) << "Failed to unpin memory."; | 204 DLOG(ERROR) << "Failed to unpin memory."; |
| 194 // This allows us to catch accesses to unlocked memory. | 205 // This allows us to catch accesses to unlocked memory. |
| 195 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); | 206 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
| 196 return !failed; | 207 return !failed; |
| 197 } | 208 } |
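The `mprotect(PROT_NONE)` trick on the unpin path deserves a note: it turns any stray access to unlocked memory into an immediate fault rather than a silent read of stale data. A standalone POSIX sketch of the same idea (not from the CL):

```cpp
#include <cassert>
#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);

  static_cast<char*>(mem)[0] = 1;  // Fine: the page is readable/writable.

  // Revoke all access, as the unpin path above does. Any access to the
  // range now raises SIGSEGV instead of silently using stale data.
  int rv = mprotect(mem, page_size, PROT_NONE);
  assert(rv == 0);
  // static_cast<char*>(mem)[0];  // Would crash here.

  // A lock/pin path would restore access before handing the memory out.
  rv = mprotect(mem, page_size, PROT_READ | PROT_WRITE);
  assert(rv == 0);
  munmap(mem, page_size);
  return 0;
}
```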
| 198 | 209 |
| 199 } // namespace internal | 210 } // namespace internal |
| 200 | 211 |
| 201 // static | 212 // static |
| 202 bool DiscardableMemory::SupportedNatively() { | 213 const std::vector<DiscardableMemoryType>& |
| 203 return true; | 214 DiscardableMemory::GetSupportedTypes() { |
| 215 return g_supported_types.Get().v; | |
| 204 } | 216 } |
| 205 | 217 |
| 206 // Allocation can happen in two ways: | 218 // Allocation can happen in two ways: |
| 207 // - Each client-requested allocation is backed by an individual ashmem region. | 219 // - Each client-requested allocation is backed by an individual ashmem region. |
| 208 // This allows deleting ashmem regions individually by closing the ashmem file | 220 // This allows deleting ashmem regions individually by closing the ashmem file |
| 209 // descriptor. This is the default path that is taken when file descriptor usage | 221 // descriptor. This is the default path that is taken when file descriptor usage |
| 210 // allows us to do so or when the allocation size would require an entire | 222 // allows us to do so or when the allocation size would require an entire |
| 211 // ashmem region. | 223 // ashmem region. |
| 212 // - Allocations are performed by the global allocator when file descriptor | 224 // - Allocations are performed by the global allocator when file descriptor |
| 213 // usage gets too high. This still allows unpinning but does not allow deleting | 225 // usage gets too high. This still allows unpinning but does not allow deleting |
| 214 // (i.e. releasing the physical pages backing) individual regions. | 226 // (i.e. releasing the physical pages backing) individual regions. |
| 215 // | 227 // |
| 216 // TODO(pliard): consider tuning the size threshold used below. For instance we | 228 // TODO(pliard): consider tuning the size threshold used below. For instance we |
| 217 // might want to make it a fraction of kMinAshmemRegionSize and also | 229 // might want to make it a fraction of kMinAshmemRegionSize and also |
| 218 // systematically have small allocations go through the allocator to let big | 230 // systematically have small allocations go through the allocator to let big |
| 219 // allocations systematically go through individual ashmem regions. | 231 // allocations systematically go through individual ashmem regions. |
| 220 // | 232 // |
| 221 // static | 233 // static |
| 222 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( | 234 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( |
| 223 size_t size) { | 235 size_t size) { |
| 224 if (!CheckSizeCanBeAlignedToNextPage(size)) | 236 switch (GetType()) { |
| 225 return scoped_ptr<DiscardableMemory>(); | 237 case DISCARDABLE_MEMORY_NONE: |
| 226 // Pinning & unpinning works with page granularity therefore align the size | 238 case DISCARDABLE_MEMORY_MAC: |
| 227 // upfront. | 239 return scoped_ptr<DiscardableMemory>(); |
| 228 const size_t aligned_size = internal::AlignToNextPage(size); | 240 case DISCARDABLE_MEMORY_ANDROID: { |
| 229 // Note that the following code is slightly racy. The worst that can happen in | 241 if (!CheckSizeCanBeAlignedToNextPage(size)) |
| 230 // practice though is taking the wrong decision (e.g. using the allocator | 242 return scoped_ptr<DiscardableMemory>(); |
| 231 // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock | 243 // Pinning & unpinning works with page granularity therefore align the |
| 232 // acquired for the whole allocation would cause a deadlock when the allocator | 244 // size upfront. |
| 233 // tries to create an ashmem region. | 245 const size_t aligned_size = internal::AlignToNextPage(size); |
| 234 const size_t kAllocatorRegionSize = | 246 // Note that the following code is slightly racy. The worst that can |
| 235 internal::DiscardableMemoryAllocator::kMinAshmemRegionSize; | 247 // happen in practice though is taking the wrong decision (e.g. using |
| 236 GlobalContext* const global_context = g_context.Pointer(); | 248 // the allocator rather than DiscardableMemoryAndroidSimple). Moreover |
| 237 if (aligned_size >= kAllocatorRegionSize || | 249 // keeping the lock acquired for the whole allocation would cause a |
| 238 GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) { | 250 // deadlock when the allocator tries to create an ashmem region. |
| 239 int fd; | 251 const size_t kAllocatorRegionSize = |
| 240 void* address; | 252 internal::DiscardableMemoryAllocator::kMinAshmemRegionSize; |
| 241 if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { | 253 GlobalContext* const global_context = g_context.Pointer(); |
| 242 return scoped_ptr<DiscardableMemory>( | 254 if (aligned_size >= kAllocatorRegionSize || |
| 243 new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); | 255 GetCurrentNumberOfAshmemFDs() < |
| 256 0.9 * global_context->ashmem_fd_limit) { | |
| 257 int fd; | |
| 258 void* address; | |
| 259 if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { | |
| 260 return scoped_ptr<DiscardableMemory>( | |
| 261 new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); | |
| 262 } | |
| 263 } | |
| 264 return global_context->allocator.Allocate(size); | |
| 265 } | |
| 266 case DISCARDABLE_MEMORY_EMULATED: { | |
| 267 scoped_ptr<internal::DiscardableMemoryEmulated> memory( | |
| 268 new internal::DiscardableMemoryEmulated(size)); | |
| 269 if (!memory->Initialize()) | |
|
Philippe
2013/12/17 14:28:21
I have to say that I look forward to seeing Discar
Philippe
2013/12/17 15:26:22
Just realized that this may sound harsh. Sorry if
reveman
2013/12/18 08:12:38
DiscardableMemoryEmulated is likely not going to b
Philippe
2013/12/18 09:07:47
Those things are personal preferences obviously an
| |
| 270 return scoped_ptr<DiscardableMemory>(); | |
| 271 | |
| 272 return memory.PassAs<DiscardableMemory>(); | |
| 244 } | 273 } |
| 245 } | 274 } |
| 246 return global_context->allocator.Allocate(size); | 275 |
| 276 NOTREACHED(); | |
| 277 return scoped_ptr<DiscardableMemory>(); | |
| 247 } | 278 } |
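Summarizing the Android branch: a hedged sketch of the path-selection heuristic described in the comment above (names and parameters are stand-ins; in the CL the constants come from DiscardableMemoryAllocator and GlobalContext, and a failed ashmem creation also falls back to the allocator):

```cpp
#include <cstddef>

enum AllocationPath {
  USE_INDIVIDUAL_ASHMEM_REGION,  // Deletable individually; costs one fd.
  USE_GLOBAL_ALLOCATOR,          // Shares regions; unpinnable, not deletable.
};

// Large requests get their own ashmem region regardless of fd pressure;
// small requests do too while fd usage stays under 90% of the soft limit.
AllocationPath ChooseAllocationPath(size_t aligned_size,
                                    size_t min_region_size,
                                    int current_fd_count,
                                    int fd_limit) {
  if (aligned_size >= min_region_size ||
      current_fd_count < 0.9 * fd_limit)
    return USE_INDIVIDUAL_ASHMEM_REGION;
  return USE_GLOBAL_ALLOCATOR;
}
```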
| 248 | 279 |
| 249 // static | 280 // static |
| 250 bool DiscardableMemory::PurgeForTestingSupported() { | 281 bool DiscardableMemory::PurgeForTestingSupported() { |
| 251 return false; | 282 return false; |
| 252 } | 283 } |
| 253 | 284 |
| 254 // static | 285 // static |
| 255 void DiscardableMemory::PurgeForTesting() { | 286 void DiscardableMemory::PurgeForTesting() { |
| 256 NOTIMPLEMENTED(); | 287 NOTIMPLEMENTED(); |
| 257 } | 288 } |
| 258 | 289 |
| 259 } // namespace base | 290 } // namespace base |