| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/memory/discardable_memory_allocator_android.h" | 5 #include "base/memory/discardable_memory_allocator_android.h" |
| 6 | 6 |
| 7 #include <sys/mman.h> |
| 8 #include <unistd.h> |
| 9 |
| 7 #include <algorithm> | 10 #include <algorithm> |
| 8 #include <cmath> | 11 #include <cmath> |
| 12 #include <limits> |
| 9 #include <set> | 13 #include <set> |
| 10 #include <utility> | 14 #include <utility> |
| 11 | 15 |
| 12 #include "base/basictypes.h" | 16 #include "base/basictypes.h" |
| 13 #include "base/containers/hash_tables.h" | 17 #include "base/containers/hash_tables.h" |
| 18 #include "base/file_util.h" |
| 14 #include "base/logging.h" | 19 #include "base/logging.h" |
| 15 #include "base/memory/discardable_memory.h" | 20 #include "base/memory/discardable_memory.h" |
| 16 #include "base/memory/discardable_memory_android.h" | |
| 17 #include "base/memory/scoped_vector.h" | 21 #include "base/memory/scoped_vector.h" |
| 18 #include "base/synchronization/lock.h" | 22 #include "base/synchronization/lock.h" |
| 19 #include "base/threading/thread_checker.h" | 23 #include "base/threading/thread_checker.h" |
| 24 #include "third_party/ashmem/ashmem.h" |
| 20 | 25 |
| 21 // The allocator consists of three parts (classes): | 26 // The allocator consists of three parts (classes): |
| 22 // - DiscardableMemoryAllocator: entry point of all allocations (through its | 27 // - DiscardableMemoryAllocator: entry point of all allocations (through its |
| 23 // Allocate() method) that are dispatched to the AshmemRegion instances (which | 28 // Allocate() method) that are dispatched to the AshmemRegion instances (which |
| 24 // it owns). | 29 // it owns). |
| 25 // - AshmemRegion: manages allocations and destructions inside a single large | 30 // - AshmemRegion: manages allocations and destructions inside a single large |
| 26 // (e.g. 32 MBytes) ashmem region. | 31 // (e.g. 32 MBytes) ashmem region. |
| 27 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface | 32 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface |
| 28 // whose instances are returned to the client. DiscardableAshmemChunk lets the | 33 // whose instances are returned to the client. DiscardableAshmemChunk lets the |
| 29 // client seamlessly operate on a subrange of the ashmem region managed by | 34 // client seamlessly operate on a subrange of the ashmem region managed by |
| (...skipping 19 matching lines...) |
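
For orientation: the three classes named in the comment above relate roughly as sketched below. Member names here are illustrative assumptions, not the reviewed code; the real definitions follow in this file.

    // Sketch only; the actual definitions appear later in this CL.
    class AshmemRegion;  // One large ashmem region, carved into chunks.

    class DiscardableAshmemChunk : public DiscardableMemory {
      AshmemRegion* ashmem_region_;  // Backing region; must outlive |this|.
      size_t offset_;                // Subrange of the region covered...
      size_t size_;                  // ...by this chunk.
    };

    class DiscardableMemoryAllocator {
      ScopedVector<AshmemRegion> ashmem_regions_;  // Owns all regions.
      // Allocate() dispatches to these, creating a new region when needed.
    };
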
| 49 // Returns 0 if the provided size is too large to be aligned. | 54 // Returns 0 if the provided size is too large to be aligned. |
| 50 size_t AlignToNextPage(size_t size) { | 55 size_t AlignToNextPage(size_t size) { |
| 51 const size_t kPageSize = 4096; | 56 const size_t kPageSize = 4096; |
| 52 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); | 57 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); |
| 53 if (size > std::numeric_limits<size_t>::max() - kPageSize + 1) | 58 if (size > std::numeric_limits<size_t>::max() - kPageSize + 1) |
| 54 return 0; | 59 return 0; |
| 55 const size_t mask = ~(kPageSize - 1); | 60 const size_t mask = ~(kPageSize - 1); |
| 56 return (size + kPageSize - 1) & mask; | 61 return (size + kPageSize - 1) & mask; |
| 57 } | 62 } |
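
The rounding above relies on kPageSize being a power of two, so that kPageSize - 1 is a mask of the low bits. The same logic as a standalone helper with a worked example (a sketch mirroring AlignToNextPage(), not part of the CL):

    #include <cstddef>
    #include <limits>

    // Rounds |size| up to the next multiple of |page|, a power of two.
    size_t AlignUp(size_t size, size_t page) {
      // The largest representable multiple of |page| is SIZE_MAX - page + 1;
      // anything bigger would overflow in the addition below.
      if (size > std::numeric_limits<size_t>::max() - page + 1)
        return 0;
      return (size + page - 1) & ~(page - 1);
    }
    // AlignUp(5000, 4096) == 8192; AlignUp(4096, 4096) == 4096.
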
| 58 | 63 |
| 64 bool CreateAshmemRegion(const char* name, |
| 65 size_t size, |
| 66 int* out_fd, |
| 67 void** out_address) { |
| 68 int fd = ashmem_create_region(name, size); |
| 69 if (fd < 0) { |
| 70 DLOG(ERROR) << "ashmem_create_region() failed"; |
| 71 return false; |
| 72 } |
| 73 file_util::ScopedFD fd_closer(&fd); |
| 74 |
| 75 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); |
| 76 if (err < 0) { |
| 77 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; |
| 78 return false; |
| 79 } |
| 80 |
| 81 // There is a problem using MAP_PRIVATE here. As we are constantly calling |
| 82 // Lock() and Unlock(), data could get lost if it is not written to the |
| 83 // underlying file when Unlock() gets called. |
| 84 void* const address = mmap( |
| 85 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); |
| 86 if (address == MAP_FAILED) { |
| 87 DPLOG(ERROR) << "Failed to map memory."; |
| 88 return false; |
| 89 } |
| 90 |
| 91 ignore_result(fd_closer.release()); |
| 92 *out_fd = fd; |
| 93 *out_address = address; |
| 94 return true; |
| 95 } |
| 96 |
| 97 bool CloseAshmemRegion(int fd, size_t size, void* address) { |
| 98 if (munmap(address, size) == -1) { |
| 99 DPLOG(ERROR) << "Failed to unmap memory."; |
| 100 close(fd); |
| 101 return false; |
| 102 } |
| 103 return close(fd) == 0; |
| 104 } |
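
CreateAshmemRegion() above uses file_util::ScopedFD so that |fd| is closed on every early-return path and ownership is released to the caller only on success. The pattern in isolation (|AcquireFd| and |Setup| are hypothetical stand-ins, not APIs from this CL):

    bool CreateResource(int* out_fd) {
      int fd = AcquireFd();                // Hypothetical acquisition.
      if (fd < 0)
        return false;
      file_util::ScopedFD fd_closer(&fd);  // Closes |fd| on any early return.
      if (!Setup(fd))                      // Hypothetical setup step.
        return false;                      // |fd_closer| closes |fd| here.
      ignore_result(fd_closer.release());  // Success: caller owns |fd| now.
      *out_fd = fd;
      return true;
    }
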
| 105 |
| 106 DiscardableMemoryLockStatus LockAshmemRegion(int fd, |
| 107 size_t off, |
| 108 size_t size, |
| 109 const void* address) { |
| 110 const int result = ashmem_pin_region(fd, off, size); |
| 111 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); |
| 112 return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED |
| 113 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; |
| 114 } |
| 115 |
| 116 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
| 117 const int failed = ashmem_unpin_region(fd, off, size); |
| 118 if (failed) |
| 119 DLOG(ERROR) << "Failed to unpin memory."; |
| 120 // This allows us to catch accesses to unlocked memory. |
| 121 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
| 122 return !failed; |
| 123 } |
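
Taken together, the four helpers above give each chunk its discard/restore life cycle. A hypothetical walk-through (not code from this CL); note that the mprotect() calls that trap access to unlocked memory sit inside DCHECK_EQ, so the PROT_NONE trap is effectively debug-only:

    int fd;
    void* addr;
    const size_t kSize = 4096;  // Must be page-aligned.
    if (CreateAshmemRegion("lifecycle-demo", kSize, &fd, &addr)) {
      UnlockAshmemRegion(fd, 0, kSize, addr);  // Unpinned: kernel may purge.
      // Touching |addr| here faults in DCHECK builds (PROT_NONE).
      if (LockAshmemRegion(fd, 0, kSize, addr) ==
          DISCARDABLE_MEMORY_LOCK_STATUS_PURGED) {
        // Contents were discarded while unpinned; regenerate them here.
      }
      CloseAshmemRegion(fd, kSize, addr);
    }
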
| 124 |
| 59 } // namespace | 125 } // namespace |
| 60 | 126 |
| 61 namespace internal { | 127 namespace internal { |
| 62 | 128 |
| 63 class DiscardableMemoryAllocator::DiscardableAshmemChunk | 129 class DiscardableMemoryAllocator::DiscardableAshmemChunk |
| 64 : public DiscardableMemory { | 130 : public DiscardableMemory { |
| 65 public: | 131 public: |
| 66 // Note that |ashmem_region| must outlive |this|. | 132 // Note that |ashmem_region| must outlive |this|. |
| 67 DiscardableAshmemChunk(AshmemRegion* ashmem_region, | 133 DiscardableAshmemChunk(AshmemRegion* ashmem_region, |
| 68 int fd, | 134 int fd, |
| 69 void* address, | 135 void* address, |
| 70 size_t offset, | 136 size_t offset, |
| 71 size_t size) | 137 size_t size) |
| 72 : ashmem_region_(ashmem_region), | 138 : ashmem_region_(ashmem_region), |
| 73 fd_(fd), | 139 fd_(fd), |
| 74 address_(address), | 140 address_(address), |
| 75 offset_(offset), | 141 offset_(offset), |
| 76 size_(size), | 142 size_(size), |
| 77 locked_(true) { | 143 locked_(true) { |
| 78 } | 144 } |
| 79 | 145 |
| 80 // Implemented below AshmemRegion since this requires the full definition of | 146 // Implemented below AshmemRegion since this requires the full definition of |
| 81 // AshmemRegion. | 147 // AshmemRegion. |
| 82 virtual ~DiscardableAshmemChunk(); | 148 virtual ~DiscardableAshmemChunk(); |
| 83 | 149 |
| 84 // DiscardableMemory: | 150 // DiscardableMemory: |
| 85 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { | 151 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { |
| 86 DCHECK(!locked_); | 152 DCHECK(!locked_); |
| 87 locked_ = true; | 153 locked_ = true; |
| 88 return internal::LockAshmemRegion(fd_, offset_, size_, address_); | 154 return LockAshmemRegion(fd_, offset_, size_, address_); |
| 89 } | 155 } |
| 90 | 156 |
| 91 virtual void Unlock() OVERRIDE { | 157 virtual void Unlock() OVERRIDE { |
| 92 DCHECK(locked_); | 158 DCHECK(locked_); |
| 93 locked_ = false; | 159 locked_ = false; |
| 94 internal::UnlockAshmemRegion(fd_, offset_, size_, address_); | 160 UnlockAshmemRegion(fd_, offset_, size_, address_); |
| 95 } | 161 } |
| 96 | 162 |
| 97 virtual void* Memory() const OVERRIDE { | 163 virtual void* Memory() const OVERRIDE { |
| 98 return address_; | 164 return address_; |
| 99 } | 165 } |
| 100 | 166 |
| 101 private: | 167 private: |
| 102 AshmemRegion* const ashmem_region_; | 168 AshmemRegion* const ashmem_region_; |
| 103 const int fd_; | 169 const int fd_; |
| 104 void* const address_; | 170 void* const address_; |
| 105 const size_t offset_; | 171 const size_t offset_; |
| 106 const size_t size_; | 172 const size_t size_; |
| 107 bool locked_; | 173 bool locked_; |
| 108 | 174 |
| 109 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); | 175 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); |
| 110 }; | 176 }; |
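
From the client's side, a DiscardableAshmemChunk behaves like any DiscardableMemory: it is born locked, and a Lock() following an Unlock() may report that the contents were purged. A hypothetical usage (the |allocator| instance and the requested size are assumptions):

    scoped_ptr<DiscardableMemory> chunk = allocator->Allocate(16 * 1024);
    void* data = chunk->Memory();  // Valid while locked; chunks start locked.
    // ... fill |data| ...
    chunk->Unlock();               // From here the kernel may purge the chunk.
    if (chunk->Lock() == DISCARDABLE_MEMORY_LOCK_STATUS_PURGED) {
      // Backing pages were reclaimed; regenerate before reading again.
    }
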
| 111 | 177 |
| 112 class DiscardableMemoryAllocator::AshmemRegion { | 178 class DiscardableMemoryAllocator::AshmemRegion { |
| 113 public: | 179 public: |
| 114 // Note that |allocator| must outlive |this|. | 180 // Note that |allocator| must outlive |this|. |
| 115 static scoped_ptr<AshmemRegion> Create( | 181 static scoped_ptr<AshmemRegion> Create( |
| 116 size_t size, | 182 size_t size, |
| 117 const std::string& name, | 183 const std::string& name, |
| 118 DiscardableMemoryAllocator* allocator) { | 184 DiscardableMemoryAllocator* allocator) { |
| 119 DCHECK_EQ(size, AlignToNextPage(size)); | 185 DCHECK_EQ(size, AlignToNextPage(size)); |
| 120 int fd; | 186 int fd; |
| 121 void* base; | 187 void* base; |
| 122 if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base)) | 188 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) |
| 123 return scoped_ptr<AshmemRegion>(); | 189 return scoped_ptr<AshmemRegion>(); |
| 124 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); | 190 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); |
| 125 } | 191 } |
| 126 | 192 |
| 127 ~AshmemRegion() { | 193 ~AshmemRegion() { |
| 128 const bool result = internal::CloseAshmemRegion(fd_, size_, base_); | 194 const bool result = CloseAshmemRegion(fd_, size_, base_); |
| 129 DCHECK(result); | 195 DCHECK(result); |
| 130 } | 196 } |
| 131 | 197 |
| 132 // Returns a new instance of DiscardableMemory whose size is greater than or | 198 // Returns a new instance of DiscardableMemory whose size is greater than or |
| 133 // equal to |actual_size| (which is expected to be greater than or equal to | 199 // equal to |actual_size| (which is expected to be greater than or equal to |
| 134 // |client_requested_size|). | 200 // |client_requested_size|). |
| 135 // Allocation works as follows: | 201 // Allocation works as follows: |
| 136 // 1) Reuse a previously freed chunk and return it if it succeeded. See | 202 // 1) Reuse a previously freed chunk and return it if it succeeded. See |
| 137 // ReuseFreeChunk_Locked() below for more information. | 203 // ReuseFreeChunk_Locked() below for more information. |
| 138 // 2) If no free chunk could be reused and the region is not big enough for | 204 // 2) If no free chunk could be reused and the region is not big enough for |
| (...skipping 111 matching lines...) |
| 250 DCHECK_GT(reused_chunk.size, actual_size); | 316 DCHECK_GT(reused_chunk.size, actual_size); |
| 251 const size_t new_chunk_size = reused_chunk.size - actual_size; | 317 const size_t new_chunk_size = reused_chunk.size - actual_size; |
| 252 // Note that merging is not needed here since there can't be contiguous | 318 // Note that merging is not needed here since there can't be contiguous |
| 253 // free chunks at this point. | 319 // free chunks at this point. |
| 254 AddFreeChunk_Locked( | 320 AddFreeChunk_Locked( |
| 255 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); | 321 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); |
| 256 } | 322 } |
| 257 | 323 |
| 258 const size_t offset = | 324 const size_t offset = |
| 259 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); | 325 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); |
| 260 internal::LockAshmemRegion( | 326 LockAshmemRegion(fd_, offset, reused_chunk_size, reused_chunk.start); |
| 261 fd_, offset, reused_chunk_size, reused_chunk.start); | |
| 262 scoped_ptr<DiscardableMemory> memory( | 327 scoped_ptr<DiscardableMemory> memory( |
| 263 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, | 328 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, |
| 264 reused_chunk_size)); | 329 reused_chunk_size)); |
| 265 return memory.Pass(); | 330 return memory.Pass(); |
| 266 } | 331 } |
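
The tail of this function splits an oversized free chunk: the head becomes the returned allocation and the remainder is re-registered as a free chunk. The geometry, assuming |new_chunk_start| is set to reused_chunk.start + actual_size in the lines elided above:

    //  |<-------------- reused_chunk.size -------------->|
    //  |<--- actual_size --->|<----- new_chunk_size ----->|
    //  ^ reused_chunk.start  ^ new_chunk_start
    //
    // Head: handed out as the new DiscardableAshmemChunk.
    // Tail: put back on the free list via AddFreeChunk_Locked().
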
| 267 | 332 |
| 268 // Makes the chunk identified with the provided arguments free and possibly | 333 // Makes the chunk identified with the provided arguments free and possibly |
| 269 // merges this chunk with the previous and next contiguous ones. | 334 // merges this chunk with the previous and next contiguous ones. |
| 270 // If the provided chunk is the only one used (and going to be freed) in the | 335 // If the provided chunk is the only one used (and going to be freed) in the |
| 271 // region then the internal ashmem region is closed so that the underlying | 336 // region then the internal ashmem region is closed so that the underlying |
| (...skipping 111 matching lines...) |
| 383 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; | 448 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; |
| 384 // Maps the address of *used* chunks to the address of their previous | 449 // Maps the address of *used* chunks to the address of their previous |
| 385 // contiguous chunk. | 450 // contiguous chunk. |
| 386 hash_map<void*, void*> used_to_previous_chunk_map_; | 451 hash_map<void*, void*> used_to_previous_chunk_map_; |
| 387 | 452 |
| 388 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); | 453 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); |
| 389 }; | 454 }; |
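
The two maps at the bottom of AshmemRegion exist so that freeing a chunk can coalesce it with contiguous neighbours without scanning. A self-contained toy of the right-neighbour merge (an illustration of the idea only, not the CL's data structures):

    #include <cstddef>
    #include <map>

    std::map<char*, size_t> free_blocks;  // Start address -> size.

    void Free(char* start, size_t size) {
      // If the freed block ends exactly where a free block starts,
      // absorb that right-hand neighbour.
      std::map<char*, size_t>::iterator right =
          free_blocks.find(start + size);
      if (right != free_blocks.end()) {
        size += right->second;
        free_blocks.erase(right);
      }
      free_blocks[start] = size;  // (The symmetric left-merge is omitted.)
    }
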
| 390 | 455 |
| 391 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { | 456 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { |
| 392 if (locked_) | 457 if (locked_) |
| 393 internal::UnlockAshmemRegion(fd_, offset_, size_, address_); | 458 UnlockAshmemRegion(fd_, offset_, size_, address_); |
| 394 ashmem_region_->OnChunkDeletion(address_, size_); | 459 ashmem_region_->OnChunkDeletion(address_, size_); |
| 395 } | 460 } |
| 396 | 461 |
| 397 DiscardableMemoryAllocator::DiscardableMemoryAllocator( | 462 DiscardableMemoryAllocator::DiscardableMemoryAllocator( |
| 398 const std::string& name, | 463 const std::string& name, |
| 399 size_t ashmem_region_size) | 464 size_t ashmem_region_size) |
| 400 : name_(name), | 465 : name_(name), |
| 401 ashmem_region_size_( | 466 ashmem_region_size_( |
| 402 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), | 467 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), |
| 403 last_ashmem_region_size_(0) { | 468 last_ashmem_region_size_(0) { |
| (...skipping 55 matching lines...) |
| 459 DCHECK_LE(ashmem_regions_.size(), 5U); | 524 DCHECK_LE(ashmem_regions_.size(), 5U); |
| 460 const ScopedVector<AshmemRegion>::iterator it = std::find( | 525 const ScopedVector<AshmemRegion>::iterator it = std::find( |
| 461 ashmem_regions_.begin(), ashmem_regions_.end(), region); | 526 ashmem_regions_.begin(), ashmem_regions_.end(), region); |
| 462 DCHECK_NE(ashmem_regions_.end(), it); | 527 DCHECK_NE(ashmem_regions_.end(), it); |
| 463 std::swap(*it, ashmem_regions_.back()); | 528 std::swap(*it, ashmem_regions_.back()); |
| 464 ashmem_regions_.pop_back(); | 529 ashmem_regions_.pop_back(); |
| 465 } | 530 } |
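
The removal above is the classic swap-with-back idiom: O(1) erase from a vector when element order does not matter. The same idiom on a plain std::vector for reference (a sketch, not part of the CL):

    #include <algorithm>
    #include <vector>

    template <typename T>
    void UnorderedErase(std::vector<T>* v, const T& value) {
      typename std::vector<T>::iterator it =
          std::find(v->begin(), v->end(), value);
      if (it == v->end())
        return;
      std::swap(*it, v->back());  // Move the victim to the end...
      v->pop_back();              // ...then drop it without shifting the rest.
    }
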
| 466 | 531 |
| 467 } // namespace internal | 532 } // namespace internal |
| 468 } // namespace base | 533 } // namespace base |