// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <algorithm>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_android.h"
#include "base/memory/scoped_vector.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: entry point of all allocations (through its
// Allocate() method) that are dispatched to the AshmemRegion instances (which
// it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
// whose instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.
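//
// A minimal usage sketch (illustrative only; the LockDiscardableMemoryStatus
// values are assumed to come from base/memory/discardable_memory.h):
//
//   DiscardableMemoryAllocator allocator("MyAllocator");
//   scoped_ptr<DiscardableMemory> memory(allocator.Allocate(1024));
//   if (memory) {
//     memset(memory->Memory(), 0, 1024);  // Chunks start out locked.
//     memory->Unlock();  // Pages may now be reclaimed under memory pressure.
//     if (memory->Lock() == DISCARDABLE_MEMORY_PURGED) {
//       // The contents were discarded while unlocked and must be
//       // regenerated before use.
//     }
//   }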

namespace base {
namespace {

const int kInvalidFD = -1;

// Allow 8 KBytes of fragmentation inside used chunks.
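// For example: recycling a 32768-byte free chunk to satisfy a 20480-byte
// client request would leave 32768 - 20480 = 12288 bytes (>= 8192) of slack,
// so RecycleFreeChunk() below splits off the unused tail of the chunk rather
// than locking it.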
const size_t kMaxChunkFragmentationBytes = 8192;

class DiscardableAshmemChunk : public DiscardableMemory {
 public:
  // Interface through which a chunk notifies its owner (AshmemRegion below)
  // of its deletion so that the underlying range can be reclaimed.
  struct DeletionObserver {
    virtual void OnChunkDeletion(int fd,
                                 void* previous_chunk,
                                 void* chunk,
                                 size_t size) = 0;

   protected:
    virtual ~DeletionObserver() {}
  };

  DiscardableAshmemChunk(DeletionObserver* deletion_observer,
                         int fd,
                         void* previous_chunk,
                         void* address,
                         size_t offset,
                         size_t size)
      : deletion_observer_(deletion_observer),
        fd_(fd),
        previous_chunk_(previous_chunk),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  virtual ~DiscardableAshmemChunk() {
    if (locked_)
      internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
    deletion_observer_->OnChunkDeletion(fd_, previous_chunk_, address_, size_);
  }

  // DiscardableMemory:
  virtual size_t Size() const OVERRIDE {
    return size_;
  }

  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  DeletionObserver* const deletion_observer_;
  const int fd_;
  void* const previous_chunk_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

}  // namespace

namespace internal {

class DiscardableMemoryAllocator::AshmemRegion
    : public DiscardableAshmemChunk::DeletionObserver {
 public:
  AshmemRegion(size_t size, const std::string& name)
      : fd_(kInvalidFD),
        base_(NULL),
        size_(size),
        offset_(0),
        name_(name),
        last_allocated_chunk_(NULL) {
  }

  virtual ~AshmemRegion() {
    DCHECK(thread_checker_.CalledOnValidThread());
    base::AutoLock auto_lock(lock_);
    if (!AshmemRegionClosed())
      CloseAshmemRegion();
  }

  scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
                                         size_t actual_size) {
    base::AutoLock auto_lock(lock_);
    if (AshmemRegionClosed() && !OpenAshmemRegion())
      return scoped_ptr<DiscardableMemory>();
    scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();
    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableMemory>();
    }
    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, last_allocated_chunk_, address,
                                   offset_, actual_size));
    last_allocated_chunk_ = address;
    offset_ += actual_size;
    return memory.Pass();
  }

 private:
  struct FreeChunk {
    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    // Free chunks are ordered by size so that |free_chunks_| (a multiset)
    // supports closest-size lookups via lower_bound().
    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  // DiscardableAshmemChunk::DeletionObserver:
  virtual void OnChunkDeletion(int fd,
                               void* previous_chunk,
                               void* chunk,
                               size_t size) OVERRIDE {
    base::AutoLock auto_lock(lock_);
    AddFreeChunk(fd, previous_chunk, chunk, size,
                 MERGE_PREVIOUS_CHUNKS | MERGE_NEXT_CHUNKS);
  }

  // Tries to reuse a previously freed chunk by doing a closest size match,
  // i.e. picking the smallest free chunk whose size is at least
  // |actual_size|.
  scoped_ptr<DiscardableMemory> RecycleFreeChunk(size_t client_requested_size,
                                                 size_t actual_size) {
    lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator chunk_it =
        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
    if (chunk_it == free_chunks_.end())
      return scoped_ptr<DiscardableMemory>();
    size_t recycled_chunk_size = chunk_it->size;
    const size_t fragmentation_bytes = chunk_it->size - client_requested_size;
    if (fragmentation_bytes >= kMaxChunkFragmentationBytes) {
      // Split the free chunk being recycled if it's too large so that its
      // unused tail doesn't get recycled (i.e. locked) which would prevent it
      // from being evicted under memory pressure.
      const int fd = -1;
      void* const previous_chunk = chunk_it->start;
      AddFreeChunk(
          fd, previous_chunk,
          static_cast<char*>(chunk_it->start) + actual_size,
          chunk_it->size - actual_size, MERGE_NEXT_CHUNKS);
      recycled_chunk_size = actual_size;
    }
    const size_t offset =
        static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
    internal::LockAshmemRegion(
        fd_, offset, recycled_chunk_size, chunk_it->start);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(
            this, fd_, chunk_it->previous_chunk, chunk_it->start, offset,
            recycled_chunk_size));
    free_chunk_for_address_.erase(reinterpret_cast<uintptr_t>(chunk_it->start));
    free_chunks_.erase(chunk_it);
    return memory.Pass();
  }

  enum ContiguousChunksMergingFlags {
    MERGE_PREVIOUS_CHUNKS = 1,
    MERGE_NEXT_CHUNKS = 2,
  };

  // Makes the chunk identified by the provided arguments free and possibly
  // merges it with the previous and next contiguous chunks according to the
  // value of |chunk_merging_flags|.
  // If the provided chunk is the only one used (and about to be freed) in the
  // region then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, so the kernel can reclaim them under
  // memory pressure; unfortunately they are not immediately released, since
  // madvise(MADV_REMOVE) and fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to
  // work on ashmem. This might change in kernel versions >= 3.5 though. The
  // fact that free chunks are not immediately released is the reason why we
  // try to minimize fragmentation.
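  // For example, given the layout |A free| B used| C free| D used| and a
  // call freeing B with both merge flags set, A, B and C are coalesced into
  // a single free chunk whose |previous_chunk| is the chunk preceding A
  // (NULL if A starts the region).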
  void AddFreeChunk(int fd,
                    void* previous_chunk,
                    void* chunk,
                    size_t size,
                    int chunk_merging_flags) {
    lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    void* first_free_chunk = chunk;
    if (chunk_merging_flags & MERGE_PREVIOUS_CHUNKS) {
      while (previous_chunk) {
        const FreeChunk free_chunk = UnlinkFreeChunk(previous_chunk);
        if (free_chunk.is_null())
          break;
        new_free_chunk_size += free_chunk.size;
        first_free_chunk = previous_chunk;
        previous_chunk = free_chunk.previous_chunk;
      }
    }
    const void* next_chunk = static_cast<const char*>(chunk) + size;
    if (chunk_merging_flags & MERGE_NEXT_CHUNKS) {
      while (true) {
        const FreeChunk free_chunk = UnlinkFreeChunk(next_chunk);
        if (free_chunk.is_null())
          break;
        new_free_chunk_size += free_chunk.size;
        next_chunk = static_cast<const char*>(next_chunk) + free_chunk.size;
      }
    }
    const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
    if (!whole_ashmem_region_is_free) {
      const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      free_chunk_for_address_.insert(
          std::make_pair(reinterpret_cast<uintptr_t>(first_free_chunk), it));
      return;
    }
    // The whole ashmem region is free thus it can be closed. Note that
    // deleting the instance and notifying the allocator would be cleaner (it
    // would allow |fd_| and |base_| to be immutable in particular) but this
    // would imply some non-trivial threading interactions since this method
    // can be called on any thread and the allocator has its own lock.
    DCHECK_EQ(size_, new_free_chunk_size);
    DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());
    DCHECK(!AshmemRegionClosed());
    CloseAshmemRegion();
  }

  // Finds and unlinks the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
  // whose content is null if it was not found.
  FreeChunk UnlinkFreeChunk(const void* chunk_start) {
    lock_.AssertAcquired();
    const base::hash_map<
        uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
            free_chunk_for_address_.find(
                reinterpret_cast<uintptr_t>(chunk_start));
    if (it == free_chunk_for_address_.end())
      return FreeChunk(NULL, NULL, 0U);
    const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
    const FreeChunk free_chunk(*free_chunk_it);
    DCHECK_EQ(chunk_start, free_chunk.start);
    free_chunk_for_address_.erase(it);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  bool AshmemRegionClosed() const {
    lock_.AssertAcquired();
    DCHECK((fd_ == kInvalidFD && !base_) || (fd_ != kInvalidFD && base_));
    return fd_ == kInvalidFD;
  }

  void CloseAshmemRegion() {
    lock_.AssertAcquired();
    DCHECK(fd_ != kInvalidFD && base_);
    const bool result = internal::DeleteAshmemRegion(fd_, size_, base_);
    DCHECK(result);
    fd_ = kInvalidFD;
    base_ = NULL;
    offset_ = 0U;
    last_allocated_chunk_ = NULL;
  }

  bool OpenAshmemRegion() {
    lock_.AssertAcquired();
    DCHECK(fd_ == kInvalidFD && !base_ && !last_allocated_chunk_);
    int fd;
    void* address;
    if (!internal::CreateAshmemRegion(name_.c_str(), size_, &fd, &address))
      return false;
    fd_ = fd;
    base_ = address;
    return true;
  }

  base::ThreadChecker thread_checker_;
  base::Lock lock_;  // Protects the state below.
  int fd_;
  void* base_;
  const size_t size_;
  size_t offset_;
  const std::string name_;
  void* last_allocated_chunk_;
  std::multiset<FreeChunk> free_chunks_;
  base::hash_map<
      uintptr_t, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
    : name_(name) {
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());
}

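// Allocates a chunk of at least |size| bytes by first trying each existing
// region in creation order (first fit) and creating a new region when none
// has enough space left.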
scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  const size_t aligned_size = internal::AlignToNextPage(size);
  base::AutoLock auto_lock(lock_);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  ashmem_regions_.push_back(
      new AshmemRegion(
          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
          name_));
  return ashmem_regions_.back()->Allocate(size, aligned_size);
}

}  // namespace internal
}  // namespace base