Chromium Code Reviews
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <algorithm>
#include <cmath>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_android.h"
#include "base/memory/scoped_vector.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: entry point of all allocations (through its
// Allocate() method) that are dispatched to the AshmemRegion instances (which
// it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
// whose instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.

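To orient the reader, here is a hypothetical usage sketch. The allocator's public interface is declared in discardable_memory_allocator_android.h, which is not part of this listing, so the signatures and the DISCARDABLE_MEMORY_SUCCESS enumerator below are inferred from this file and from base/memory/discardable_memory.h rather than quoted:

// Hypothetical usage sketch (not part of this patch).
#include <string.h>  // For memset().

void DiscardableMemoryUsageExample() {
  base::internal::DiscardableMemoryAllocator allocator("example-allocator");
  scoped_ptr<base::DiscardableMemory> chunk(allocator.Allocate(4096));
  if (!chunk)
    return;  // Allocation can fail, e.g. if creating the ashmem region fails.
  memset(chunk->Memory(), 0, 4096);  // Chunks are returned locked.
  chunk->Unlock();  // Pages may now be reclaimed under memory pressure.
  if (chunk->Lock() != base::DISCARDABLE_MEMORY_SUCCESS)
    return;  // Contents were purged (or locking failed); recreate the data.
  chunk->Unlock();
}  // Destroying |chunk| returns its range to the owning AshmemRegion.
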
namespace base {
namespace {

// Only tolerate fragmentation in used chunks *caused by the client* (as
// opposed to the allocator when a free chunk is reused). The client can cause
// such fragmentation by e.g. requesting 4097 bytes. This size would be rounded
// up to 8192 by the allocator, which would cause 4095 bytes of fragmentation
// (which is currently the maximum allowed). If the client requests 4096 bytes
// and a free chunk of 8192 bytes is available then the free chunk gets split
// into two pieces to minimize fragmentation (since 8192 - 4096 = 4096, which
// is greater than 4095).
// TODO(pliard): tune this if splitting chunks too often leads to performance
// issues.
const size_t kMaxChunkFragmentationBytes = 4096 - 1;

}  // namespace

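The arithmetic behind this constant can be made concrete with a standalone sketch. AlignToPage() below is a stand-in for internal::AlignToNextPage(), which is defined elsewhere, and assumes 4096-byte pages:

// Standalone sketch of the fragmentation rule (assumes 4096-byte pages).
size_t AlignToPage(size_t size) {  // Stand-in for internal::AlignToNextPage().
  const size_t kPageSize = 4096;
  return (size + kPageSize - 1) & ~(kPageSize - 1);
}
// AlignToPage(4097) == 8192, wasting 8192 - 4097 = 4095 bytes: the largest
// client-caused fragmentation tolerated. Reusing a free 8192-byte chunk for a
// 4096-byte request would waste 8192 - 4096 = 4096 > 4095 bytes, so the free
// chunk is split in two instead.
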
namespace internal {

class DiscardableMemoryAllocator::DiscardableAshmemChunk
    : public DiscardableMemory {
 public:
  // Note that |ashmem_region| must outlive |this|.
  DiscardableAshmemChunk(AshmemRegion* ashmem_region,
                         int fd,
                         void* address,
                         size_t offset,
                         size_t size)
      : ashmem_region_(ashmem_region),
        fd_(fd),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  // Implemented below AshmemRegion since this requires the full definition of
  // AshmemRegion.
  virtual ~DiscardableAshmemChunk();

  // DiscardableMemory:
  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  AshmemRegion* const ashmem_region_;
  const int fd_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

class DiscardableMemoryAllocator::AshmemRegion {
 public:
  // Note that |allocator| must outlive |this|.
  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      DiscardableMemoryAllocator* allocator) {
    int fd;
    void* base;
    if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
  }

  virtual ~AshmemRegion() {
    const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
    DCHECK(result);
  }

  // Returns a new instance of DiscardableMemory whose size is greater than or
  // equal to |actual_size| (which is itself expected to be greater than or
  // equal to |client_requested_size|).
  // Allocation works as follows:
  // 1) Reuse a previously freed chunk and return it if this succeeds. See
  // ReuseFreeChunk_Locked() below for more information.
  // 2) If no free chunk could be reused and the region is not big enough for
  // the requested size then NULL is returned.
  // 3) If there is enough room in the ashmem region then a new chunk is
  // returned. This new chunk starts at |offset_| which is the end of the
  // previously highest chunk in the region.
  scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
                                                size_t actual_size) {
    DCHECK_LE(client_requested_size, actual_size);
    allocator_->lock_.AssertAcquired();
    scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();
    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableMemory>();
    }
    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
    used_to_previous_chunk_map_.insert(
        std::make_pair(address, highest_allocated_chunk_));
    highest_allocated_chunk_ = address;
    offset_ += actual_size;
    return memory.Pass();
  }

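A worked example of the three steps above, with illustrative sizes:

// Worked example: in an empty region, Allocate_Locked(4096, 4096) finds no
// free chunk to reuse (step 1), has room (step 2 does not trigger), and hands
// out [0, 4096) while bumping |offset_| to 4096 (step 3). A second identical
// call hands out [4096, 8192). If the first chunk is then freed, step 1
// serves the next 4096-byte request from it instead of growing |offset_|.
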
  void OnChunkDeletion(void* chunk, size_t size) {
    base::AutoLock auto_lock(allocator_->lock_);
    MergeAndAddFreeChunk_Locked(chunk, size);
  }

 private:
  struct FreeChunk {
    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  // Note that |allocator| must outlive |this|.
  AshmemRegion(int fd,
               size_t size,
               void* base,
               DiscardableMemoryAllocator* allocator)
      : fd_(fd),
        size_(size),
        base_(base),
        allocator_(allocator),
        highest_allocated_chunk_(NULL),
        offset_(0) {
    DCHECK_GE(fd_, 0);
    DCHECK_GE(size, kMinAshmemRegionSize);
    DCHECK(base);
    DCHECK(allocator);
  }

  // Tries to reuse a previously freed chunk by doing a closest size match.
  scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked(
      size_t client_requested_size,
      size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    const FreeChunk free_chunk = RemoveFreeChunkFromIterator_Locked(

willchan (no longer on Chromium), 2013/11/28 22:51:55:
I go back and forth on this, but part of me wonder…

Philippe, 2013/11/29 12:41:05:
Yes, I was also tempted to rename this variable, g…

        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size)));
    if (free_chunk.is_null())
      return scoped_ptr<DiscardableMemory>();
|
willchan (no longer on Chromium), 2013/11/28 22:51:55:
Nit: I'd prefer a newline here. Up to you though.

Philippe, 2013/11/29 12:41:05:
I added the blank line. I tend to avoid them in ge…

    used_to_previous_chunk_map_.insert(
        std::make_pair(free_chunk.start, free_chunk.previous_chunk));

    size_t reused_chunk_size = free_chunk.size;
    // |client_requested_size| is used below rather than |actual_size| to
    // reflect the number of bytes that would not be usable by the client
    // (i.e. wasted). Using |actual_size| instead would not allow us to detect
    // fragmentation caused by the client making misaligned requests.
    const size_t fragmentation_bytes = free_chunk.size - client_requested_size;
    if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
      reused_chunk_size = actual_size;
      void* const previous_chunk = free_chunk.start;
      void* const new_chunk_start =
          static_cast<char*>(free_chunk.start) + actual_size;
      DCHECK_GT(free_chunk.size, actual_size);
      const size_t new_chunk_size = free_chunk.size - actual_size;
      // Note that merging is not needed here since there can't be contiguous
      // free chunks at this point.
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, new_chunk_start, new_chunk_size));
    }

    const size_t offset =
        static_cast<char*>(free_chunk.start) - static_cast<char*>(base_);
    internal::LockAshmemRegion(
        fd_, offset, reused_chunk_size, free_chunk.start);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(this, fd_, free_chunk.start, offset,
                                   reused_chunk_size));
    return memory.Pass();
  }

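The closest-size match relies on |free_chunks_| being ordered by chunk size, so that lower_bound() lands on the smallest free chunk that is large enough. Below is a self-contained sketch of that lookup, using simplified stand-in types rather than the FreeChunk type above:

// Self-contained sketch of best-fit lookup via std::multiset::lower_bound().
#include <cstddef>
#include <set>

struct Chunk {
  explicit Chunk(size_t size) : size(size) {}
  size_t size;
  bool operator<(const Chunk& other) const { return size < other.size; }
};

// Returns the size of the smallest free chunk that can hold |wanted| bytes,
// or 0 if no free chunk is large enough.
size_t BestFitSize(const std::multiset<Chunk>& free_chunks, size_t wanted) {
  std::multiset<Chunk>::const_iterator it =
      free_chunks.lower_bound(Chunk(wanted));
  return it == free_chunks.end() ? 0 : it->size;
}
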
  // Makes the chunk identified by the provided arguments free and possibly
  // merges it with the previous and next contiguous chunks.
  // If the provided chunk is the only used one in the region (i.e. the region
  // becomes entirely free once it is released), the underlying ashmem region
  // is closed so that its physical pages are immediately released.
  // Note that free chunks are unlocked, therefore they can be reclaimed by
  // the kernel if needed (under memory pressure), but unfortunately they are
  // not immediately released since madvise(MADV_REMOVE) and
  // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
  // change on kernel versions >= 3.5 though. The fact that free chunks are
  // not immediately released is the reason why we try to minimize
  // fragmentation, in order not to cause "artificial" memory pressure.
  void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) {
    allocator_->lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    // Merge with the previous chunk.
    void* first_free_chunk = chunk;
    DCHECK_NE(0U, used_to_previous_chunk_map_.size());
    const base::hash_map<void*, void*>::iterator previous_chunk_it =
        used_to_previous_chunk_map_.find(chunk);
    DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
    void* previous_chunk = previous_chunk_it->second;
    used_to_previous_chunk_map_.erase(previous_chunk_it);
    if (previous_chunk) {
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
      if (!free_chunk.is_null()) {
        new_free_chunk_size += free_chunk.size;
        first_free_chunk = previous_chunk;
        // There should not be more contiguous free chunks.

willchan (no longer on Chromium), 2013/11/28 22:51:55:
Nit: just to be explicit, perhaps "There should no…

Philippe, 2013/11/29 12:41:05:
Done.

        DCHECK(!address_to_free_chunk_map_.count(free_chunk.previous_chunk));
      }
    }
    // Merge with the next chunk.
|
willchan (no longer on Chromium), 2013/11/28 22:51:55:
Nit: "Merge with the next chunk if free and presen…

Philippe, 2013/11/29 12:41:05:
Done.

    void* next_chunk = static_cast<char*>(chunk) + size;
    const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
    if (!next_free_chunk.is_null()) {
      new_free_chunk_size += next_free_chunk.size;
      // Same as above.
      DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) +
                                               next_free_chunk.size));
    }
    const bool whole_ashmem_region_is_free =
        used_to_previous_chunk_map_.empty();
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }
    // The whole ashmem region is free thus it can be deleted.
    DCHECK_EQ(base_, first_free_chunk);
    DCHECK_EQ(0U, free_chunks_.size());
    DCHECK_EQ(0U, address_to_free_chunk_map_.size());
    DCHECK_EQ(0U, used_to_previous_chunk_map_.size());
    allocator_->DeleteAshmemRegion_Locked(this);
  }

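A small illustration of the merge above, with a hypothetical layout (F = free, U = used):

//   Before freeing B:  [A: F][B: U][C: F][D: U]
//   MergeAndAddFreeChunk_Locked(B) unlinks A and C from the free-chunk maps
//   and inserts a single free chunk covering A+B+C. At most one neighbour on
//   each side needs merging because the allocator never leaves two contiguous
//   free chunks (hence the DCHECKs above). D's entry in
//   |used_to_previous_chunk_map_| is then updated by AddFreeChunk_Locked() to
//   point at the merged chunk.
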
  void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
    // Update the next used contiguous chunk, if any, since its previous chunk
    // may have changed due to free chunks merging/splitting.
    void* const next_used_contiguous_chunk =
        static_cast<char*>(free_chunk.start) + free_chunk.size;
    base::hash_map<void*, void*>::iterator previous_it =
        used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
    if (previous_it != used_to_previous_chunk_map_.end())
      previous_it->second = free_chunk.start;
  }

  // Finds and removes the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
  // whose content is null if it was not found.
  FreeChunk RemoveFreeChunk_Locked(void* chunk_start) {
    allocator_->lock_.AssertAcquired();
    const base::hash_map<
        void*, std::multiset<FreeChunk>::iterator>::iterator it =
            address_to_free_chunk_map_.find(chunk_start);
    if (it == address_to_free_chunk_map_.end())
      return FreeChunk(NULL, NULL, 0U);
    return RemoveFreeChunkFromIterator_Locked(it->second);
  }

  // Same as above but takes an iterator in.
  FreeChunk RemoveFreeChunkFromIterator_Locked(
      std::multiset<FreeChunk>::iterator free_chunk_it) {
    allocator_->lock_.AssertAcquired();
    if (free_chunk_it == free_chunks_.end())
      return FreeChunk(NULL, NULL, 0U);
    const FreeChunk free_chunk(*free_chunk_it);
    address_to_free_chunk_map_.erase(free_chunk_it->start);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  void* const base_;
  DiscardableMemoryAllocator* const allocator_;
  void* highest_allocated_chunk_;
  // Points to the end of |highest_allocated_chunk_|.
  size_t offset_;
  // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
  // Note that FreeChunk values are indexed by their size and also note that
  // multiple free chunks can have the same size (which is why multiset<> is
  // used instead of e.g. set<>).
  std::multiset<FreeChunk> free_chunks_;
  // Used while merging contiguous free chunks to erase free chunks (keyed by
  // their start address) in constant time. Note that multiset<>::{insert,
  // erase}() don't invalidate iterators (except the one for the element being
  // removed, obviously).
  base::hash_map<
      void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
  // Maps the address of *used* chunks to the address of their previous
  // contiguous chunk.
  base::hash_map<void*, void*> used_to_previous_chunk_map_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
  if (locked_)
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  ashmem_region_->OnChunkDeletion(address_, size_);
}

DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
    : name_(name) {
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(ashmem_regions_.empty());
}

scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  const size_t aligned_size = internal::AlignToNextPage(size);
  // TODO(pliard): make this function less naive, e.g. by moving the free
  // chunks multiset to the allocator itself to further reduce fragmentation
  // and speed up allocation. Note that there should not be more than a
  // handful (at most 5) of AshmemRegion instances in practice though.
  base::AutoLock auto_lock(lock_);
  DCHECK_LE(ashmem_regions_.size(), 5U);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory(
        (*it)->Allocate_Locked(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  scoped_ptr<AshmemRegion> new_region(
      AshmemRegion::Create(
          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
          name_.c_str(), this));
  if (!new_region) {
    // TODO(pliard): consider adding a histogram to see how often this
    // happens.
    return scoped_ptr<DiscardableMemory>();
  }
  ashmem_regions_.push_back(new_region.release());
  return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
}

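To make the sizing policy concrete, assuming kMinAshmemRegionSize is 32 MB (its actual value lives in the header, which is not shown here, but the overview comment suggests regions of that order):

// Example (kMinAshmemRegionSize assumed to be 32 MB): Allocate(64 MB) creates
// a dedicated 64 MB region if no existing region can serve it, whereas
// Allocate(4096) is served from an existing region when possible and
// otherwise creates a 32 MB region that later small allocations share.
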
void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
    AshmemRegion* region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a couple of ashmem region
  // instances in |ashmem_regions_|.
  DCHECK_LE(ashmem_regions_.size(), 5U);
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region);
  DCHECK_NE(ashmem_regions_.end(), it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.pop_back();
}

}  // namespace internal
}  // namespace base