Chromium Code Reviews
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <algorithm>
#include <cmath>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_android.h"
#include "base/memory/scoped_vector.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: entry point of all allocations (through its
// Allocate() method) that are dispatched to the AshmemRegion instances (which
// it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
// whose instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.

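// For illustration (not part of this CL): a rough sketch of how a client
// might use the allocator defined below. Names that do not appear in this
// file (|source|, Consume()) are placeholders; the lock status constants are
// assumed to come from base/memory/discardable_memory.h.
//
//   base::internal::DiscardableMemoryAllocator allocator("MyAllocator");
//   scoped_ptr<base::DiscardableMemory> memory(allocator.Allocate(16 * 1024));
//   if (memory) {
//     // Chunks are returned locked; the memory is safe to use immediately.
//     memcpy(memory->Memory(), source, 16 * 1024);
//     memory->Unlock();  // The kernel may now purge the backing pages.
//     if (memory->Lock() == base::DISCARDABLE_MEMORY_SUCCESS)
//       Consume(memory->Memory());  // Contents survived; reuse them.
//   }
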
namespace base {
namespace {

// Allow 8 KBytes of fragmentation inside used chunks.
const size_t kMaxChunkFragmentationBytes = 8192;
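
// For illustration (not part of this CL): with this limit, recycling a
// 24 KByte free chunk to serve a 4 KByte request would leave roughly
// 20 KBytes of locked, unused tail. Since that exceeds 8 KBytes,
// RecycleFreeChunk() below splits the chunk and reuses only its first pages;
// the remainder stays unlocked and purgeable.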

class DiscardableAshmemChunk : public DiscardableMemory {
 public:
  // Note that this is not replaced with base::Callback to save the extra heap
  // allocation.
  struct DeletionObserver {
    virtual void OnChunkDeletion(int fd,
                                 void* previous_chunk,
                                 void* chunk,
                                 size_t size) = 0;

   protected:
    virtual ~DeletionObserver() {}
  };

  DiscardableAshmemChunk(DeletionObserver* deletion_observer,
                         int fd,
                         void* previous_chunk,
                         void* address,
                         size_t offset,
                         size_t size)
      : deletion_observer_(deletion_observer),
        fd_(fd),
        previous_chunk_(previous_chunk),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  virtual ~DiscardableAshmemChunk() {
    if (locked_)
      internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
    deletion_observer_->OnChunkDeletion(fd_, previous_chunk_, address_, size_);
  }

  // DiscardableMemory:
  virtual size_t Size() const OVERRIDE {
    return size_;
  }

  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  DeletionObserver* const deletion_observer_;
  const int fd_;
  void* const previous_chunk_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

}  // namespace

namespace internal {

class DiscardableMemoryAllocator::AshmemRegion
    : public DiscardableAshmemChunk::DeletionObserver {
 public:
  typedef Callback<void (scoped_ptr<AshmemRegion>)> DeletionCallback;

  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      Lock* lock,
      const DeletionCallback& deletion_callback) {
    int fd;
    void* base;
    if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(
        new AshmemRegion(fd, size, base, lock, deletion_callback));
  }

  virtual ~AshmemRegion() {
    const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
    DCHECK(result);
  }

  scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
                                         size_t actual_size) {
    lock_->AssertAcquired();
    scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();
    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested size.
      return scoped_ptr<DiscardableMemory>();
    }
    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, last_allocated_chunk_, address,
                                   offset_, actual_size));
    last_allocated_chunk_ = address;
    offset_ += actual_size;
    return memory.Pass();
  }

 private:
  struct FreeChunk {
    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  AshmemRegion(int fd,
               size_t size,
               void* base,
               Lock* lock,
               const DeletionCallback& deletion_callback)
      : fd_(fd),
        size_(size),
        base_(base),
        offset_(0),
        lock_(lock),
        deletion_callback_(deletion_callback),
        last_allocated_chunk_(NULL) {
  }

  // DiscardableAshmemChunk::DeletionObserver:
  virtual void OnChunkDeletion(int fd,
                               void* previous_chunk,
                               void* chunk,
                               size_t size) OVERRIDE {
    base::AutoLock auto_lock(*lock_);
    MergeAndAddFreeChunk(fd, previous_chunk, chunk, size);
  }

  // Tries to reuse a previously freed chunk by doing a closest size match.
  scoped_ptr<DiscardableMemory> RecycleFreeChunk(size_t client_requested_size,
                                                 size_t actual_size) {
    lock_->AssertAcquired();
    const std::multiset<FreeChunk>::iterator chunk_it =
        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
    if (chunk_it == free_chunks_.end())
      return scoped_ptr<DiscardableMemory>();
    size_t recycled_chunk_size = chunk_it->size;
    const size_t fragmentation_bytes = chunk_it->size - client_requested_size;
    if (fragmentation_bytes >= kMaxChunkFragmentationBytes) {
      // Split the free chunk being recycled if it's too large so that its
      // unused tail doesn't get recycled (i.e. locked), which would prevent it
      // from being evicted under memory pressure.
      void* const previous_chunk = chunk_it->start;
      void* const chunk_start =
          static_cast<char*>(chunk_it->start) + actual_size;
      const size_t chunk_size = chunk_it->size - actual_size;
      // Note that merging is not needed here since there can't be contiguous
      // free chunks at this point.
      AddFreeChunk(FreeChunk(previous_chunk, chunk_start, chunk_size));
      recycled_chunk_size = actual_size;
    }
    const size_t offset =
        static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
    internal::LockAshmemRegion(
        fd_, offset, recycled_chunk_size, chunk_it->start);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(
            this, fd_, chunk_it->previous_chunk, chunk_it->start, offset,
            recycled_chunk_size));
    free_chunk_for_address_.erase(reinterpret_cast<uintptr_t>(chunk_it->start));
    free_chunks_.erase(chunk_it);
    return memory.Pass();
  }
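
  // For illustration (not part of this CL): the closest size match above
  // relies on std::multiset keeping FreeChunk entries ordered by size (see
  // FreeChunk::operator<), so lower_bound() returns the smallest free chunk
  // at least as large as |actual_size|. For instance, with free chunks of
  // 4096, 8192 and 12288 bytes:
  //
  //   std::multiset<size_t> sizes;
  //   sizes.insert(4096);
  //   sizes.insert(8192);
  //   sizes.insert(12288);
  //   std::multiset<size_t>::const_iterator it = sizes.lower_bound(5000);
  //   // *it == 8192; sizes.end() would have meant no chunk was large enough.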

  // Makes the chunk identified with the provided arguments free and possibly
  // merges this chunk with the previous and next contiguous ones.
  // If the provided chunk is the only one used (and going to be freed) in the
  // region then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, therefore they can be reclaimed by the
  // kernel if needed (under memory pressure). Unfortunately they are not
  // immediately released, since madvise(MADV_REMOVE) and
  // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
  // change in kernel versions >= 3.5 though. The fact that free chunks are not
  // immediately released is the reason why we are trying to minimize
  // fragmentation.
  void MergeAndAddFreeChunk(int fd,
pasko  2013/10/28 17:44:45
FYI: parameter |fd| is not used.

Philippe  2013/10/28 17:50:22
Wow, thanks! It was used at some point I believe b[…]
                            void* previous_chunk,
                            void* chunk,
                            size_t size) {
    lock_->AssertAcquired();
    size_t new_free_chunk_size = size;
    void* first_free_chunk = chunk;
    while (previous_chunk) {
      const FreeChunk free_chunk = RemoveFreeChunk(previous_chunk);
      if (free_chunk.is_null())
        break;
      new_free_chunk_size += free_chunk.size;
      first_free_chunk = previous_chunk;
      previous_chunk = free_chunk.previous_chunk;
    }
    const void* next_chunk = static_cast<const char*>(chunk) + size;
    while (true) {
pasko  2013/10/28 18:10:33
Really a nit:
I generally prefer this style of lo[…]

Philippe  2013/10/29 10:11:25
Thanks for the snippet Egor :) The while loop look[…]

pasko  2013/10/29 10:47:57
Hm, I did not think of asymmetry being a concern.

Philippe  2013/10/29 10:54:35
As you may have noticed I'm not super generous in[…]
      const FreeChunk free_chunk = RemoveFreeChunk(next_chunk);
      if (free_chunk.is_null())
        break;
      new_free_chunk_size += free_chunk.size;
      next_chunk = static_cast<const char*>(next_chunk) + free_chunk.size;
    }
    const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }
    // The whole ashmem region is free thus it can be deleted.
    DCHECK_EQ(size_, new_free_chunk_size);
    DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());
    deletion_callback_.Run(make_scoped_ptr(this));  // Deletes |this|.
  }
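
  // For illustration (not part of this CL): one possible sequence, assuming
  // three chunks A, B and C allocated back to back (A first).
  // - Freeing B: neither neighbour is free, so a free chunk starting at B is
  //   added.
  // - Freeing C: the backward loop follows C's |previous_chunk| link to B,
  //   which is free, so B and C merge into a single free chunk starting at B.
  // - Freeing A: the forward loop finds the free chunk at A's end address (B);
  //   the merged chunk now spans the whole region, so |deletion_callback_| is
  //   run and the ashmem region is closed.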

  void AddFreeChunk(const FreeChunk& free_chunk) {
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    free_chunk_for_address_.insert(
        std::make_pair(reinterpret_cast<uintptr_t>(free_chunk.start), it));
  }

  // Finds and unlinks the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
  // whose content is null if it was not found.
  FreeChunk RemoveFreeChunk(const void* chunk_start) {
    lock_->AssertAcquired();
    const base::hash_map<
        uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
            free_chunk_for_address_.find(
                reinterpret_cast<uintptr_t>(chunk_start));
    if (it == free_chunk_for_address_.end())
      return FreeChunk(NULL, NULL, 0U);
    const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
    const FreeChunk free_chunk(*free_chunk_it);
    DCHECK_EQ(chunk_start, free_chunk.start);
    free_chunk_for_address_.erase(it);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  void* const base_;
  size_t offset_;
  base::Lock* const lock_;
  const DeletionCallback deletion_callback_;
  void* last_allocated_chunk_;
  std::multiset<FreeChunk> free_chunks_;
  base::hash_map<
      uintptr_t, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
    : name_(name) {
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());
}

scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  const size_t aligned_size = internal::AlignToNextPage(size);
  base::AutoLock auto_lock(lock_);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  scoped_ptr<AshmemRegion> new_region(
      AshmemRegion::Create(
          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
          name_.c_str(),
          &lock_,
          base::Bind(&DiscardableMemoryAllocator::DeleteAshmemRegion,
                     base::Unretained(this))));
  if (!new_region) {
    // TODO(pliard): consider adding a histogram to see how often this happens.
    return scoped_ptr<DiscardableMemory>();
  }
  ashmem_regions_.push_back(new_region.release());
  return ashmem_regions_.back()->Allocate(size, aligned_size);
}
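
// For illustration (not part of this CL): requested sizes are rounded up to
// the page size, and new regions are at least kMinAshmemRegionSize (declared
// in the header) large. Assuming a 4 KByte page size and a 32 MByte minimum
// region size:
//
//   allocator.Allocate(100);       // 4096-byte chunk inside a 32 MByte region.
//   allocator.Allocate(64 << 20);  // Dedicated 64 MByte region, single chunk.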

void DiscardableMemoryAllocator::DeleteAshmemRegion(
    scoped_ptr<AshmemRegion> region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a couple of ashmem region
  // instances in |ashmem_regions_|.
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region.get());
  DCHECK_NE(ashmem_regions_.end(), it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.resize(ashmem_regions_.size() - 1);
  // |region| was deleted by the resize() above.
  ignore_result(region.release());
}

}  // namespace internal
}  // namespace base