Chromium Code Reviews
| Index: base/memory/discardable_memory_allocator_android.cc |
| diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..1eaa0632f18139682a2b4a55f26c6cd1e8f07adf |
| --- /dev/null |
| +++ b/base/memory/discardable_memory_allocator_android.cc |
| @@ -0,0 +1,398 @@ |
| +// Copyright 2013 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/memory/discardable_memory_allocator.h" |
| + |
| +#include <algorithm> |
| +#include <set> |
| +#include <utility> |
| + |
| +#include "base/basictypes.h" |
| +#include "base/compiler_specific.h" |
| +#include "base/containers/hash_tables.h" |
| +#include "base/logging.h" |
| +#include "base/memory/discardable_memory.h" |
| +#include "base/memory/discardable_memory_android.h" |
| +#include "base/memory/scoped_vector.h" |
| +#include "base/strings/stringprintf.h" |
| +#include "base/synchronization/lock.h" |
| +#include "base/threading/thread_checker.h" |
| + |
| +namespace base { |
| +namespace { |
| + |
| +const size_t kDefaultAshmemRegionSize = 32 * 1024 * 1024; |
| +const int kInvalidFD = -1; |
| + |
| +class DiscardableMemoryChunk : public DiscardableMemory { |
|
pasko
2013/10/22 20:14:29
A top-level comment would be good since it is not
Philippe
2013/10/23 11:46:56
I added a small top-level comment introducing very
|
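For illustration, a hypothetical sketch of what such a file-level comment could cover, based only on the code in this CL (the actual wording was added in a later patch set):

    // Discardable memory allocator for Android. Ashmem is mapped in large
    // regions (at least kDefaultAshmemRegionSize) that are carved into chunks
    // handed out as DiscardableMemory instances. Freed chunks are kept in a
    // free list, recycled with a closest-size (best-fit) match, merged with
    // contiguous free neighbors, and an ashmem region is closed once every
    // chunk in it has been freed.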
| + public: |
| + struct DeletionObserver { |
| + virtual void OnChunkDeletion(int fd, |
| + void* previous_chunk, |
| + void* chunk, |
| + size_t size) = 0; |
| + |
| + protected: |
| + ~DeletionObserver() {} |
| + }; |
| + |
| + DiscardableMemoryChunk(DeletionObserver* deletion_observer, |
| + int fd, |
| + void* previous_chunk, |
| + void* address, |
| + size_t offset, |
| + size_t size) |
| + : deletion_observer_(deletion_observer), |
| + fd_(fd), |
| + previous_chunk_(previous_chunk), |
| + address_(address), |
| + offset_(offset), |
| + size_(size), |
| + locked_(true) { |
| + } |
| + |
| + virtual ~DiscardableMemoryChunk() { |
| + if (locked_) |
| + internal::UnlockAshmemRegion(fd_, offset_, size_, address_); |
| + deletion_observer_->OnChunkDeletion(fd_, previous_chunk_, address_, size_); |
| + } |
| + |
| + // DiscardableMemory: |
| + virtual size_t Size() const OVERRIDE { |
| + return size_; |
| + } |
| + |
| + virtual LockDiscardableMemoryStatus Lock() OVERRIDE { |
| + DCHECK(!locked_); |
| + locked_ = true; |
| + return internal::LockAshmemRegion(fd_, offset_, size_, address_); |
| + } |
| + |
| + virtual void Unlock() OVERRIDE { |
| + DCHECK(locked_); |
| + locked_ = false; |
| + internal::UnlockAshmemRegion(fd_, offset_, size_, address_); |
| + } |
| + |
| + virtual void* Memory() const OVERRIDE { |
| + return address_; |
| + } |
| + |
| + private: |
| + DeletionObserver* const deletion_observer_; |
| + const int fd_; |
| + void* const previous_chunk_; |
| + void* const address_; |
| + const size_t offset_; |
| + const size_t size_; |
| + bool locked_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryChunk); |
| +}; |
| + |
| +class AshmemRegion : public DiscardableMemoryChunk::DeletionObserver { |
| + public: |
| + AshmemRegion(size_t size, const std::string& name) |
| + : fd_(kInvalidFD), |
| + base_(NULL), |
| + size_(size), |
| + offset_(0), |
| + name_(name), |
| + last_allocated_chunk_(NULL) { |
| + } |
| + |
| + ~AshmemRegion() { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + base::AutoLock auto_lock(lock_); |
| + if (!AshmemRegionClosed()) |
| + CloseAshmemRegion(); |
| + } |
| + |
| + scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size, |
| + size_t actual_size) { |
| + base::AutoLock auto_lock(lock_); |
| + if (AshmemRegionClosed()) |
| + if (!OpenAshmemRegion()) |
|
pasko
2013/10/22 20:14:29
did you mean:
if (AshmemRegionClosed() && !OpenAshmemRegion())
Philippe
2013/10/23 11:46:56
Yeah, it's the same, right? :)
|
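For reference, the two forms discussed above are indeed equivalent because the inner if has no else clause; a minimal side-by-side sketch:

    // Nested form, as written in the CL:
    if (AshmemRegionClosed())
      if (!OpenAshmemRegion())
        return scoped_ptr<DiscardableMemory>();

    // Flattened form, as suggested in the review:
    if (AshmemRegionClosed() && !OpenAshmemRegion())
      return scoped_ptr<DiscardableMemory>();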
| + return scoped_ptr<DiscardableMemory>(); |
| + scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk( |
| + client_requested_size, actual_size); |
| + if (memory) |
| + return memory.Pass(); |
| + if (size_ - offset_ < actual_size) { |
| + // This region does not have enough space left to hold the requested size. |
| + return scoped_ptr<DiscardableMemory>(); |
| + } |
| + void* const address = static_cast<char*>(base_) + offset_; |
| + memory.reset( |
| + new DiscardableMemoryChunk(this, fd_, last_allocated_chunk_, address, |
| + offset_, actual_size)); |
| + last_allocated_chunk_ = address; |
| + offset_ += actual_size; |
| + return memory.Pass(); |
| + } |
| + |
| + private: |
| + struct FreeChunk { |
| + FreeChunk(void* previous_chunk, void* start, size_t size) |
| + : previous_chunk(previous_chunk), |
| + start(start), |
| + size(size) { |
| + } |
| + |
| + void* const previous_chunk; |
| + void* const start; |
| + const size_t size; |
| + |
| + bool is_null() const { return !start; } |
| + |
| + bool operator<(const FreeChunk& other) const { |
| + return size < other.size; |
| + } |
| + }; |
| + |
| + // DiscardableMemoryChunk::DeletionObserver: |
| + virtual void OnChunkDeletion(int fd, |
| + void* previous_chunk, |
| + void* chunk, |
| + size_t size) OVERRIDE { |
| + base::AutoLock auto_lock(lock_); |
| + AddFreeChunk(fd, previous_chunk, chunk, size, |
| + MERGE_PREVIOUS_CHUNKS | MERGE_NEXT_CHUNKS); |
| + } |
| + |
| + // Tries to recycle a previously freed chunk by doing a closest size match. |
|
pasko
2013/10/22 20:14:29
nit: A synonym to "recycle" would help in the comm
Philippe
2013/10/23 11:46:56
Done.
|
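The "closest size match" below comes from the std::multiset being ordered by FreeChunk::operator< (i.e. by size) and queried with lower_bound(). A standalone sketch of that best-fit lookup, using simplified types that are not the CL's:

    #include <cstddef>
    #include <set>

    struct SimpleChunk {
      explicit SimpleChunk(std::size_t size) : size(size) {}
      std::size_t size;
      bool operator<(const SimpleChunk& other) const {
        return size < other.size;
      }
    };

    // Returns the smallest free chunk whose size is >= |wanted|, or end() if
    // no free chunk is large enough.
    std::multiset<SimpleChunk>::iterator FindBestFit(
        std::multiset<SimpleChunk>& free_chunks, std::size_t wanted) {
      return free_chunks.lower_bound(SimpleChunk(wanted));
    }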
| + scoped_ptr<DiscardableMemory> RecycleFreeChunk(size_t client_requested_size, |
| + size_t actual_size) { |
| + lock_.AssertAcquired(); |
| + const std::multiset<FreeChunk>::iterator chunk_it = |
| + free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size)); |
| + if (chunk_it == free_chunks_.end()) |
| + return scoped_ptr<DiscardableMemory>(); |
| + size_t recycled_chunk_size = chunk_it->size; |
| + const size_t fragmentation_kbytes = |
| + (chunk_it->size - client_requested_size) / 1024; |
| + if (fragmentation_kbytes >= 16) { |
|
pasko
2013/10/22 20:14:29
the code has quite a few heuristics, how about mak
Philippe
2013/10/23 11:46:56
I'm not a huge fan generally of externalizing cons
|
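One way to make the splitting heuristic below self-documenting, as suggested above; the constant name is hypothetical and the comparison is equivalent to the fragmentation_kbytes >= 16 test in the CL:

    // Split off the tail of a recycled free chunk when more than this much of
    // it would stay unused (and locked) beyond the client's request.
    const size_t kMaxChunkFragmentationBytes = 16 * 1024;  // 16 KiB

    // ... which turns the check into:
    //   if (chunk_it->size - client_requested_size >= kMaxChunkFragmentationBytes)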
| + // Split the free chunk being recycled if it's too large so that its |
| + // unused tail doesn't get recycled (i.e. locked) which would prevent it |
| + // from being evicted under memory pressure. |
| + const int fd = -1; |
| + void* const previous_chunk = chunk_it->start; |
| + AddFreeChunk( |
| + fd, previous_chunk, static_cast<char*>(chunk_it->start) + actual_size, |
| + chunk_it->size - actual_size, MERGE_NEXT_CHUNKS); |
| + recycled_chunk_size = actual_size; |
| + } |
| + const size_t offset = |
| + static_cast<char*>(chunk_it->start) - static_cast<char*>(base_); |
| + internal::LockAshmemRegion( |
| + fd_, offset, recycled_chunk_size, chunk_it->start); |
| + scoped_ptr<DiscardableMemory> memory( |
| + new DiscardableMemoryChunk( |
| + this, fd_, chunk_it->previous_chunk, chunk_it->start, offset, |
| + actual_size)); |
| + free_chunk_for_address_.erase(reinterpret_cast<uintptr_t>(chunk_it->start)); |
| + free_chunks_.erase(chunk_it); |
| + return memory.Pass(); |
| + } |
| + |
| + enum ContiguousChunksMergingFlags { |
|
pasko
2013/10/22 20:14:29
OK, splitting chunks is probably a good idea, but
Philippe
2013/10/23 11:46:56
I did see that merging was happening in production
Philippe
2013/10/24 08:35:53
For the record Egor and I observed offline that we
|
| + MERGE_PREVIOUS_CHUNKS = 1, |
| + MERGE_NEXT_CHUNKS = 2, |
| + }; |
| + |
| + // Marks the chunk identified by the provided arguments as free and possibly |
| + // merges this chunk with the previous and next contiguous ones according to |
| + // the value of |chunk_merging_flags|. |
| + // If the provided chunk is the only one used (and going to be freed) in the |
| + // region then the internal ashmem region is closed so that the underlying |
| + // physical pages are immediately released. |
| + // Note that free chunks are unlocked, therefore they can be reclaimed by the |
| + // kernel under memory pressure. Unfortunately they are not immediately |
| + // released, since madvise(MADV_REMOVE) and fallocate(FALLOC_FL_PUNCH_HOLE) |
| + // don't seem to work on ashmem. This might change in kernel versions >= 3.5 |
| + // though. |
| + void AddFreeChunk(int fd, |
| + void* previous_chunk, |
| + void* chunk, |
| + size_t size, |
| + int chunk_merging_flags) { |
| + lock_.AssertAcquired(); |
| + size_t new_free_chunk_size = size; |
| + void* first_free_chunk = chunk; |
| + if (chunk_merging_flags & MERGE_PREVIOUS_CHUNKS) { |
| + while (previous_chunk) { |
| + const FreeChunk free_chunk = UnlinkFreeChunk(previous_chunk); |
| + if (free_chunk.is_null()) |
| + break; |
| + new_free_chunk_size += free_chunk.size; |
| + first_free_chunk = previous_chunk; |
| + previous_chunk = free_chunk.previous_chunk; |
| + } |
| + } |
| + const void* next_chunk = static_cast<const char*>(chunk) + size; |
| + if (chunk_merging_flags & MERGE_NEXT_CHUNKS) { |
| + while (true) { |
| + const FreeChunk free_chunk = UnlinkFreeChunk(next_chunk); |
| + if (free_chunk.is_null()) |
| + break; |
| + new_free_chunk_size += free_chunk.size; |
| + next_chunk = static_cast<const char*>(next_chunk) + free_chunk.size; |
| + } |
| + } |
| + const bool whole_ashmem_region_is_free = new_free_chunk_size == size_; |
| + if (!whole_ashmem_region_is_free) { |
| + const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( |
| + FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); |
| + free_chunk_for_address_.insert( |
| + std::make_pair(reinterpret_cast<uintptr_t>(first_free_chunk), it)); |
| + return; |
| + } |
| + // The whole ashmem region is free thus it can be closed. Note that deleting |
| + // the instance and notifying the allocator would be cleaner (it would allow |
| + // |fd_| and |base_| to be immutable in particular) but this would imply |
| + // some non-trivial threading interactions since this method can be called |
| + // on any thread and the allocator has its own lock. |
| + DCHECK_EQ(size_, new_free_chunk_size); |
| + DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty()); |
| + DCHECK(!AshmemRegionClosed()); |
| + CloseAshmemRegion(); |
| + } |
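AddFreeChunk() above coalesces a newly freed chunk with its contiguous free neighbors by walking the explicit previous_chunk links and the address-indexed hash map. The coalescing idea itself can be shown with a much smaller, self-contained sketch that uses none of the CL's bookkeeping (FreeMap and AddAndCoalesce are hypothetical names):

    #include <cstddef>
    #include <map>

    // Free ranges within a region: start offset -> size.
    typedef std::map<std::size_t, std::size_t> FreeMap;

    void AddAndCoalesce(FreeMap* free_ranges, std::size_t start,
                        std::size_t size) {
      // Merge with a free range that ends exactly where this one starts.
      FreeMap::iterator it = free_ranges->lower_bound(start);
      if (it != free_ranges->begin()) {
        FreeMap::iterator prev = it;
        --prev;
        if (prev->first + prev->second == start) {
          start = prev->first;
          size += prev->second;
          free_ranges->erase(prev);
        }
      }
      // Merge with a free range that starts exactly where this one ends.
      it = free_ranges->find(start + size);
      if (it != free_ranges->end()) {
        size += it->second;
        free_ranges->erase(it);
      }
      (*free_ranges)[start] = size;
    }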
| + |
| + // Finds and unlinks the free chunk, if any, whose start address is |
| + // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk |
| + // whose content is null if it was not found. |
| + FreeChunk UnlinkFreeChunk(const void* chunk_start) { |
| + lock_.AssertAcquired(); |
| + const base::hash_map< |
| + uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it = |
| + free_chunk_for_address_.find( |
| + reinterpret_cast<uintptr_t>(chunk_start)); |
| + if (it == free_chunk_for_address_.end()) |
| + return FreeChunk(NULL, NULL, 0U); |
| + const std::multiset<FreeChunk>::iterator free_chunk_it = it->second; |
| + const FreeChunk free_chunk(*free_chunk_it); |
| + DCHECK_EQ(chunk_start, free_chunk.start); |
| + free_chunk_for_address_.erase(it); |
| + free_chunks_.erase(free_chunk_it); |
| + return free_chunk; |
| + } |
| + |
| + bool AshmemRegionClosed() const { |
| + lock_.AssertAcquired(); |
| + DCHECK((fd_ == kInvalidFD && !base_) || (fd_ != kInvalidFD && base_)); |
| + return fd_ == kInvalidFD; |
| + } |
| + |
| + void CloseAshmemRegion() { |
| + lock_.AssertAcquired(); |
| + DCHECK(fd_ != kInvalidFD && base_); |
| + const bool result = internal::DeleteAshmemRegion(fd_, size_, base_); |
| + DCHECK(result); |
| + fd_ = kInvalidFD; |
| + base_ = NULL; |
| + offset_ = 0U; |
| + last_allocated_chunk_ = NULL; |
| + } |
| + |
| + bool OpenAshmemRegion() { |
| + lock_.AssertAcquired(); |
| + DCHECK(fd_ == kInvalidFD && !base_ && !last_allocated_chunk_); |
| + int fd; |
| + void* address; |
| + if (!internal::CreateAshmemRegion(name_.c_str(), size_, &fd, &address)) |
| + return false; |
| + fd_ = fd; |
| + base_ = address; |
| + return true; |
| + } |
| + |
| + base::ThreadChecker thread_checker_; |
| + base::Lock lock_; // Protects the state below. |
| + int fd_; |
| + void* base_; |
| + const size_t size_; |
| + size_t offset_; |
| + const std::string name_; |
| + void* last_allocated_chunk_; |
| + std::multiset<FreeChunk> free_chunks_; |
| + base::hash_map< |
| + uintptr_t, std::multiset<FreeChunk>::iterator> free_chunk_for_address_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(AshmemRegion); |
| +}; |
| + |
| +class DiscardableMemoryAllocatorAndroid : public DiscardableMemoryAllocator { |
| + public: |
| + explicit DiscardableMemoryAllocatorAndroid(const std::string& name) |
| + : name_(name), |
| + force_use_allocator_(false) { |
| + } |
| + |
| + // Constructor used for testing. |force_use_allocator_| specifies whether the |
| + // allocator should unconditionally be used (i.e. not only once a certain |
| + // number of open ashmem file descriptors has been reached). This makes sure |
| + // that the unit tests don't only exercise the fast path that doesn't use |
| + // the allocator. |
| + DiscardableMemoryAllocatorAndroid(const std::string& name, |
| + bool force_use_allocator) |
| + : name_(name), |
| + force_use_allocator_(force_use_allocator) { |
| + } |
| + |
| + virtual ~DiscardableMemoryAllocatorAndroid() { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + } |
| + |
| + // DiscardableMemoryAllocator: |
| + virtual scoped_ptr<DiscardableMemory> Allocate(size_t size) OVERRIDE { |
| + // Use the actual allocator only once we are past a certain number of open |
|
pasko
2013/10/22 20:14:29
I find this comment a bit confusing. It requires s
Philippe
2013/10/23 11:46:56
Thanks. This part is now in discardable_memory_and
|
| + // ashmem file descriptors (=90% of the ashmem fd limit). Not using the |
| + // allocator allows us to immediately release the pages backing allocated |
| + // ashmem regions (by closing the ashmem fd) when the client requests to |
| + // delete them as opposed to only unlocking (=unpinning) them as the |
| + // allocator does. Merely unlocking them means that they are only released |
| + // under memory pressure, and keeping them resident can itself contribute to |
| + // that pressure. |
| + const bool use_allocator = force_use_allocator_ || |
| + internal::GetCurrentNumberOfAshmemFDs() > |
| + (0.9 * internal::GetAshmemFDLimit()); |
| + if (!use_allocator) |
| + return DiscardableMemory::CreateLockedMemory(size); |
| + const size_t aligned_size = internal::PageAlign(size); |
| + base::AutoLock auto_lock(lock_); |
| + for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); |
| + it != ashmem_regions_.end(); ++it) { |
| + scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size)); |
| + if (memory) |
| + return memory.Pass(); |
| + } |
| + ashmem_regions_.push_back( |
| + new AshmemRegion( |
| + std::max(kDefaultAshmemRegionSize, aligned_size), name_)); |
|
pasko
2013/10/22 20:14:29
The "Default" part in the name is confusing, shoul
Philippe
2013/10/23 11:46:56
Done.
|
| + return ashmem_regions_.back()->Allocate(size, aligned_size); |
| + } |
| + |
| + private: |
| + base::ThreadChecker thread_checker_; |
| + const std::string name_; |
| + const bool force_use_allocator_; // Used for testing. |
| + base::Lock lock_; // Protects the state below. |
| + ScopedVector<AshmemRegion> ashmem_regions_; |
| +}; |
| + |
| +} // namespace |
| + |
| +scoped_ptr<DiscardableMemoryAllocator> DiscardableMemoryAllocator::Create( |
| + const std::string& name) { |
| + return scoped_ptr<DiscardableMemoryAllocator>( |
| + new DiscardableMemoryAllocatorAndroid(name)); |
| +} |
| + |
| +scoped_ptr<DiscardableMemoryAllocator> |
| +CreateDiscardableMemoryAllocatorForTesting(const std::string& name) { |
| + const bool force_use_allocator = true; |
| + return scoped_ptr<DiscardableMemoryAllocator>( |
| + new DiscardableMemoryAllocatorAndroid(name, force_use_allocator)); |
| +} |
| + |
| +} // namespace base |
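For context, a minimal usage sketch of the interface this file implements, based on the factory function above and the DiscardableMemory methods used by DiscardableMemoryChunk. The function name, region name and sizes are arbitrary, and DISCARDABLE_MEMORY_SUCCESS is assumed to be the success value of LockDiscardableMemoryStatus in base/memory/discardable_memory.h at the time:

    #include <string.h>

    #include "base/memory/discardable_memory.h"
    #include "base/memory/discardable_memory_allocator.h"
    #include "base/memory/scoped_ptr.h"

    void UseDiscardableMemory() {
      scoped_ptr<base::DiscardableMemoryAllocator> allocator(
          base::DiscardableMemoryAllocator::Create("my-cache"));
      scoped_ptr<base::DiscardableMemory> memory(allocator->Allocate(64 * 1024));
      if (!memory)
        return;
      // Allocations start out locked (pinned), so the memory can be used
      // right away.
      memset(memory->Memory(), 0, memory->Size());
      // Unlock when idle so the kernel may reclaim the pages under pressure.
      memory->Unlock();
      // Re-lock before touching the memory again; if the pages were purged
      // while unlocked, the contents must be regenerated.
      if (memory->Lock() != base::DISCARDABLE_MEMORY_SUCCESS) {
        // Recreate the data here.
      }
    }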