Index: base/memory/discardable_memory_allocator_android.cc
diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc
new file mode 100644
index 0000000000000000000000000000000000000000..805dfa3836102678ca49baef16d5c40c94a1b494
--- /dev/null
+++ b/base/memory/discardable_memory_allocator_android.cc
@@ -0,0 +1,369 @@
|
| +// Copyright 2013 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "base/memory/discardable_memory_allocator_android.h"
|
| +
|
| +#include <cmath>
|
| +#include <set>
|
| +#include <utility>
|
| +
|
| +#include "base/basictypes.h"
|
| +#include "base/compiler_specific.h"
|
| +#include "base/containers/hash_tables.h"
|
| +#include "base/logging.h"
|
| +#include "base/memory/discardable_memory.h"
|
| +#include "base/memory/discardable_memory_android.h"
|
| +#include "base/memory/scoped_vector.h"
|
| +#include "base/strings/stringprintf.h"
|
| +#include "base/synchronization/lock.h"
|
| +#include "base/threading/thread_checker.h"
|
| +
+// The allocator consists of three parts (classes):
+// - DiscardableMemoryAllocator: entry point of all allocations (through its
+// Allocate() method) that are dispatched to the AshmemRegion instances (which
+// it owns).
+// - AshmemRegion: manages allocations and destructions inside a single large
+// (e.g. 32 MBytes) ashmem region.
+// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
+// whose instances are returned to the client. DiscardableAshmemChunk lets the
+// client seamlessly operate on a subrange of the ashmem region managed by
+// AshmemRegion.
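+//
+// Illustrative usage (a sketch only; assumes the declarations in
+// discardable_memory_allocator_android.h and discardable_memory.h):
+//
+//   DiscardableMemoryAllocator allocator("AllocatorName");
+//   scoped_ptr<DiscardableMemory> memory(allocator.Allocate(1024));
+//   memcpy(memory->Memory(), data, 1024);  // Chunks are created locked.
+//   memory->Unlock();  // The kernel may now purge the chunk's pages.
+//   const LockDiscardableMemoryStatus status = memory->Lock();
+//   // |status| indicates whether the previous contents were purged.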
+
+namespace base {
+namespace {
+
+const int kInvalidFD = -1;
+
+// Allow 8 KBytes of fragmentation inside used chunks.
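+// For instance, recycling a 20 KBytes free chunk for a 10 KBytes client
+// request would waste 10 KBytes (>= 8 KBytes), so such a chunk gets split
+// (see RecycleFreeChunk() below) rather than handed out whole.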
+const size_t kMaxChunkFragmentationBytes = 8192;
+
+class DiscardableAshmemChunk : public DiscardableMemory {
+ public:
+  struct DeletionObserver {
+    virtual void OnChunkDeletion(int fd,
+                                 void* previous_chunk,
+                                 void* chunk,
+                                 size_t size) = 0;
+
+   protected:
+    virtual ~DeletionObserver() {}
+  };
+
+  DiscardableAshmemChunk(DeletionObserver* deletion_observer,
+                         int fd,
+                         void* previous_chunk,
+                         void* address,
+                         size_t offset,
+                         size_t size)
+      : deletion_observer_(deletion_observer),
+        fd_(fd),
+        previous_chunk_(previous_chunk),
+        address_(address),
+        offset_(offset),
+        size_(size),
+        locked_(true) {
+  }
+
+  virtual ~DiscardableAshmemChunk() {
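+    // If the client left the chunk locked, unlock it before handing the
+    // range back to the region so that its pages can be reclaimed once the
+    // range is marked free.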
+    if (locked_)
+      internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
+    deletion_observer_->OnChunkDeletion(fd_, previous_chunk_, address_, size_);
+  }
+
+  // DiscardableMemory:
+  virtual size_t Size() const OVERRIDE {
+    return size_;
+  }
+
+  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
+    DCHECK(!locked_);
+    locked_ = true;
+    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
+  }
+
+  virtual void Unlock() OVERRIDE {
+    DCHECK(locked_);
+    locked_ = false;
+    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
+  }
+
+  virtual void* Memory() const OVERRIDE {
+    return address_;
+  }
+
+ private:
+  DeletionObserver* const deletion_observer_;
+  const int fd_;
+  void* const previous_chunk_;
+  void* const address_;
+  const size_t offset_;
+  const size_t size_;
+  bool locked_;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
+};
+
+}  // namespace
+
+namespace internal {
+
+class DiscardableMemoryAllocator::AshmemRegion
+    : public DiscardableAshmemChunk::DeletionObserver {
+ public:
+  AshmemRegion(size_t size, const std::string& name)
+      : fd_(kInvalidFD),
+        base_(NULL),
+        size_(size),
+        offset_(0),
+        name_(name),
+        last_allocated_chunk_(NULL) {
+  }
+
+  virtual ~AshmemRegion() {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    base::AutoLock auto_lock(lock_);
+    if (!AshmemRegionClosed())
+      CloseAshmemRegion();
+  }
+
+  scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
+                                         size_t actual_size) {
+    base::AutoLock auto_lock(lock_);
+    if (AshmemRegionClosed() && !OpenAshmemRegion())
+      return scoped_ptr<DiscardableMemory>();
+    scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk(
+        client_requested_size, actual_size);
+    if (memory)
+      return memory.Pass();
+    if (size_ - offset_ < actual_size) {
+      // This region does not have enough space left to hold the requested
+      // size.
+      return scoped_ptr<DiscardableMemory>();
+    }
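+    // Carve the new chunk out of the tail of the region: chunks are laid out
+    // contiguously, so the new chunk simply starts at |base_| + |offset_|.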
+    void* const address = static_cast<char*>(base_) + offset_;
+    memory.reset(
+        new DiscardableAshmemChunk(this, fd_, last_allocated_chunk_, address,
+                                   offset_, actual_size));
+    last_allocated_chunk_ = address;
+    offset_ += actual_size;
+    return memory.Pass();
+  }
+
+ private:
+  struct FreeChunk {
+    FreeChunk(void* previous_chunk, void* start, size_t size)
+        : previous_chunk(previous_chunk),
+          start(start),
+          size(size) {
+    }
+
+    void* const previous_chunk;
+    void* const start;
+    const size_t size;
+
+    bool is_null() const { return !start; }
+
+    bool operator<(const FreeChunk& other) const {
+      return size < other.size;
+    }
+  };
+
+  // DiscardableAshmemChunk::DeletionObserver:
+  virtual void OnChunkDeletion(int fd,
+                               void* previous_chunk,
+                               void* chunk,
+                               size_t size) OVERRIDE {
+    base::AutoLock auto_lock(lock_);
+    AddFreeChunk(fd, previous_chunk, chunk, size,
+                 MERGE_PREVIOUS_CHUNKS | MERGE_NEXT_CHUNKS);
+  }
+
+  // Tries to reuse a previously freed chunk by doing a closest size match.
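+  // Note that |free_chunks_| is a multiset ordered by chunk size (see
+  // FreeChunk::operator< above), so lower_bound() yields the smallest free
+  // chunk that is large enough, i.e. a best-fit strategy.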
+  scoped_ptr<DiscardableMemory> RecycleFreeChunk(size_t client_requested_size,
+                                                 size_t actual_size) {
+    lock_.AssertAcquired();
+    const std::multiset<FreeChunk>::iterator chunk_it =
+        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
+    if (chunk_it == free_chunks_.end())
+      return scoped_ptr<DiscardableMemory>();
+    size_t recycled_chunk_size = chunk_it->size;
+    const size_t fragmentation_bytes = chunk_it->size - client_requested_size;
+    if (fragmentation_bytes >= kMaxChunkFragmentationBytes) {
+      // Split the free chunk being recycled if it's too large so that its
+      // unused tail doesn't get recycled (i.e. locked) which would prevent it
+      // from being evicted under memory pressure.
+      const int fd = -1;
+      void* const previous_chunk = chunk_it->start;
+      AddFreeChunk(
+          fd, previous_chunk, static_cast<char*>(chunk_it->start) + actual_size,
+          chunk_it->size - actual_size, MERGE_NEXT_CHUNKS);
+      recycled_chunk_size = actual_size;
+    }
+    const size_t offset =
+        static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
+    internal::LockAshmemRegion(
+        fd_, offset, recycled_chunk_size, chunk_it->start);
+    scoped_ptr<DiscardableMemory> memory(
+        new DiscardableAshmemChunk(
+            this, fd_, chunk_it->previous_chunk, chunk_it->start, offset,
+            recycled_chunk_size));
+    free_chunk_for_address_.erase(reinterpret_cast<uintptr_t>(chunk_it->start));
+    free_chunks_.erase(chunk_it);
+    return memory.Pass();
+  }
+
+  enum ContiguousChunksMergingFlags {
+    MERGE_PREVIOUS_CHUNKS = 1,
+    MERGE_NEXT_CHUNKS = 2,
+  };
+
+  // Makes the chunk identified by the provided arguments free and possibly
+  // merges it with the previous and next contiguous chunks according to the
+  // value of |chunk_merging_flags|.
+  // If the provided chunk is the only one used (and about to be freed) in the
+  // region then the internal ashmem region is closed so that the underlying
+  // physical pages are immediately released.
+  // Note that free chunks are unlocked, therefore the kernel can reclaim them
+  // under memory pressure, but unfortunately they are not released immediately
+  // since madvise(MADV_REMOVE) and fallocate(FALLOC_FL_PUNCH_HOLE) don't seem
+  // to work on ashmem. This might change in kernel versions >= 3.5 though.
+  // The fact that free chunks are not immediately released is the reason why
+  // we try to minimize fragmentation.
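+  // For example, if the region layout is |A free|B used|C free| and B is
+  // freed with both merge flags set, then A, B and C get coalesced into a
+  // single free chunk spanning the three of them.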
+  void AddFreeChunk(int fd,
+                    void* previous_chunk,
+                    void* chunk,
+                    size_t size,
+                    int chunk_merging_flags) {
+    lock_.AssertAcquired();
+    size_t new_free_chunk_size = size;
+    void* first_free_chunk = chunk;
+    if (chunk_merging_flags & MERGE_PREVIOUS_CHUNKS) {
+      while (previous_chunk) {
+        const FreeChunk free_chunk = UnlinkFreeChunk(previous_chunk);
+        if (free_chunk.is_null())
+          break;
+        new_free_chunk_size += free_chunk.size;
+        first_free_chunk = previous_chunk;
+        previous_chunk = free_chunk.previous_chunk;
+      }
+    }
+    const void* next_chunk = static_cast<const char*>(chunk) + size;
+    if (chunk_merging_flags & MERGE_NEXT_CHUNKS) {
+      while (true) {
+        const FreeChunk free_chunk = UnlinkFreeChunk(next_chunk);
+        if (free_chunk.is_null())
+          break;
+        new_free_chunk_size += free_chunk.size;
+        next_chunk = static_cast<const char*>(next_chunk) + free_chunk.size;
+      }
+    }
+    const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
+    if (!whole_ashmem_region_is_free) {
+      const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
+          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
+      free_chunk_for_address_.insert(
+          std::make_pair(reinterpret_cast<uintptr_t>(first_free_chunk), it));
+      return;
+    }
+    // The whole ashmem region is free thus it can be closed. Note that
+    // deleting the instance and notifying the allocator would be cleaner (it
+    // would allow |fd_| and |base_| to be immutable in particular) but this
+    // would imply some non-trivial threading interactions since this method
+    // can be called on any thread and the allocator has its own lock.
+    DCHECK_EQ(size_, new_free_chunk_size);
+    DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());
+    DCHECK(!AshmemRegionClosed());
+    CloseAshmemRegion();
+  }
+
+  // Finds and unlinks the free chunk, if any, whose start address is
+  // |chunk_start|. Returns a copy of the unlinked free chunk, or a FreeChunk
+  // whose content is null if it was not found.
+  FreeChunk UnlinkFreeChunk(const void* chunk_start) {
+    lock_.AssertAcquired();
+    const base::hash_map<
+        uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
+            free_chunk_for_address_.find(
+                reinterpret_cast<uintptr_t>(chunk_start));
+    if (it == free_chunk_for_address_.end())
+      return FreeChunk(NULL, NULL, 0U);
+    const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
+    const FreeChunk free_chunk(*free_chunk_it);
+    DCHECK_EQ(chunk_start, free_chunk.start);
+    free_chunk_for_address_.erase(it);
+    free_chunks_.erase(free_chunk_it);
+    return free_chunk;
+  }
+
+  bool AshmemRegionClosed() const {
+    lock_.AssertAcquired();
+    DCHECK((fd_ == kInvalidFD && !base_) || (fd_ != kInvalidFD && base_));
+    return fd_ == kInvalidFD;
+  }
+
+  void CloseAshmemRegion() {
+    lock_.AssertAcquired();
+    DCHECK(fd_ != kInvalidFD && base_);
+    const bool result = internal::DeleteAshmemRegion(fd_, size_, base_);
+    DCHECK(result);
+    fd_ = kInvalidFD;
+    base_ = NULL;
+    offset_ = 0U;
+    last_allocated_chunk_ = NULL;
+  }
+
+  bool OpenAshmemRegion() {
+    lock_.AssertAcquired();
+    DCHECK(fd_ == kInvalidFD && !base_ && !last_allocated_chunk_);
+    int fd;
+    void* address;
+    if (!internal::CreateAshmemRegion(name_.c_str(), size_, &fd, &address))
+      return false;
+    fd_ = fd;
+    base_ = address;
+    return true;
+  }
+
+  base::ThreadChecker thread_checker_;
+  base::Lock lock_;  // Protects the state below.
+  int fd_;
+  void* base_;
+  const size_t size_;
+  size_t offset_;
+  const std::string name_;
+  void* last_allocated_chunk_;
+  std::multiset<FreeChunk> free_chunks_;
+  base::hash_map<
+      uintptr_t, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;
+
+  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
+};
+
+DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
+    : name_(name) {
+}
+
+DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
+    size_t size) {
+  const size_t aligned_size = internal::AlignToNextPage(size);
+  base::AutoLock auto_lock(lock_);
+  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
+       it != ashmem_regions_.end(); ++it) {
+    scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size));
+    if (memory)
+      return memory.Pass();
+  }
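+  // No existing region had enough free space; create a new region of at
+  // least kMinAshmemRegionSize bytes so that future small allocations can
+  // share it.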
+  ashmem_regions_.push_back(
+      new AshmemRegion(
+          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
+          name_.c_str()));
+  return ashmem_regions_.back()->Allocate(size, aligned_size);
+}
+
+}  // namespace internal
+}  // namespace base