Index: base/memory/discardable_memory_allocator_android.cc
diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a212f2b853f836499f2e58bba1e09a737330b133
--- /dev/null
+++ b/base/memory/discardable_memory_allocator_android.cc
@@ -0,0 +1,284 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
| +#include "base/memory/discardable_memory_allocator.h"
|
| +
|
| +#include <sys/mman.h>
|
| +#include <unistd.h>
|
| +
|
| +#include <cmath>
|
| +#include <set>
|
| +#include <utility>
|
| +
|
| +#include "base/basictypes.h"
|
| +#include "base/compiler_specific.h"
|
| +#include "base/containers/hash_tables.h"
|
| +#include "base/logging.h"
|
| +#include "base/memory/discardable_memory.h"
|
| +#include "base/memory/discardable_memory_android.h"
|
| +#include "base/memory/linked_ptr.h"
|
| +#include "base/memory/scoped_vector.h"
|
| +#include "base/strings/stringprintf.h"
|
| +#include "base/synchronization/lock.h"
|
| +#include "base/threading/platform_thread.h"
|
| +#include "base/threading/thread_checker.h"
|
| +
|
+namespace base {
+namespace {
+
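+// Default size of the ashmem regions from which chunks are carved. A large
+// region amortizes its creation cost over many allocations, and free chunks
+// cost no physical memory since their pages are not committed.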
+const size_t kDefaultAshmemRegionSize = 32 * 1024 * 1024;
+
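+// Rounds up |size| to a multiple of |page_size|, which must be a power of
+// two (as values returned by getpagesize() are).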
+size_t PageAlign(size_t size, size_t page_size) {
+  const size_t mask = ~(page_size - 1);
+  return (size + page_size - 1) & mask;
+}
+
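+// A DiscardableMemory implementation backed by a sub-range of a larger
+// ashmem region. Deleting a chunk does not unmap anything; it merely
+// notifies the parent region so that the range can be recycled.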
+class DiscardableMemoryChunk : public DiscardableMemory {
+ public:
+  struct DeletionObserver {
+    virtual void OnChunkDeletion(void* addr, size_t size, bool locked) = 0;
+
+   protected:
+    // Observers are never deleted through this interface.
+    ~DeletionObserver() {}
+  };
+
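+  // Note that |locked_| starts out true: chunks are handed out in the locked
+  // (pinned) state.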
+  DiscardableMemoryChunk(DeletionObserver* deletion_observer,
+                         int fd,
+                         void* address,
+                         size_t offset,
+                         size_t client_requested_size,
+                         size_t actual_size)
+      : DiscardableMemory(client_requested_size),
+        deletion_observer_(deletion_observer),
+        fd_(fd),
+        address_(address),
+        offset_(offset),
+        size_(actual_size),
+        locked_(true) {
+  }
+
+  virtual ~DiscardableMemoryChunk() {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    // Unpin the chunk before returning it so that the parent region can mark
+    // its pages as purgeable.
+    if (locked_)
+      Unlock();
+    deletion_observer_->OnChunkDeletion(address_, size_, locked_);
+  }
+
+  // DiscardableMemory:
+  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    locked_ = true;
+    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
+  }
+
+  virtual void Unlock() OVERRIDE {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    locked_ = false;
+    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
+  }
+
+  virtual void* Memory() const OVERRIDE {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    DCHECK(locked_);
+    return address_;
+  }
+
+ private:
+  base::ThreadChecker thread_checker_;
+  DeletionObserver* const deletion_observer_;
+  const int fd_;
+  void* const address_;
+  const size_t offset_;
+  const size_t size_ : sizeof(size_t) * 8 - 1;
+  bool locked_ : 1;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryChunk);
+};
+
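+// Owns a single ashmem region and hands out DiscardableMemoryChunk instances
+// carved from it. Freed chunks are kept in a free list, with their pages
+// marked purgeable, and are recycled by later allocations.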
+class AshmemRegion : public DiscardableMemoryChunk::DeletionObserver {
+ public:
+  ~AshmemRegion() {
+    const bool result = internal::DeleteAshmemRegion(fd_, size_, base_);
+    DCHECK(result);
+  }
+
+  // DiscardableMemoryChunk::DeletionObserver:
+  virtual void OnChunkDeletion(void* addr, size_t size, bool locked) OVERRIDE {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    free_chunks_.insert(FreeChunk(addr, size, locked));
+    // Keep the chunk mapped but let the kernel know that it can immediately
+    // release the underlying physical pages.
+    const int result = madvise(addr, size, MADV_DONTNEED);
+    DCHECK_NE(-1, result);
+  }
+
+  static scoped_ptr<AshmemRegion> Create(size_t size, const char* name) {
+    int fd;
+    void* address;
+    if (!internal::CreateAshmemRegion(name, size, &fd, &address))
+      return scoped_ptr<AshmemRegion>();
+    return make_scoped_ptr(new AshmemRegion(fd, address, size));
+  }
+
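+  // Returns a chunk of at least |actual_size| bytes, preferring to recycle
+  // the smallest suitable free chunk (best-fit) before carving new space out
+  // of the region's tail. Returns NULL when the region is exhausted.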
+  scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
+                                         size_t actual_size) {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    const std::multiset<FreeChunk>::iterator chunk_it =
+        free_chunks_.lower_bound(FreeChunk(NULL, actual_size, false));
+    if (chunk_it != free_chunks_.end()) {
+      // Recommit the pages of the free chunk being recycled.
+      const int result = madvise(chunk_it->start, chunk_it->size, MADV_NORMAL);
+      DCHECK_NE(-1, result);
+      const size_t offset =
+          static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
+      if (!chunk_it->locked) {
+        // Lock the chunk being recycled if it was left in an unlocked state.
+        internal::LockAshmemRegion(
+            fd_, offset, chunk_it->size, chunk_it->start);
+      }
+      // Hand out the whole free chunk, which may be larger than
+      // |actual_size|, so that its full extent comes back to the free list
+      // upon deletion rather than leaking the unused tail.
+      scoped_ptr<DiscardableMemory> memory(
+          new DiscardableMemoryChunk(this, fd_, chunk_it->start, offset,
+                                     client_requested_size, chunk_it->size));
+      free_chunks_.erase(chunk_it);
+      return memory.Pass();
+    }
+    if (size_ - offset_ < actual_size) {
+      // This region does not have enough space left to hold the requested
+      // size.
+      return scoped_ptr<DiscardableMemory>();
+    }
+    void* const address = static_cast<char*>(base_) + offset_;
+    scoped_ptr<DiscardableMemory> memory(
+        new DiscardableMemoryChunk(
+            this, fd_, address, offset_, client_requested_size, actual_size));
+    offset_ += actual_size;
+    return memory.Pass();
+  }
+
+ private:
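+  // A freed chunk awaiting reuse. |size| and |locked| are declared as
+  // bitfields so that the lock bit can be packed with the size. Ordering is
+  // by |size| so that lower_bound() performs a best-fit lookup.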
+  struct FreeChunk {
+    FreeChunk(void* start, size_t size, bool locked)
+        : start(start),
+          size(size),
+          locked(locked) {
+    }
+
+    void* const start;
+    const size_t size : sizeof(size_t) * 8 - 1;
+    const bool locked : 1;
+
+    bool operator<(const FreeChunk& other) const {
+      return size < other.size;
+    }
+  };
+
+  AshmemRegion(int fd, void* base, size_t size)
+      : fd_(fd),
+        base_(base),
+        size_(size),
+        offset_(0) {
+  }
+
+  const int fd_;
+  void* const base_;
+  const size_t size_;
+  size_t offset_;
+  std::multiset<FreeChunk> free_chunks_;
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
+};
+
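+// Single-threaded allocator which owns a growable list of ashmem regions and
+// serves each allocation, rounded up to a whole number of pages, from the
+// first region able to satisfy it.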
+class DiscardableMemoryAllocatorAndroid : public DiscardableMemoryAllocator {
+ public:
+  explicit DiscardableMemoryAllocatorAndroid(const std::string& name)
+      : page_size_(getpagesize()),
+        name_(name) {
+  }
+
+  virtual ~DiscardableMemoryAllocatorAndroid() {
+    DCHECK(thread_checker_.CalledOnValidThread());
+  }
+
+  // DiscardableMemoryAllocator:
+  virtual scoped_ptr<DiscardableMemory> Allocate(size_t size) OVERRIDE {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    const size_t aligned_size = PageAlign(size, page_size_);
+    for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
+         it != ashmem_regions_.end(); ++it) {
+      scoped_ptr<DiscardableMemory> memory(
+          (*it)->Allocate(size, aligned_size));
+      if (memory)
+        return memory.Pass();
+    }
+    // No existing region can hold the request; create a new one large enough
+    // for it.
+    scoped_ptr<AshmemRegion> ashmem_region = AshmemRegion::Create(
+        std::max(kDefaultAshmemRegionSize, aligned_size), name_.c_str());
+    if (!ashmem_region)
+      return scoped_ptr<DiscardableMemory>();
+
+    ashmem_regions_.push_back(ashmem_region.release());
+    return ashmem_regions_.back()->Allocate(size, aligned_size);
+  }
+
+ private:
+  const size_t page_size_;
+  const std::string name_;
+  ScopedVector<AshmemRegion> ashmem_regions_;
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAllocatorAndroid);
+};
+
+// Stores per-thread allocator instances and dispatches allocations to them.
+// Note that not sharing free chunks across threads doesn't increase the
+// overall memory footprint since free chunks are not committed.
+class ThreadSafeAllocatorWrapper : public DiscardableMemoryAllocator {
+ public:
+  explicit ThreadSafeAllocatorWrapper(const std::string& name)
+      : name_(name) {}
+
+  // DiscardableMemoryAllocator:
+  virtual scoped_ptr<DiscardableMemory> Allocate(size_t size) OVERRIDE {
+    const base::PlatformThreadId thread_id = base::PlatformThread::CurrentId();
+    DiscardableMemoryAllocatorAndroid* thread_allocator = NULL;
+    {
+      const base::AutoLock auto_lock(lock_);
+      // Create the current thread's allocator on first use.
+      const std::pair<AllocatorMap::iterator, bool> result =
+          per_thread_allocators_.insert(
+              std::make_pair(
+                  thread_id,
+                  linked_ptr<DiscardableMemoryAllocatorAndroid>(NULL)));
+      const bool did_insert = result.second;
+      if (did_insert) {
+        result.first->second.reset(
+            new DiscardableMemoryAllocatorAndroid(
+                base::StringPrintf("%s-Thread-%d", name_.c_str(), thread_id)));
+      }
+      thread_allocator = result.first->second.get();
+    }
+    // The unlocked call below is safe: each per-thread allocator is only ever
+    // used from the thread it was created for.
+    return thread_allocator->Allocate(size);
+  }
+
+ private:
+  typedef base::hash_map<base::PlatformThreadId,
+                         linked_ptr<DiscardableMemoryAllocatorAndroid> >
+      AllocatorMap;
+
+  const std::string name_;
+  // Protects the hash_map below.
+  base::Lock lock_;
+  // Stores one allocator per thread.
+  AllocatorMap per_thread_allocators_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadSafeAllocatorWrapper);
+};
+
+}  // namespace
+
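+// Example usage, assuming |data| points to |data_size| bytes worth caching:
+//
+//   scoped_ptr<DiscardableMemoryAllocator> allocator(
+//       DiscardableMemoryAllocator::Create("MyCache"));
+//   scoped_ptr<DiscardableMemory> memory(allocator->Allocate(data_size));
+//   memcpy(memory->Memory(), data, data_size);  // Chunks start out locked.
+//   memory->Unlock();  // The kernel may now discard the underlying pages.
+//   // A later Lock() reports whether the contents were purged and therefore
+//   // need to be regenerated before use.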
+scoped_ptr<DiscardableMemoryAllocator> DiscardableMemoryAllocator::Create(
+    const std::string& name) {
+  return scoped_ptr<DiscardableMemoryAllocator>(
+      new DiscardableMemoryAllocatorAndroid(name));
+}
+
+scoped_ptr<DiscardableMemoryAllocator>
+DiscardableMemoryAllocator::CreateThreadSafeInstance(
+    const std::string& name) {
+  return scoped_ptr<DiscardableMemoryAllocator>(
+      new ThreadSafeAllocatorWrapper(name));
+}
+
+}  // namespace base