Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(510)

Side by Side Diff: base/memory/discardable_memory_allocator_android.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Shorten critical section + add unit test Created 7 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/discardable_memory_allocator.h"
6
7 #include <sys/mman.h>
8 #include <unistd.h>
9
10 #include <cmath>
11 #include <set>
12 #include <utility>
13
14 #include "base/basictypes.h"
15 #include "base/compiler_specific.h"
16 #include "base/containers/hash_tables.h"
17 #include "base/logging.h"
18 #include "base/memory/discardable_memory.h"
19 #include "base/memory/discardable_memory_android.h"
20 #include "base/memory/linked_ptr.h"
21 #include "base/memory/scoped_vector.h"
22 #include "base/strings/stringprintf.h"
23 #include "base/synchronization/lock.h"
24 #include "base/threading/platform_thread.h"
25 #include "base/threading/thread_checker.h"
26
27 namespace base {
28 namespace {
29
30 const size_t kDefaultAshmemRegionSize = 32 * 1024 * 1024;
31
// Rounds |size| up to the nearest multiple of |page_size|.
// |page_size| must be a power of two.
size_t PageAlign(size_t size, size_t page_size) {
  return (size + page_size - 1) & ~(page_size - 1);
}
36
// A client-visible allocation carved out of an ashmem region. The chunk
// covers the byte range [offset, offset + actual_size) of the region's file
// descriptor and reports its own deletion to the DeletionObserver (the owning
// AshmemRegion) so that the range can be recycled.
class DiscardableMemoryChunk : public DiscardableMemory {
 public:
  // Implemented by the owning AshmemRegion; invoked from the chunk's
  // destructor to hand the address range back for reuse.
  struct DeletionObserver {
    virtual void OnChunkDeletion(void* addr, size_t size, bool locked) = 0;
  };

  // |client_requested_size| is the unaligned size the client asked for (only
  // forwarded to DiscardableMemory); |actual_size| is the size actually
  // reserved for the chunk. The chunk starts out in the locked (pinned)
  // state.
  DiscardableMemoryChunk(DeletionObserver* deletion_observer,
                         int fd,
                         void* address,
                         size_t offset,
                         size_t client_requested_size,
                         size_t actual_size)
      : DiscardableMemory(client_requested_size),
        deletion_observer_(deletion_observer),
        fd_(fd),
        address_(address),
        offset_(offset),
        size_(actual_size),
        locked_(true) {
  }

  virtual ~DiscardableMemoryChunk() {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (locked_)
      Unlock();
    // NOTE(review): Unlock() above resets |locked_| to false, so the observer
    // always sees locked == false here — confirm this is the intended
    // contract for OnChunkDeletion's |locked| parameter.
    deletion_observer_->OnChunkDeletion(address_, size_, locked_);
  }

  // DiscardableMemory:
  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK(locked_);  // The address is only valid while the chunk is pinned.
    return address_;
  }

 private:
  base::ThreadChecker thread_checker_;
  DeletionObserver* const deletion_observer_;  // Not owned.
  const int fd_;         // Ashmem region file descriptor (not owned).
  void* const address_;  // Start of this chunk's mapping.
  const size_t offset_;  // Byte offset of the chunk within the ashmem region.
  // |size_| and |locked_| are packed into a single word via bit-fields.
  const size_t size_ : sizeof(size_t) * 8 - 1;
  bool locked_ : 1;

  DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryChunk);
};
95
96 class AshmemRegion : public DiscardableMemoryChunk::DeletionObserver {
97 public:
98 ~AshmemRegion() {
99 const bool result = internal::DeleteAshmemRegion(fd_, size_, base_);
100 DCHECK(result);
101 }
102
103 // DiscardableMemoryChunk::DeletionObserver:
104 virtual void OnChunkDeletion(void* addr, size_t size, bool locked) OVERRIDE {
105 DCHECK(thread_checker_.CalledOnValidThread());
106 free_chunks_.insert(FreeChunk(addr, size, locked));
107 // Keep the chunk mapped but let the kernel know that it can immediately
108 // release the underlying physical pages.
109 const int result = madvise(addr, size, MADV_DONTNEED);
110 DCHECK_NE(-1, result);
111 }
112
113 static scoped_ptr<AshmemRegion> Create(size_t size, const char* name) {
114 int fd;
115 void* address;
116 if (!internal::CreateAshmemRegion(name, size, &fd, &address))
117 return scoped_ptr<AshmemRegion>();
118 return make_scoped_ptr(new AshmemRegion(fd, address, size));
119 }
120
121 scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
122 size_t actual_size) {
123 const std::multiset<FreeChunk>::iterator chunk_it =
124 free_chunks_.lower_bound(FreeChunk(NULL, actual_size, false));
125 if (chunk_it != free_chunks_.end()) {
126 const int result = madvise(chunk_it->start, chunk_it->size, MADV_NORMAL);
127 DCHECK_NE(-1, result);
128 const size_t offset =
129 static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
130 if (!chunk_it->locked) {
131 // Lock the chunk being recycled if it was left in an unlocked state.
132 internal::LockAshmemRegion(
133 fd_, offset, chunk_it->size, chunk_it->start);
134 }
135 scoped_ptr<DiscardableMemory> memory(
136 new DiscardableMemoryChunk(this, fd_, chunk_it->start, offset,
137 client_requested_size, actual_size));
138 free_chunks_.erase(chunk_it);
139 return memory.Pass();
140 }
141 if (size_ - offset_ < actual_size) {
142 // This region does not have enough space left to hold the requested size.
143 return scoped_ptr<DiscardableMemory>();
144 }
145 void* const address = static_cast<char*>(base_) + offset_;
146 scoped_ptr<DiscardableMemory> memory(
147 new DiscardableMemoryChunk(
148 this, fd_, address, offset_, client_requested_size, actual_size));
149 offset_ += actual_size;
150 return memory.Pass();
151 }
152
153 private:
154 struct FreeChunk {
155 FreeChunk(void* start, size_t size, bool locked)
156 : start(start),
157 size(size),
158 locked(locked) {
159 }
160
161 void* const start;
162 const size_t size : sizeof(size_t) * 8 - 1;
163 const bool locked : 1;
164
165 bool operator<(const FreeChunk& other) const {
166 return size < other.size;
167 }
168 };
169
170 AshmemRegion(int fd, void* base, size_t size)
171 : fd_(fd),
172 base_(base),
173 size_(size),
174 offset_(0) {
175 }
176
177 const int fd_;
178 void* const base_;
179 const size_t size_;
180 size_t offset_;
181 std::multiset<FreeChunk> free_chunks_;
182 base::ThreadChecker thread_checker_;
183
184 DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
185 };
186
187 class DiscardableMemoryAllocatorAndroid : public DiscardableMemoryAllocator {
188 public:
189 DiscardableMemoryAllocatorAndroid(const std::string& name)
190 : page_size_(getpagesize()),
191 name_(name) {
192 }
193
194 virtual ~DiscardableMemoryAllocatorAndroid() {
195 DCHECK(thread_checker_.CalledOnValidThread());
196 }
197
198 // DiscardableMemoryAllocator:
199 virtual scoped_ptr<DiscardableMemory> Allocate(size_t size) OVERRIDE {
200 DCHECK(thread_checker_.CalledOnValidThread());
201 const size_t aligned_size = PageAlign(size, page_size_);
202 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
203 it != ashmem_regions_.end(); ++it) {
204 scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size));
205 if (memory)
206 return memory.Pass();
207 }
208 scoped_ptr<AshmemRegion> ashmem_region = AshmemRegion::Create(
209 std::max(kDefaultAshmemRegionSize, aligned_size), name_.c_str());
210 if (!ashmem_region)
211 return scoped_ptr<DiscardableMemory>();
212
213 ashmem_regions_.push_back(ashmem_region.release());
214 return ashmem_regions_.back()->Allocate(size, aligned_size);
215 }
216
217 private:
218 const size_t page_size_;
219 const std::string name_;
220 ScopedVector<AshmemRegion> ashmem_regions_;
221 base::ThreadChecker thread_checker_;
222 };
223
224 // Stores per-thread allocator instances and dispatches allocations to them.
225 // Note that not sharing free chunks across threads doesn't increase the overall
226 // memory footprint since free chunks are not committed.
227 class ThreadSafeAllocatorWrapper : public DiscardableMemoryAllocator {
228 public:
229 ThreadSafeAllocatorWrapper(const std::string& name) : name_(name) {}
230
231 // DiscardableMemoryAllocator:
232 virtual scoped_ptr<DiscardableMemory> Allocate(size_t size) OVERRIDE {
233 const base::PlatformThreadId thread_id = base::PlatformThread::CurrentId();
234 DiscardableMemoryAllocatorAndroid* thread_allocator = NULL;
235 {
236 const base::AutoLock auto_lock(lock_);
237 const std::pair<
238 base::hash_map<
239 base::PlatformThreadId,
240 linked_ptr<DiscardableMemoryAllocatorAndroid> >::iterator,
241 bool> result =
242 per_thread_allocators_.insert(
243 std::make_pair(
244 thread_id,
245 linked_ptr<DiscardableMemoryAllocatorAndroid>(NULL)));
246 const bool did_insert = result.second;
247 if (did_insert) {
248 result.first->second.reset(
249 new DiscardableMemoryAllocatorAndroid(
250 base::StringPrintf("%s-Thread-%d", name_.c_str(), thread_id)));
251 }
252 thread_allocator = result.first->second.get();
253 }
254 return thread_allocator->Allocate(size);
255 }
256
257 private:
258 const std::string name_;
259 // Protects the hash_map below.
260 base::Lock lock_;
261 // Stores one allocator per thread.
262 base::hash_map<
263 base::PlatformThreadId,
264 linked_ptr<DiscardableMemoryAllocatorAndroid> > per_thread_allocators_;
265
266 DISALLOW_COPY_AND_ASSIGN(ThreadSafeAllocatorWrapper);
267 };
268
269 } // namespace
270
271 scoped_ptr<DiscardableMemoryAllocator> DiscardableMemoryAllocator::Create(
272 const std::string& name) {
273 return scoped_ptr<DiscardableMemoryAllocator>(
274 new DiscardableMemoryAllocatorAndroid(name));
275 }
276
277 scoped_ptr<DiscardableMemoryAllocator>
278 DiscardableMemoryAllocator::CreateThreadSafeInstance(
279 const std::string& name) {
280 return scoped_ptr<DiscardableMemoryAllocator>(
281 new ThreadSafeAllocatorWrapper(name));
282 }
283
284 } // namespace base
OLDNEW
« no previous file with comments | « base/memory/discardable_memory_allocator.cc ('k') | base/memory/discardable_memory_allocator_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698