Chromium Code Reviews

Side by Side Diff: base/memory/discardable_memory_allocator_android.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Add merging comments | Created 7 years, 1 month ago
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/discardable_memory_allocator_android.h"
6
7 #include <algorithm>
8 #include <cmath>
9 #include <set>
10 #include <utility>
11
12 #include "base/basictypes.h"
13 #include "base/bind.h"
14 #include "base/callback.h"
15 #include "base/compiler_specific.h"
16 #include "base/containers/hash_tables.h"
17 #include "base/logging.h"
18 #include "base/memory/discardable_memory.h"
19 #include "base/memory/discardable_memory_android.h"
20 #include "base/memory/scoped_vector.h"
21 #include "base/strings/stringprintf.h"
22 #include "base/synchronization/lock.h"
23 #include "base/threading/thread_checker.h"
24
25 // The allocator consists of three parts (classes):
26 // - DiscardableMemoryAllocator: entry point for all allocations (through its
27 // Allocate() method); allocations are dispatched to the AshmemRegion
28 // instances that it owns.
29 // - AshmemRegion: manages allocations and destructions inside a single large
30 // (e.g. 32 MBytes) ashmem region.
31 // - DiscardableAshmemChunk: implements the DiscardableMemory interface; its
32 // instances are returned to the client. DiscardableAshmemChunk lets the
33 // client seamlessly operate on a subrange of the ashmem region managed by
34 // AshmemRegion.
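For orientation, here is a minimal usage sketch of the allocator described in the comment above. It is illustrative only and not part of the patch: the allocator name, the sizes, and the DISCARDABLE_MEMORY_SUCCESS status value are assumptions based on base/memory/discardable_memory.h and on the Allocate() method defined further down in this file.

// Illustrative sketch, not part of the patch under review.
#include <string.h>

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator_android.h"
#include "base/memory/scoped_ptr.h"

void DiscardableAllocatorUsageSketch() {
  // A single allocator serves many small allocations out of a few large
  // ashmem regions, which is what keeps the file descriptor count low.
  base::internal::DiscardableMemoryAllocator allocator("ExampleAllocator");

  // Chunks are returned locked (i.e. immediately usable).
  scoped_ptr<base::DiscardableMemory> memory = allocator.Allocate(64 * 1024);
  if (!memory)
    return;
  memset(memory->Memory(), 0, memory->Size());

  // Unlocking allows the kernel to purge the pages under memory pressure.
  memory->Unlock();

  // The chunk must be locked again before reuse; any status other than
  // DISCARDABLE_MEMORY_SUCCESS (assumed enum value from discardable_memory.h)
  // means the contents may have been purged and must be regenerated.
  if (memory->Lock() != base::DISCARDABLE_MEMORY_SUCCESS)
    return;
  memset(memory->Memory(), 0, memory->Size());
  memory->Unlock();
}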
35
36 namespace base {
37 namespace {
38
39 // Allow 8 KBytes of fragmentation inside used chunks.
40 const size_t kMaxChunkFragmentationBytes = 8192;
41
42 class DiscardableAshmemChunk : public DiscardableMemory {
43 public:
44 // Note that a plain observer interface is used here rather than base::Callback
willchan no longer on Chromium 2013/11/18 14:53:13 My initial inclination is to call this premature optimization.
Philippe 2013/11/18 16:34:51 I would also call that premature optimization if t
willchan no longer on Chromium 2013/11/19 02:19:43 Since this isn't in a public header, I don't mind
45 // to save the extra heap allocation.
46 struct DeletionObserver {
willchan no longer on Chromium 2013/11/18 14:53:13 class?
Philippe 2013/11/18 16:34:51 Done.
47 virtual void OnChunkDeletion(void* previous_chunk,
48 void* chunk,
49 size_t size) = 0;
50
51 protected:
52 virtual ~DeletionObserver() {}
53 };
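As an editorial illustration of the base::Callback alternative discussed in the thread above (hypothetical, not part of the patch): the chunk would hold a bound callback instead of a DeletionObserver pointer, and the extra heap allocation the code comment refers to is the callback's internal ref-counted bind state. A minimal sketch, assuming the base::Bind/base::Callback headers already included by this file:

// Hypothetical alternative to DeletionObserver (illustration only).
// Same signature as DeletionObserver::OnChunkDeletion(): previous chunk,
// chunk address, chunk size.
typedef base::Callback<void (void*, void*, size_t)> ChunkDeletionCallback;

// The chunk would store the callback by value and run it on destruction,
// e.g. deletion_callback_.Run(previous_chunk_, address_, size_), and the
// owning AshmemRegion would bind itself when creating a chunk:
//   new DiscardableAshmemChunk(
//       base::Bind(&AshmemRegion::OnChunkDeletion, base::Unretained(this)),
//       fd_, last_allocated_chunk_, address, offset_, actual_size);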
54
55 DiscardableAshmemChunk(DeletionObserver* deletion_observer,
willchan no longer on Chromium 2013/11/18 14:53:13 The lifetime of the deletion_observer needs to be
Philippe 2013/11/18 16:34:51 Yeah, done.
56 int fd,
57 void* previous_chunk,
58 void* address,
59 size_t offset,
60 size_t size)
61 : deletion_observer_(deletion_observer),
62 fd_(fd),
63 previous_chunk_(previous_chunk),
64 address_(address),
65 offset_(offset),
66 size_(size),
67 locked_(true) {
68 }
69
70 virtual ~DiscardableAshmemChunk() {
71 if (locked_)
72 internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
73 deletion_observer_->OnChunkDeletion(previous_chunk_, address_, size_);
74 }
75
76 // DiscardableMemory:
77 virtual size_t Size() const OVERRIDE {
78 return size_;
79 }
80
81 virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
82 DCHECK(!locked_);
83 locked_ = true;
84 return internal::LockAshmemRegion(fd_, offset_, size_, address_);
85 }
86
87 virtual void Unlock() OVERRIDE {
88 DCHECK(locked_);
89 locked_ = false;
90 internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
91 }
92
93 virtual void* Memory() const OVERRIDE {
94 return address_;
95 }
96
97 private:
98 DeletionObserver* const deletion_observer_;
99 const int fd_;
100 void* const previous_chunk_;
101 void* const address_;
102 const size_t offset_;
103 const size_t size_;
104 bool locked_;
105
106 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
107 };
108
109 } // namespace
110
111 namespace internal {
112
113 class DiscardableMemoryAllocator::AshmemRegion
114 : public DiscardableAshmemChunk::DeletionObserver {
115 public:
116 typedef Callback<void (scoped_ptr<AshmemRegion>)> DeletionCallback;
117
118 static scoped_ptr<AshmemRegion> Create(
119 size_t size,
120 const std::string& name,
121 Lock* lock,
122 const DeletionCallback& deletion_callback) {
123 int fd;
124 void* base;
125 if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
126 return scoped_ptr<AshmemRegion>();
127 return make_scoped_ptr(
128 new AshmemRegion(fd, size, base, lock, deletion_callback));
129 }
130
131 virtual ~AshmemRegion() {
132 const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
133 DCHECK(result);
134 }
135
136 scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
137 size_t actual_size) {
138 lock_->AssertAcquired();
139 scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk(
140 client_requested_size, actual_size);
141 if (memory)
142 return memory.Pass();
143 if (size_ - offset_ < actual_size) {
144 // This region does not have enough space left to hold the requested size.
145 return scoped_ptr<DiscardableMemory>();
146 }
147 void* const address = static_cast<char*>(base_) + offset_;
148 memory.reset(
149 new DiscardableAshmemChunk(this, fd_, last_allocated_chunk_, address,
150 offset_, actual_size));
151 last_allocated_chunk_ = address;
152 offset_ += actual_size;
153 return memory.Pass();
154 }
155
156 private:
157 struct FreeChunk {
158 FreeChunk(void* previous_chunk, void* start, size_t size)
159 : previous_chunk(previous_chunk),
160 start(start),
161 size(size) {
162 }
163
164 void* const previous_chunk;
165 void* const start;
166 const size_t size;
167
168 bool is_null() const { return !start; }
169
170 bool operator<(const FreeChunk& other) const {
171 return size < other.size;
172 }
173 };
174
175 AshmemRegion(int fd,
176 size_t size,
177 void* base,
178 Lock* lock,
179 const DeletionCallback& deletion_callback)
180 : fd_(fd),
181 size_(size),
182 base_(base),
183 offset_(0),
184 lock_(lock),
185 deletion_callback_(deletion_callback),
186 last_allocated_chunk_(NULL) {
187 }
188
189 // DiscardableAshmemChunk::DeletionObserver:
190 virtual void OnChunkDeletion(void* previous_chunk,
191 void* chunk,
192 size_t size) OVERRIDE {
193 base::AutoLock auto_lock(*lock_);
194 MergeAndAddFreeChunk(previous_chunk, chunk, size);
195 }
196
197 // Tries to reuse a previously freed chunk by doing a best-fit size match.
198 scoped_ptr<DiscardableMemory> RecycleFreeChunk(size_t client_requested_size,
199 size_t actual_size) {
200 lock_->AssertAcquired();
201 const std::multiset<FreeChunk>::iterator chunk_it =
202 free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
203 if (chunk_it == free_chunks_.end())
204 return scoped_ptr<DiscardableMemory>();
205 size_t recycled_chunk_size = chunk_it->size;
206 const size_t fragmentation_bytes = chunk_it->size - client_requested_size;
207 if (fragmentation_bytes >= kMaxChunkFragmentationBytes) {
208 // Split the free chunk being recycled if it's too large so that its
209 // unused tail doesn't get recycled (i.e. locked) which would prevent it
210 // from being evicted under memory pressure.
211 void* const previous_chunk = chunk_it->start;
212 void* const chunk_start =
213 static_cast<char*>(chunk_it->start) + actual_size;
214 const size_t chunk_size = chunk_it->size - actual_size;
215 // Note that merging is not needed here since there can't be contiguous
216 // free chunks at this point.
217 AddFreeChunk(FreeChunk(previous_chunk, chunk_start, chunk_size));
218 recycled_chunk_size = actual_size;
219 }
220 const size_t offset =
221 static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
222 internal::LockAshmemRegion(
223 fd_, offset, recycled_chunk_size, chunk_it->start);
224 scoped_ptr<DiscardableMemory> memory(
225 new DiscardableAshmemChunk(
226 this, fd_, chunk_it->previous_chunk, chunk_it->start, offset,
227 recycled_chunk_size));
228 free_chunk_for_address_.erase(reinterpret_cast<uintptr_t>(chunk_it->start));
229 free_chunks_.erase(chunk_it);
230 return memory.Pass();
231 }
232
233 // Makes the chunk identified by the provided arguments free and merges it
234 // with the previous and next contiguous free chunks, if any, so that
235 // adjacent free space is coalesced into a single free chunk.
236 // If the provided chunk is the only one used (and about to be freed) in the
237 // region then the internal ashmem region is closed so that the underlying
238 // physical pages are immediately released.
239 // Note that free chunks are unlocked, therefore they can be reclaimed by the
240 // kernel if needed (under memory pressure), but unfortunately they are not
241 // released immediately since madvise(MADV_REMOVE) and
242 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
243 // change with kernel versions >= 3.5 though. The fact that free chunks are
244 // not immediately released is why this allocator tries to minimize
245 // fragmentation.
246 void MergeAndAddFreeChunk(void* previous_chunk, void* chunk, size_t size) {
247 lock_->AssertAcquired();
248 size_t new_free_chunk_size = size;
249 // Merge with the previous chunks.
250 void* first_free_chunk = chunk;
251 while (previous_chunk) {
252 const FreeChunk free_chunk = RemoveFreeChunk(previous_chunk);
253 if (free_chunk.is_null())
254 break;
255 new_free_chunk_size += free_chunk.size;
256 first_free_chunk = previous_chunk;
257 previous_chunk = free_chunk.previous_chunk;
258 }
259 // Merge with the next chunks.
260 const void* next_chunk = static_cast<const char*>(chunk) + size;
261 while (true) {
262 const FreeChunk free_chunk = RemoveFreeChunk(next_chunk);
263 if (free_chunk.is_null())
264 break;
265 new_free_chunk_size += free_chunk.size;
266 next_chunk = static_cast<const char*>(next_chunk) + free_chunk.size;
267 }
268 const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
269 if (!whole_ashmem_region_is_free) {
270 AddFreeChunk(
271 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
272 return;
273 }
274 // The whole ashmem region is free thus it can be deleted.
275 DCHECK_EQ(size_, new_free_chunk_size);
276 DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());
277 deletion_callback_.Run(make_scoped_ptr(this)); // Deletes |this|.
278 }
279
280 void AddFreeChunk(const FreeChunk& free_chunk) {
281 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
282 free_chunk);
283 free_chunk_for_address_.insert(
284 std::make_pair(reinterpret_cast<uintptr_t>(free_chunk.start), it));
285 }
286
287 // Finds and removes the free chunk, if any, whose start address is
288 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
289 // whose content is null if it was not found.
290 FreeChunk RemoveFreeChunk(const void* chunk_start) {
291 lock_->AssertAcquired();
292 const base::hash_map<
293 uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
294 free_chunk_for_address_.find(
295 reinterpret_cast<uintptr_t>(chunk_start));
296 if (it == free_chunk_for_address_.end())
297 return FreeChunk(NULL, NULL, 0U);
298 const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
299 const FreeChunk free_chunk(*free_chunk_it);
300 DCHECK_EQ(chunk_start, free_chunk.start);
301 free_chunk_for_address_.erase(it);
302 free_chunks_.erase(free_chunk_it);
303 return free_chunk;
304 }
305
306 const int fd_;
307 const size_t size_;
308 void* const base_;
309 size_t offset_;
310 base::Lock* const lock_;
311 const DeletionCallback deletion_callback_;
312 void* last_allocated_chunk_;
313 std::multiset<FreeChunk> free_chunks_;
314 base::hash_map<
315 uintptr_t, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;
316
317 DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
318 };
319
320 DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
321 : name_(name) {
322 }
323
324 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
325 DCHECK(thread_checker_.CalledOnValidThread());
326 }
327
328 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
329 size_t size) {
330 const size_t aligned_size = internal::AlignToNextPage(size);
331 base::AutoLock auto_lock(lock_);
332 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
333 it != ashmem_regions_.end(); ++it) {
334 scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size));
335 if (memory)
336 return memory.Pass();
337 }
338 scoped_ptr<AshmemRegion> new_region(
339 AshmemRegion::Create(
340 std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
341 name_.c_str(),
342 &lock_,
343 base::Bind(&DiscardableMemoryAllocator::DeleteAshmemRegion,
344 base::Unretained(this))));
345 if (!new_region) {
346     // TODO(pliard): consider adding a histogram to see how often this happens.
347 return scoped_ptr<DiscardableMemory>();
348 }
349 ashmem_regions_.push_back(new_region.release());
350 return ashmem_regions_.back()->Allocate(size, aligned_size);
351 }
352
353 void DiscardableMemoryAllocator::DeleteAshmemRegion(
354 scoped_ptr<AshmemRegion> region) {
355 lock_.AssertAcquired();
356 // Note that there should not be more than a couple of ashmem region instances
357 // in |ashmem_regions_|.
358 const ScopedVector<AshmemRegion>::iterator it = std::find(
359 ashmem_regions_.begin(), ashmem_regions_.end(), region.get());
360 DCHECK_NE(ashmem_regions_.end(), it);
361 std::swap(*it, ashmem_regions_.back());
362 ashmem_regions_.resize(ashmem_regions_.size() - 1);
363 // |region| was deleted by the resize() above.
364 ignore_result(region.release());
365 }
366
367 } // namespace internal
368 } // namespace base