Chromium Code Reviews

Side by Side Diff: base/memory/discardable_memory_allocator_android.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebase Created 7 years ago
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/discardable_memory_allocator_android.h"
6
7 #include <algorithm>
8 #include <cmath>
9 #include <set>
10 #include <utility>
11
12 #include "base/basictypes.h"
13 #include "base/containers/hash_tables.h"
14 #include "base/logging.h"
15 #include "base/memory/discardable_memory.h"
16 #include "base/memory/discardable_memory_android.h"
17 #include "base/memory/scoped_vector.h"
18 #include "base/synchronization/lock.h"
19 #include "base/threading/thread_checker.h"
20
21 // The allocator consists of three parts (classes):
22 // - DiscardableMemoryAllocator: entry point for all allocations (through its
23 // Allocate() method), which are dispatched to the AshmemRegion instances
24 // (which it owns).
25 // - AshmemRegion: manages allocations and destructions inside a single large
26 // (e.g. 32 MBytes) ashmem region.
27 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
28 // whose instances are returned to the client. DiscardableAshmemChunk lets the
29 // client seamlessly operate on a subrange of the ashmem region managed by
30 // AshmemRegion.
31
32 namespace base {
33 namespace {
34
35 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed
36 // to the allocator when a free chunk is reused). The client can cause such
37 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to
38 // 8192 by the allocator, which would cause 4095 bytes of fragmentation (which
39 // is currently the maximum allowed). If the client requests 4096 bytes and a
40 // free chunk of 8192 bytes is available, then the free chunk gets split into
41 // two pieces to minimize fragmentation (since 8192 - 4096 = 4096 is greater
42 // than 4095).
43 // TODO(pliard): tune this if splitting chunks too often leads to performance
44 // issues.
45 const size_t kMaxChunkFragmentationBytes = 4096 - 1;
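// A worked instance of the rule above (numbers taken from the comment; the
// page size is assumed to be 4096 bytes):
//   - A request for 4097 bytes is rounded up to 8192, so 8192 - 4097 = 4095
//     bytes of client-caused fragmentation, which equals
//     kMaxChunkFragmentationBytes and is therefore tolerated.
//   - A request for 4096 bytes served from an 8192-byte free chunk would
//     waste 8192 - 4096 = 4096 > 4095 bytes, so the free chunk is split and
//     only its first 4096 bytes are handed out.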
46
47 } // namespace
48
49 namespace internal {
50
51 class DiscardableMemoryAllocator::DiscardableAshmemChunk
52 : public DiscardableMemory {
53 public:
54 // Note that |ashmem_region| must outlive |this|.
55 DiscardableAshmemChunk(AshmemRegion* ashmem_region,
56 int fd,
57 void* address,
58 size_t offset,
59 size_t size)
60 : ashmem_region_(ashmem_region),
61 fd_(fd),
62 address_(address),
63 offset_(offset),
64 size_(size),
65 locked_(true) {
66 }
67
68 // Implemented below AshmemRegion since this requires the full definition of
69 // AshmemRegion.
70 virtual ~DiscardableAshmemChunk();
71
72 // DiscardableMemory:
73 virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
74 DCHECK(!locked_);
75 locked_ = true;
76 return internal::LockAshmemRegion(fd_, offset_, size_, address_);
77 }
78
79 virtual void Unlock() OVERRIDE {
80 DCHECK(locked_);
81 locked_ = false;
82 internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
83 }
84
85 virtual void* Memory() const OVERRIDE {
86 return address_;
87 }
88
89 private:
90 AshmemRegion* const ashmem_region_;
91 const int fd_;
92 void* const address_;
93 const size_t offset_;
94 const size_t size_;
95 bool locked_;
96
97 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
98 };
99
100 class DiscardableMemoryAllocator::AshmemRegion {
101 public:
102 // Note that |allocator| must outlive |this|.
103 static scoped_ptr<AshmemRegion> Create(
104 size_t size,
105 const std::string& name,
106 DiscardableMemoryAllocator* allocator) {
107 int fd;
108 void* base;
109 if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
110 return scoped_ptr<AshmemRegion>();
111 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
112 }
113
114 virtual ~AshmemRegion() {
115 const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
116 DCHECK(result);
117 }
118
119 // Returns a new instance of DiscardableMemory whose size is greater than or
120 // equal to |actual_size| (which is expected to be greater than or equal to
121 // |client_requested_size|).
122 // Allocation works as follows:
123 // 1) Try to reuse a previously freed chunk; if this succeeds, return it. See
124 // ReuseFreeChunk_Locked() below for more information.
125 // 2) If no free chunk could be reused and the region is not big enough for
126 // the requested size, then NULL is returned.
127 // 3) If there is enough room in the ashmem region then a new chunk is
128 // returned. This new chunk starts at |offset_| which is the end of the
129 // previously highest chunk in the region.
130 scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
131 size_t actual_size) {
132 DCHECK_LE(client_requested_size, actual_size);
133 allocator_->lock_.AssertAcquired();
134 scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
135 client_requested_size, actual_size);
136 if (memory)
137 return memory.Pass();
138 if (size_ - offset_ < actual_size) {
139 // This region does not have enough space left to hold the requested size.
140 return scoped_ptr<DiscardableMemory>();
141 }
142 void* const address = static_cast<char*>(base_) + offset_;
143 memory.reset(
144 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
145 used_to_previous_chunk_map_.insert(
146 std::make_pair(address, highest_allocated_chunk_));
147 highest_allocated_chunk_ = address;
148 offset_ += actual_size;
149 return memory.Pass();
150 }
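// Illustrative walk-through of the bump-pointer path above (hypothetical
// sizes): starting from an empty region, Allocate_Locked(4096, 4096) returns
// a chunk at |base_| + 0 and advances |offset_| to 4096; a subsequent
// Allocate_Locked(8000, 8192) returns |base_| + 4096 and advances |offset_|
// to 12288. |highest_allocated_chunk_| then points at |base_| + 4096 and
// |used_to_previous_chunk_map_| maps that address back to |base_| + 0.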
151
152 void OnChunkDeletion(void* chunk, size_t size) {
153 base::AutoLock auto_lock(allocator_->lock_);
willchan no longer on Chromium 2013/12/01 00:58:56 Not sure if you care, but base:: is unnecessary, since this code is already in namespace base.
Philippe 2013/12/02 10:56:39 Yes, good point.
154 MergeAndAddFreeChunk_Locked(chunk, size);
willchan no longer on Chromium 2013/12/01 00:58:56 Nit: I like to add comments after this indicating that |this| may be deleted beyond this point.
Philippe 2013/12/02 10:56:39 Yes, good idea.
155 }
156
157 private:
158 struct FreeChunk {
159 FreeChunk(void* previous_chunk, void* start, size_t size)
160 : previous_chunk(previous_chunk),
161 start(start),
162 size(size) {
163 }
164
165 void* const previous_chunk;
166 void* const start;
167 const size_t size;
168
169 bool is_null() const { return !start; }
170
171 bool operator<(const FreeChunk& other) const {
172 return size < other.size;
173 }
174 };
175
176 // Note that |allocator| must outlive |this|.
177 AshmemRegion(int fd,
178 size_t size,
179 void* base,
180 DiscardableMemoryAllocator* allocator)
181 : fd_(fd),
182 size_(size),
183 base_(base),
184 allocator_(allocator),
185 highest_allocated_chunk_(NULL),
186 offset_(0) {
187 DCHECK_GE(fd_, 0);
188 DCHECK_GE(size, kMinAshmemRegionSize);
189 DCHECK(base);
190 DCHECK(allocator);
191 }
192
193 // Tries to reuse a previously freed chunk by doing a closest size match.
194 scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked(
195 size_t client_requested_size,
196 size_t actual_size) {
197 allocator_->lock_.AssertAcquired();
198 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
199 free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size)));
200 if (reused_chunk.is_null())
201 return scoped_ptr<DiscardableMemory>();
202
203 used_to_previous_chunk_map_.insert(
204 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
205 size_t reused_chunk_size = reused_chunk.size;
206 // |client_requested_size| is used below rather than |actual_size| to
207 // reflect the number of bytes that would not be usable by the client (i.e.
208 // wasted). Using |actual_size| instead would not allow us to detect
209 // fragmentation caused by the client making misaligned allocations.
210 const size_t fragmentation_bytes =
211 reused_chunk.size - client_requested_size;
willchan no longer on Chromium 2013/12/01 00:58:56 Nit: add a DCHECK_GE(reused_chunk.size, client_requested_size) here.
Philippe 2013/12/02 10:56:39 Yeah, good idea.
212 if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
213 // Split the free chunk being recycled so that its unused tail doesn't get
214 // reused (i.e. locked) which would prevent it from being evicted under
215 // memory pressure.
216 reused_chunk_size = actual_size;
217 void* const previous_chunk = reused_chunk.start;
willchan no longer on Chromium 2013/12/01 00:58:56 I think it'd be clearer if you deleted this. Since it's just an alias for reused_chunk.start, you could use that directly below.
Philippe 2013/12/02 10:56:39 Done.
218 void* const new_chunk_start =
219 static_cast<char*>(reused_chunk.start) + actual_size;
220 DCHECK_GT(reused_chunk.size, actual_size);
221 const size_t new_chunk_size = reused_chunk.size - actual_size;
222 // Note that merging is not needed here since there can't be contiguous
223 // free chunks at this point.
224 AddFreeChunk_Locked(
225 FreeChunk(previous_chunk, new_chunk_start, new_chunk_size));
226 }
227 const size_t offset =
228 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_);
229 internal::LockAshmemRegion(
230 fd_, offset, reused_chunk_size, reused_chunk.start);
231 scoped_ptr<DiscardableMemory> memory(
232 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset,
233 reused_chunk_size));
234 return memory.Pass();
235 }
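// The lookup above is a best-fit search: since FreeChunk::operator< orders
// |free_chunks_| by size, lower_bound() returns the smallest free chunk whose
// size is >= |actual_size| in O(log N). A standalone sketch of the idiom
// (illustrative only, not part of this CL):
//
//   std::multiset<FreeChunk> free_chunks;
//   // Probe with a dummy FreeChunk carrying only the requested size:
//   std::multiset<FreeChunk>::iterator it =
//       free_chunks.lower_bound(FreeChunk(NULL, NULL, 8192));
//   // it == free_chunks.end() means no free chunk of size >= 8192 exists;
//   // otherwise *it is the tightest fit and can be erased in O(log N).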
236
237 // Makes the chunk identified by the provided arguments free and possibly
238 // merges this chunk with the previous and next contiguous ones.
239 // If the provided chunk is the only one used (and going to be freed) in the
240 // region then the internal ashmem region is closed so that the underlying
241 // physical pages are immediately released.
242 // Note that free chunks are unlocked, therefore they can be reclaimed by the
243 // kernel if needed (under memory pressure), but unfortunately they are not
244 // immediately released since madvise(MADV_REMOVE) and
245 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
246 // change in kernel versions >= 3.5 though. The fact that free chunks are not
247 // immediately released is the reason why we try to minimize fragmentation,
248 // in order not to cause "artificial" memory pressure.
249 void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) {
250 allocator_->lock_.AssertAcquired();
251 size_t new_free_chunk_size = size;
252 // Merge with the previous chunk.
253 void* first_free_chunk = chunk;
254 DCHECK_NE(0U, used_to_previous_chunk_map_.size());
willchan no longer on Chromium 2013/12/01 00:58:56 DCHECK(!used_to_previous_chunk_map_.empty()) Some people find .empty() more readable than comparing the size against zero.
Philippe 2013/12/02 10:56:39 I used this pattern to have more detailed assertion messages when the DCHECK fires.
255 const base::hash_map<void*, void*>::iterator previous_chunk_it =
256 used_to_previous_chunk_map_.find(chunk);
257 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
258 void* previous_chunk = previous_chunk_it->second;
259 used_to_previous_chunk_map_.erase(previous_chunk_it);
260 if (previous_chunk) {
261 const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
262 if (!free_chunk.is_null()) {
263 new_free_chunk_size += free_chunk.size;
264 first_free_chunk = previous_chunk;
265 // There should not be any further contiguous free chunks before this one.
266 DCHECK(!address_to_free_chunk_map_.count(free_chunk.previous_chunk));
267 }
268 }
269 // Merge with the next chunk if free and present.
270 void* next_chunk = static_cast<char*>(chunk) + size;
271 const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
272 if (!next_free_chunk.is_null()) {
273 new_free_chunk_size += next_free_chunk.size;
274 // Same as above.
275 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) +
276 next_free_chunk.size));
277 }
278 const bool whole_ashmem_region_is_free =
279 used_to_previous_chunk_map_.empty();
280 if (!whole_ashmem_region_is_free) {
281 AddFreeChunk_Locked(
282 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
283 return;
284 }
285 // The whole ashmem region is free thus it can be deleted.
286 DCHECK_EQ(base_, first_free_chunk);
287 DCHECK_EQ(0U, free_chunks_.size());
willchan no longer on Chromium 2013/12/01 00:58:56 I'd use .empty() for these guys too.
Philippe 2013/12/02 10:56:39 Done.
288 DCHECK_EQ(0U, address_to_free_chunk_map_.size());
289 DCHECK_EQ(0U, used_to_previous_chunk_map_.size());
290 allocator_->DeleteAshmemRegion_Locked(this);
291 }
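// Illustrative scenario for the merge logic above (hypothetical layout):
// suppose three contiguous used chunks A|B|C exactly fill the region.
//   - Deleting B adds a free chunk covering B.
//   - Deleting A merges forward with B: |first_free_chunk| stays at A's start
//     and |new_free_chunk_size| covers A+B; AddFreeChunk_Locked() also updates
//     C's entry in |used_to_previous_chunk_map_| to point at A's start.
//   - Deleting C finds the free A+B chunk as its previous neighbor and merges
//     with it, leaving |used_to_previous_chunk_map_| empty, so the whole
//     region is released via DeleteAshmemRegion_Locked(this).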
292
293 void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
294 allocator_->lock_.AssertAcquired();
295 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
296 free_chunk);
297 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
298 // Update the next used contiguous chunk, if any, since its previous chunk
299 // may have changed due to free chunks merging/splitting.
300 void* const next_used_contiguous_chunk =
301 static_cast<char*>(free_chunk.start) + free_chunk.size;
302 base::hash_map<void*, void*>::iterator previous_it =
303 used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
304 if (previous_it != used_to_previous_chunk_map_.end())
305 previous_it->second = free_chunk.start;
306 }
307
308 // Finds and removes the free chunk, if any, whose start address is
309 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
310 // whose content is null if it was not found.
311 FreeChunk RemoveFreeChunk_Locked(void* chunk_start) {
312 allocator_->lock_.AssertAcquired();
313 const base::hash_map<
314 void*, std::multiset<FreeChunk>::iterator>::iterator it =
315 address_to_free_chunk_map_.find(chunk_start);
316 if (it == address_to_free_chunk_map_.end())
317 return FreeChunk(NULL, NULL, 0U);
318 return RemoveFreeChunkFromIterator_Locked(it->second);
319 }
320
321 // Same as above but takes an iterator in.
322 FreeChunk RemoveFreeChunkFromIterator_Locked(
323 std::multiset<FreeChunk>::iterator free_chunk_it) {
324 allocator_->lock_.AssertAcquired();
325 if (free_chunk_it == free_chunks_.end())
326 return FreeChunk(NULL, NULL, 0U);
328 const FreeChunk free_chunk(*free_chunk_it);
329 address_to_free_chunk_map_.erase(free_chunk_it->start);
330 free_chunks_.erase(free_chunk_it);
331 return free_chunk;
332 }
333
334 const int fd_;
335 const size_t size_;
336 void* const base_;
337 DiscardableMemoryAllocator* const allocator_;
338 void* highest_allocated_chunk_;
339 // Offset of the end of |highest_allocated_chunk_|, where the next new chunk starts.
340 size_t offset_;
341 // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
342 // Note that FreeChunk values are indexed by their size and also note that
343 // multiple free chunks can have the same size (which is why multiset<> is
344 // used instead of e.g. set<>).
345 std::multiset<FreeChunk> free_chunks_;
346 // Used while merging contiguous free chunks to erase free chunks (keyed by
347 // their start address) in constant time. Note that multiset<>::{insert,erase}()
348 // don't invalidate iterators (except the one for the element being removed
349 // obviously).
350 base::hash_map<
351 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
352 // Maps the address of *used* chunks to the address of their previous
353 // contiguous chunk.
354 base::hash_map<void*, void*> used_to_previous_chunk_map_;
355
356 DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
357 };
358
359 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
360 if (locked_)
361 internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
362 ashmem_region_->OnChunkDeletion(address_, size_);
363 }
364
365 DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
366 : name_(name) {
367 }
368
369 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
370 DCHECK(thread_checker_.CalledOnValidThread());
371 DCHECK(ashmem_regions_.empty());
372 }
373
374 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
375 size_t size) {
376 const size_t aligned_size = internal::AlignToNextPage(size);
377 // TODO(pliard): make this function less naive by e.g. moving the free chunks
378 // multiset to the allocator itself in order to further reduce fragmentation
379 // and speed up allocation. Note that there should not be more than a couple
380 // (i.e. at most 5) of AshmemRegion instances in practice though.
381 base::AutoLock auto_lock(lock_);
382 DCHECK_LE(ashmem_regions_.size(), 5U);
383 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
384 it != ashmem_regions_.end(); ++it) {
385 scoped_ptr<DiscardableMemory> memory(
386 (*it)->Allocate_Locked(size, aligned_size));
387 if (memory)
388 return memory.Pass();
389 }
390 scoped_ptr<AshmemRegion> new_region(
391 AshmemRegion::Create(
392 std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
393 name_, this));
394 if (!new_region) {
395 // TODO(pliard): consider adding a histogram to see how often this happens.
396 return scoped_ptr<DiscardableMemory>();
397 }
398 ashmem_regions_.push_back(new_region.release());
399 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
400 }
401
402 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
403 AshmemRegion* region) {
404 lock_.AssertAcquired();
405 // Note that there should not be more than a couple of ashmem region instances
406 // in |ashmem_regions_|.
407 DCHECK_LE(ashmem_regions_.size(), 5U);
408 const ScopedVector<AshmemRegion>::iterator it = std::find(
409 ashmem_regions_.begin(), ashmem_regions_.end(), region);
410 DCHECK_NE(ashmem_regions_.end(), it);
411 std::swap(*it, ashmem_regions_.back());
412 ashmem_regions_.pop_back();
413 }
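// The swap-and-pop above removes |region| from the ScopedVector in O(1)
// without preserving order; ScopedVector::pop_back() also deletes the owned
// pointer, which is what actually destroys the AshmemRegion here. A
// standalone sketch of the idiom with a plain vector (illustrative only):
//
//   std::vector<int> v;  // Say v == {10, 20, 30}.
//   std::vector<int>::iterator it = v.begin() + 1;  // Points at 20.
//   std::swap(*it, v.back());  // v == {10, 30, 20}.
//   v.pop_back();              // v == {10, 30}: O(1), order not preserved.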
414
415 } // namespace internal
416 } // namespace base