// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <cmath>
#include <limits>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "third_party/ashmem/ashmem.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: the entry point for all allocations (through
// its Allocate() method); allocations are dispatched to the AshmemRegion
// instances that it owns.
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: implements the DiscardableMemory interface; its
// instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.
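//
// Illustrative client usage (a sketch; |allocator| is a hypothetical
// DiscardableMemoryAllocator instance):
//
//   scoped_ptr<DiscardableMemory> memory(allocator.Allocate(64 * 1024));
//   if (memory) {
//     void* data = memory->Memory();  // Chunk is locked (usable) on return.
//     // ... use |data| ...
//     memory->Unlock();  // The kernel may now purge the chunk.
//     if (memory->Lock() == DISCARDABLE_MEMORY_LOCK_STATUS_PURGED) {
//       // The contents were discarded and must be regenerated.
//     }
//   }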

namespace base {
namespace {

// Only tolerate fragmentation in used chunks *caused by the client* (as
// opposed to the allocator when a free chunk is reused). The client can cause
// such fragmentation by e.g. requesting 4097 bytes. This size would be rounded
// up to 8192 by the allocator, which would cause 4095 bytes of fragmentation
// (which is currently the maximum allowed). If the client requests 4096 bytes
// and a free chunk of 8192 bytes is available then the free chunk is split
// into two pieces to minimize fragmentation (since 8192 - 4096 = 4096, which
// is greater than 4095).
// TODO(pliard): tune this if splitting chunks too often leads to performance
// issues.
const size_t kMaxChunkFragmentationBytes = 4096 - 1;
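
// For instance (hypothetical request sizes, applying the rule above):
//   request 4097 -> rounded up to 8192; reusing an 8192-byte free chunk
//     wastes 4095 <= 4095 bytes, so the chunk may be reused as is;
//   request 4096 -> reusing an 8192-byte free chunk would waste 4096 > 4095
//     bytes, so the free chunk is split into two 4096-byte pieces first.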

const size_t kMinAshmemRegionSize = 32 * 1024 * 1024;

// Returns 0 if the provided size is too large to be aligned (i.e. aligning it
// would overflow size_t).
size_t AlignToNextPage(size_t size) {
  const size_t kPageSize = 4096;
  DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
  if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
    return 0;
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}
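
// Examples (assuming the 4096-byte page size DCHECKed above):
//   AlignToNextPage(1)    == 4096
//   AlignToNextPage(4096) == 4096
//   AlignToNextPage(4097) == 8192
//   AlignToNextPage(std::numeric_limits<size_t>::max()) == 0  // Overflow.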

// Creates an ashmem region of |size| bytes, maps it read/write and returns
// its file descriptor and mapped address through the out parameters. Returns
// false on failure.
bool CreateAshmemRegion(const char* name,
                        size_t size,
                        int* out_fd,
                        void** out_address) {
  base::ScopedFD fd(ashmem_create_region(name, size));
  if (!fd.is_valid()) {
    DLOG(ERROR) << "ashmem_create_region() failed";
    return false;
  }

  const int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
  if (err < 0) {
    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
    return false;
  }

  // MAP_PRIVATE would be a problem here: since Lock() and Unlock() are
  // constantly being called, data could be lost if it were not written back
  // to the underlying file when Unlock() is called.
  void* const address = mmap(
      NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
  if (address == MAP_FAILED) {
    DPLOG(ERROR) << "Failed to map memory.";
    return false;
  }

  *out_fd = fd.release();
  *out_address = address;
  return true;
}

// Unmaps and closes an ashmem region previously created with
// CreateAshmemRegion(). Returns false on failure.
bool CloseAshmemRegion(int fd, size_t size, void* address) {
  if (munmap(address, size) == -1) {
    DPLOG(ERROR) << "Failed to unmap memory.";
    close(fd);
    return false;
  }
  return close(fd) == 0;
}

// Pins the given range so that the kernel can no longer purge it, and reports
// whether it was purged while it was unpinned.
DiscardableMemoryLockStatus LockAshmemRegion(int fd, size_t off, size_t size) {
  const int result = ashmem_pin_region(fd, off, size);
  return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
                                     : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
}

// Unpins the given range, allowing the kernel to purge it under memory
// pressure.
bool UnlockAshmemRegion(int fd, size_t off, size_t size) {
  const int failed = ashmem_unpin_region(fd, off, size);
  if (failed)
    DLOG(ERROR) << "Failed to unpin memory.";
  return !failed;
}

}  // namespace

namespace internal {

class DiscardableMemoryAllocator::DiscardableAshmemChunk
    : public DiscardableMemory {
 public:
  // Note that |ashmem_region| must outlive |this|.
  DiscardableAshmemChunk(AshmemRegion* ashmem_region,
                         int fd,
                         void* address,
                         size_t offset,
                         size_t size)
      : ashmem_region_(ashmem_region),
        fd_(fd),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  // Implemented below AshmemRegion since this requires the full definition of
  // AshmemRegion.
  virtual ~DiscardableAshmemChunk();

  // DiscardableMemory:
  virtual DiscardableMemoryLockStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return LockAshmemRegion(fd_, offset_, size_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    UnlockAshmemRegion(fd_, offset_, size_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  AshmemRegion* const ashmem_region_;
  const int fd_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

class DiscardableMemoryAllocator::AshmemRegion {
 public:
  // Note that |allocator| must outlive |this|.
  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      DiscardableMemoryAllocator* allocator) {
    DCHECK_EQ(size, AlignToNextPage(size));
    int fd;
    void* base;
    if (!CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
  }

  ~AshmemRegion() {
    const bool result = CloseAshmemRegion(fd_, size_, base_);
    DCHECK(result);
    DCHECK(!highest_allocated_chunk_);
  }

  // Returns a new instance of DiscardableMemory whose size is greater than or
  // equal to |actual_size| (which is expected to be greater than or equal to
  // |client_requested_size|).
  // Allocation works as follows:
  // 1) Reuse a previously freed chunk and return it if it succeeded. See
  // ReuseFreeChunk_Locked() below for more information.
  // 2) If no free chunk could be reused and the region is not big enough for
  // the requested size then NULL is returned.
  // 3) If there is enough room in the ashmem region then a new chunk is
  // returned. This new chunk starts at |offset_|, which is the end of the
  // previously highest chunk in the region.
  scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
                                                size_t actual_size) {
    DCHECK_LE(client_requested_size, actual_size);
    allocator_->lock_.AssertAcquired();

    // Check that the |highest_allocated_chunk_| field doesn't contain a stale
    // pointer. It should point to either a free chunk or a used chunk.
    DCHECK(!highest_allocated_chunk_ ||
           address_to_free_chunk_map_.find(highest_allocated_chunk_) !=
               address_to_free_chunk_map_.end() ||
           used_to_previous_chunk_map_.find(highest_allocated_chunk_) !=
               used_to_previous_chunk_map_.end());

    scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();

    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableMemory>();
    }

    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));

    used_to_previous_chunk_map_.insert(
        std::make_pair(address, highest_allocated_chunk_));
    highest_allocated_chunk_ = address;
    offset_ += actual_size;
    DCHECK_LE(offset_, size_);
    return memory.Pass();
  }
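
  // Example of the bump-pointer path in Allocate_Locked() (hypothetical
  // sizes): in an empty 32 MByte region, a request with |actual_size| == 8192
  // gets a chunk starting at |base_| + 0; |highest_allocated_chunk_| becomes
  // that address and |offset_| grows from 0 to 8192. A second identical
  // request gets a chunk starting at |base_| + 8192, and so on until the
  // region is full.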

  void OnChunkDeletion(void* chunk, size_t size) {
    AutoLock auto_lock(allocator_->lock_);
    MergeAndAddFreeChunk_Locked(chunk, size);
    // Note that |this| might be deleted beyond this point.
  }

 private:
  struct FreeChunk {
    FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {}

    explicit FreeChunk(size_t size)
        : previous_chunk(NULL),
          start(NULL),
          size(size) {
    }

    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
      DCHECK_LT(previous_chunk, start);
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  // Note that |allocator| must outlive |this|.
  AshmemRegion(int fd,
               size_t size,
               void* base,
               DiscardableMemoryAllocator* allocator)
      : fd_(fd),
        size_(size),
        base_(base),
        allocator_(allocator),
        highest_allocated_chunk_(NULL),
        offset_(0) {
    DCHECK_GE(fd_, 0);
    DCHECK_GE(size, kMinAshmemRegionSize);
    DCHECK(base);
    DCHECK(allocator);
  }

  // Tries to reuse a previously freed chunk by doing a closest size match.
  scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked(
      size_t client_requested_size,
      size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
        free_chunks_.lower_bound(FreeChunk(actual_size)));
    if (reused_chunk.is_null())
      return scoped_ptr<DiscardableMemory>();

    used_to_previous_chunk_map_.insert(
        std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
    size_t reused_chunk_size = reused_chunk.size;
    // |client_requested_size| is used below rather than |actual_size| to
    // reflect the number of bytes that would not be usable by the client
    // (i.e. wasted). Using |actual_size| instead would not let us detect
    // fragmentation caused by the client when it makes misaligned
    // allocations.
    DCHECK_GE(reused_chunk.size, client_requested_size);
    const size_t fragmentation_bytes =
        reused_chunk.size - client_requested_size;

    if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
      // Split the free chunk being recycled so that its unused tail doesn't
      // get reused (i.e. locked), which would prevent it from being evicted
      // under memory pressure.
      reused_chunk_size = actual_size;
      void* const new_chunk_start =
          static_cast<char*>(reused_chunk.start) + actual_size;
      if (reused_chunk.start == highest_allocated_chunk_) {
        // We also need to update the pointer to the highest allocated chunk
        // in case we are splitting the highest chunk.
        highest_allocated_chunk_ = new_chunk_start;
      }
      DCHECK_GT(reused_chunk.size, actual_size);
      const size_t new_chunk_size = reused_chunk.size - actual_size;
      // Note that merging is not needed here since there can't be contiguous
      // free chunks at this point.
      AddFreeChunk_Locked(
          FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size));
    }

    const size_t offset =
        static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_);
    LockAshmemRegion(fd_, offset, reused_chunk_size);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset,
                                   reused_chunk_size));
    return memory.Pass();
  }
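
  // Example of the split above (hypothetical sizes): reusing a 16384-byte
  // free chunk for an 8192-byte |actual_size| would waste 8192 bytes, which
  // exceeds kMaxChunkFragmentationBytes (4095). The chunk is therefore split:
  // its first 8192 bytes are returned to the client and its remaining 8192
  // bytes are re-added to |free_chunks_| as a smaller free chunk.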

  // Makes the chunk identified by the provided arguments free and possibly
  // merges it with the previous and next contiguous chunks if they are free.
  // If the provided chunk is the only one used (and going to be freed) in the
  // region then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, so the kernel can reclaim them under
  // memory pressure, but unfortunately they are not immediately released
  // since madvise(MADV_REMOVE) and fallocate(FALLOC_FL_PUNCH_HOLE) don't seem
  // to work on ashmem. This might change in kernel versions >= 3.5 though.
  // The fact that free chunks are not immediately released is the reason why
  // we try to minimize fragmentation: it would otherwise cause "artificial"
  // memory pressure.
  void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) {
    allocator_->lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    // Merge with the previous chunk.
    void* first_free_chunk = chunk;
    DCHECK(!used_to_previous_chunk_map_.empty());
    const hash_map<void*, void*>::iterator previous_chunk_it =
        used_to_previous_chunk_map_.find(chunk);
    DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
    void* previous_chunk = previous_chunk_it->second;
    used_to_previous_chunk_map_.erase(previous_chunk_it);

    if (previous_chunk) {
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
      if (!free_chunk.is_null()) {
        new_free_chunk_size += free_chunk.size;
        first_free_chunk = previous_chunk;
        if (chunk == highest_allocated_chunk_)
          highest_allocated_chunk_ = previous_chunk;

        // There should not be more contiguous free chunks before this one.
        previous_chunk = free_chunk.previous_chunk;
        DCHECK(!address_to_free_chunk_map_.count(previous_chunk));
      }
    }

    // Merge with the next chunk if free and present.
    void* next_chunk = static_cast<char*>(chunk) + size;
    const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
    if (!next_free_chunk.is_null()) {
      new_free_chunk_size += next_free_chunk.size;
      if (next_free_chunk.start == highest_allocated_chunk_)
        highest_allocated_chunk_ = first_free_chunk;

      // There should not be more contiguous free chunks after this one.
      DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) +
                                               next_free_chunk.size));
    }

    const bool whole_ashmem_region_is_free =
        used_to_previous_chunk_map_.empty();
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }

    // The whole ashmem region is free, thus it can be deleted.
    DCHECK_EQ(base_, first_free_chunk);
    DCHECK_EQ(base_, highest_allocated_chunk_);
    DCHECK(free_chunks_.empty());
    DCHECK(address_to_free_chunk_map_.empty());
    DCHECK(used_to_previous_chunk_map_.empty());
    highest_allocated_chunk_ = NULL;
    allocator_->DeleteAshmemRegion_Locked(this);  // Deletes |this|.
  }
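
  // Example of the merge above (hypothetical layout): given three contiguous
  // chunks A (free), B (used, being freed) and C (free), freeing B yields a
  // single free chunk starting at A's address whose size is
  // size(A) + size(B) + size(C). If B were instead the only used chunk in the
  // region, the whole region would be deleted rather than re-added as free.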

  void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
    // Update the next used contiguous chunk, if any, since its previous chunk
    // may have changed due to free chunks merging/splitting.
    void* const next_used_contiguous_chunk =
        static_cast<char*>(free_chunk.start) + free_chunk.size;
    hash_map<void*, void*>::iterator previous_it =
        used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
    if (previous_it != used_to_previous_chunk_map_.end())
      previous_it->second = free_chunk.start;
  }

  // Finds and removes the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk, or a null free
  // chunk if it was not found.
  FreeChunk RemoveFreeChunk_Locked(void* chunk_start) {
    allocator_->lock_.AssertAcquired();
    const hash_map<
        void*, std::multiset<FreeChunk>::iterator>::iterator it =
            address_to_free_chunk_map_.find(chunk_start);
    if (it == address_to_free_chunk_map_.end())
      return FreeChunk();
    return RemoveFreeChunkFromIterator_Locked(it->second);
  }

  // Same as above but takes an iterator in.
  FreeChunk RemoveFreeChunkFromIterator_Locked(
      std::multiset<FreeChunk>::iterator free_chunk_it) {
    allocator_->lock_.AssertAcquired();
    if (free_chunk_it == free_chunks_.end())
      return FreeChunk();
    const FreeChunk free_chunk(*free_chunk_it);
    address_to_free_chunk_map_.erase(free_chunk_it->start);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  void* const base_;
  DiscardableMemoryAllocator* const allocator_;
  // Points to the chunk with the highest address in the region. This pointer
  // needs to be carefully updated when chunks are merged/split.
  void* highest_allocated_chunk_;
  // Points to the end of |highest_allocated_chunk_|.
  size_t offset_;
  // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
  // Note that FreeChunk values are indexed by their size and that multiple
  // free chunks can have the same size (which is why multiset<> is used
  // rather than e.g. set<>).
  std::multiset<FreeChunk> free_chunks_;
  // Used while merging free contiguous chunks to erase free chunks (from
  // their start address) in constant time. Note that multiset<>::{insert,
  // erase}() don't invalidate iterators (except, for erase(), the one
  // pointing to the element being removed).
  hash_map<
      void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
  // Maps the address of *used* chunks to the address of their previous
  // contiguous chunk.
  hash_map<void*, void*> used_to_previous_chunk_map_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
  if (locked_)
    UnlockAshmemRegion(fd_, offset_, size_);
  ashmem_region_->OnChunkDeletion(address_, size_);
}

DiscardableMemoryAllocator::DiscardableMemoryAllocator(
    const std::string& name,
    size_t ashmem_region_size)
    : name_(name),
      ashmem_region_size_(
          std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))),
      last_ashmem_region_size_(0) {
  DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize);
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(ashmem_regions_.empty());
}

scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  const size_t aligned_size = AlignToNextPage(size);
  if (!aligned_size)
    return scoped_ptr<DiscardableMemory>();
  // TODO(pliard): make this function less naive by e.g. moving the free
  // chunks multiset to the allocator itself in order to further reduce
  // fragmentation and speed up allocation. Note that there should not be more
  // than a couple (=5) of AshmemRegion instances in practice though.
  AutoLock auto_lock(lock_);
  DCHECK_LE(ashmem_regions_.size(), 5U);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory(
        (*it)->Allocate_Locked(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  // The creation of the (large) ashmem region might fail if the address space
  // is too fragmented. If creation fails, the allocator retries by repeatedly
  // halving the requested size.
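  // Example (hypothetical sizes): with |ashmem_region_size_| == 128 MBytes
  // and a small aligned request, the attempted region sizes are 128, 64 and
  // 32 MBytes, the loop stopping once |region_size| would drop below
  // |min_region_size| (kMinAshmemRegionSize, i.e. 32 MBytes, here).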
  const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size);
  for (size_t region_size = std::max(ashmem_region_size_, aligned_size);
       region_size >= min_region_size;
       region_size = AlignToNextPage(region_size / 2)) {
    scoped_ptr<AshmemRegion> new_region(
        AshmemRegion::Create(region_size, name_.c_str(), this));
    if (!new_region)
      continue;
    last_ashmem_region_size_ = region_size;
    ashmem_regions_.push_back(new_region.release());
    return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
  }
  // TODO(pliard): consider adding a histogram to see how often this happens.
  return scoped_ptr<DiscardableMemory>();
}

size_t DiscardableMemoryAllocator::last_ashmem_region_size() const {
  AutoLock auto_lock(lock_);
  return last_ashmem_region_size_;
}

void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
    AshmemRegion* region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a couple of ashmem region
  // instances in |ashmem_regions_|.
  DCHECK_LE(ashmem_regions_.size(), 5U);
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region);
  DCHECK_NE(ashmem_regions_.end(), it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.pop_back();
}

}  // namespace internal
}  // namespace base