// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_ashmem_allocator.h"

#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <cmath>
#include <limits>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/memory/scoped_vector.h"
#include "third_party/ashmem/ashmem.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAshmemAllocator: entry point of all allocations (through
// its Allocate() method) that are dispatched to the AshmemRegion instances
// (which it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: class mimicking the DiscardableMemory interface
// whose instances are returned to the client.
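//
// A minimal usage sketch (illustrative only; the name and sizes below are
// arbitrary):
//
//   DiscardableMemoryAshmemAllocator allocator("example", 32 * 1024 * 1024);
//   scoped_ptr<DiscardableAshmemChunk> chunk = allocator.Allocate(1024);
//   if (chunk)
//     memset(chunk->Memory(), 0, 1024);  // Chunks start out locked (pinned).
//   chunk.reset();  // Returns the chunk's range to its AshmemRegion.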

namespace base {
namespace {

// Only tolerate fragmentation in used chunks *caused by the client* (as opposed
// to the allocator when a free chunk is reused). The client can cause such
// fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to
// 8192 by the allocator which would cause 4095 bytes of fragmentation (which is
// currently the maximum allowed). If the client requests 4096 bytes and a free
// chunk of 8192 bytes is available then the free chunk gets split into two
// pieces to minimize fragmentation (since 8192 - 4096 = 4096 which is greater
// than 4095).
// TODO(pliard): tune this if splitting chunks too often leads to performance
// issues.
const size_t kMaxChunkFragmentationBytes = 4096 - 1;

const size_t kMinAshmemRegionSize = 32 * 1024 * 1024;

// Returns 0 if the provided size is too large to be aligned without overflow.
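// E.g. AlignToNextPage(4097) returns 8192 and AlignToNextPage(4096) returns
// 4096 (assuming, as below, 4 kB pages).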
size_t AlignToNextPage(size_t size) {
  const size_t kPageSize = 4096;
  DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
  if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
    return 0;
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}

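// Creates an ashmem region of |size| bytes, maps it read/write and returns
// the file descriptor and the mapped base address through |out_fd| and
// |out_address|. Returns false on failure.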
bool CreateAshmemRegion(const char* name,
                        size_t size,
                        int* out_fd,
                        uintptr_t* out_address) {
  base::ScopedFD fd(ashmem_create_region(name, size));
  if (!fd.is_valid()) {
    DLOG(ERROR) << "ashmem_create_region() failed";
    return false;
  }

  const int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
  if (err < 0) {
    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
    return false;
  }

  // There is a problem using MAP_PRIVATE here. As we are constantly calling
  // Lock() and Unlock(), data could get lost if it is not written to the
  // underlying file when Unlock() gets called.
  void* const address = mmap(
      NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
  if (address == MAP_FAILED) {
    DPLOG(ERROR) << "Failed to map memory.";
    return false;
  }

  *out_fd = fd.release();
  *out_address = reinterpret_cast<uintptr_t>(address);
  return true;
}

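// Unmaps and closes the ashmem region. Returns true if both operations
// succeeded.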
bool CloseAshmemRegion(int fd, size_t size, void* address) {
  if (munmap(address, size) == -1) {
    DPLOG(ERROR) << "Failed to unmap memory.";
    close(fd);
    return false;
  }
  return close(fd) == 0;
}

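// Pins the given range so that the kernel cannot purge it. Returns false if
// the range had been purged while it was unpinned, i.e. its previous content
// was lost.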
bool LockAshmemRegion(int fd, size_t off, size_t size) {
  return ashmem_pin_region(fd, off, size) != ASHMEM_WAS_PURGED;
}

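// Unpins the given range, making it eligible for purging by the kernel under
// memory pressure. Returns true on success.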
bool UnlockAshmemRegion(int fd, size_t off, size_t size) {
  const int failed = ashmem_unpin_region(fd, off, size);
  if (failed)
    DLOG(ERROR) << "Failed to unpin memory.";
  return !failed;
}

}  // namespace

namespace internal {

class AshmemRegion {
 public:
  // Note that |allocator| must outlive |this|.
  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      DiscardableMemoryAshmemAllocator* allocator) {
    DCHECK_EQ(size, AlignToNextPage(size));
    int fd;
    uintptr_t base;
    if (!CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
  }

  ~AshmemRegion() {
    const bool result = CloseAshmemRegion(
        fd_, size_, reinterpret_cast<void*>(base_));
    DCHECK(result);
    DCHECK(!highest_allocated_chunk_);
  }

  // Returns a new instance of DiscardableAshmemChunk whose size is greater
  // than or equal to |actual_size| (which is expected to be greater than or
  // equal to |client_requested_size|).
  // Allocation works as follows:
  // 1) Reuse a previously freed chunk and return it if it succeeded. See
  // ReuseFreeChunk_Locked() below for more information.
  // 2) If no free chunk could be reused and the region is not big enough for
  // the requested size then NULL is returned.
  // 3) If there is enough room in the ashmem region then a new chunk is
  // returned. This new chunk starts at |offset_| which is the end of the
  // previously highest chunk in the region.
  scoped_ptr<DiscardableAshmemChunk> Allocate_Locked(
      size_t client_requested_size,
      size_t actual_size) {
    DCHECK_LE(client_requested_size, actual_size);
    allocator_->lock_.AssertAcquired();

    // Check that the |highest_allocated_chunk_| field doesn't contain a stale
    // pointer. It should point to either a free chunk or a used chunk.
    DCHECK(!highest_allocated_chunk_ ||
           address_to_free_chunk_map_.find(highest_allocated_chunk_) !=
               address_to_free_chunk_map_.end() ||
           used_to_previous_chunk_map_.find(highest_allocated_chunk_) !=
               used_to_previous_chunk_map_.end());

    scoped_ptr<DiscardableAshmemChunk> memory = ReuseFreeChunk_Locked(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();

    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableAshmemChunk>();
    }

    uintptr_t const address = base_ + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, reinterpret_cast<void*>(address),
                                   offset_, actual_size));

    used_to_previous_chunk_map_.insert(
        std::make_pair(address, highest_allocated_chunk_));
    highest_allocated_chunk_ = address;
    offset_ += actual_size;
    DCHECK_LE(offset_, size_);
    return memory.Pass();
  }

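  // Called by ~DiscardableAshmemChunk() to return the chunk's range to the
  // free list. Note that this might delete |this| (through the allocator) if
  // the whole region becomes free.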
  void OnChunkDeletion(uintptr_t chunk, size_t size) {
    AutoLock auto_lock(allocator_->lock_);
    MergeAndAddFreeChunk_Locked(chunk, size);
    // Note that |this| might be deleted beyond this point.
  }

 private:
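  // Bookkeeping for a free chunk: its start address, size and the address of
  // the previous contiguous chunk. Instances are ordered by size so that
  // |free_chunks_| below can be searched by closest size match.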
  struct FreeChunk {
    FreeChunk() : previous_chunk(0), start(0), size(0) {}

    explicit FreeChunk(size_t size)
        : previous_chunk(0),
          start(0),
          size(size) {
    }

    FreeChunk(uintptr_t previous_chunk, uintptr_t start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
      DCHECK_LT(previous_chunk, start);
    }

    uintptr_t const previous_chunk;
    uintptr_t const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  // Note that |allocator| must outlive |this|.
  AshmemRegion(int fd,
               size_t size,
               uintptr_t base,
               DiscardableMemoryAshmemAllocator* allocator)
      : fd_(fd),
        size_(size),
        base_(base),
        allocator_(allocator),
        highest_allocated_chunk_(0),
        offset_(0) {
    DCHECK_GE(fd_, 0);
    DCHECK_GE(size, kMinAshmemRegionSize);
    DCHECK(base);
    DCHECK(allocator);
  }

  // Tries to reuse a previously freed chunk by doing a closest size match.
  scoped_ptr<DiscardableAshmemChunk> ReuseFreeChunk_Locked(
      size_t client_requested_size,
      size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
        free_chunks_.lower_bound(FreeChunk(actual_size)));
    if (reused_chunk.is_null())
      return scoped_ptr<DiscardableAshmemChunk>();

    used_to_previous_chunk_map_.insert(
        std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
    size_t reused_chunk_size = reused_chunk.size;
    // |client_requested_size| is used below rather than |actual_size| to
    // reflect the number of bytes that would not be usable by the client
    // (i.e. wasted). Using |actual_size| instead would not allow us to detect
    // fragmentation caused by the client making misaligned allocations.
    DCHECK_GE(reused_chunk.size, client_requested_size);
    const size_t fragmentation_bytes =
        reused_chunk.size - client_requested_size;

    if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
      // Split the free chunk being recycled so that its unused tail doesn't
      // get reused (i.e. locked) which would prevent it from being evicted
      // under memory pressure.
      reused_chunk_size = actual_size;
      uintptr_t const new_chunk_start = reused_chunk.start + actual_size;
      if (reused_chunk.start == highest_allocated_chunk_) {
        // We also need to update the pointer to the highest allocated chunk
        // in case we are splitting the highest chunk.
        highest_allocated_chunk_ = new_chunk_start;
      }
      DCHECK_GT(reused_chunk.size, actual_size);
      const size_t new_chunk_size = reused_chunk.size - actual_size;
      // Note that merging is not needed here since there can't be contiguous
      // free chunks at this point.
      AddFreeChunk_Locked(
          FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size));
    }

    const size_t offset = reused_chunk.start - base_;
    LockAshmemRegion(fd_, offset, reused_chunk_size);
    scoped_ptr<DiscardableAshmemChunk> memory(
        new DiscardableAshmemChunk(this, fd_,
                                   reinterpret_cast<void*>(reused_chunk.start),
                                   offset, reused_chunk_size));
    return memory.Pass();
  }

  // Makes the chunk identified with the provided arguments free and possibly
  // merges this chunk with the previous and next contiguous ones.
  // If the provided chunk is the only one used (and going to be freed) in the
  // region then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, therefore they can be reclaimed by
  // the kernel if needed (under memory pressure), but unfortunately they are
  // not immediately released since madvise(MADV_REMOVE) and
  // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
  // change in kernel versions >= 3.5 though. The fact that free chunks are
  // not immediately released is the reason why we are trying to minimize
  // fragmentation in order not to cause "artificial" memory pressure.
  void MergeAndAddFreeChunk_Locked(uintptr_t chunk, size_t size) {
    allocator_->lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    // Merge with the previous chunk.
    uintptr_t first_free_chunk = chunk;
    DCHECK(!used_to_previous_chunk_map_.empty());
    const hash_map<uintptr_t, uintptr_t>::iterator previous_chunk_it =
        used_to_previous_chunk_map_.find(chunk);
    DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
    uintptr_t previous_chunk = previous_chunk_it->second;
    used_to_previous_chunk_map_.erase(previous_chunk_it);

    if (previous_chunk) {
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
      if (!free_chunk.is_null()) {
        new_free_chunk_size += free_chunk.size;
        first_free_chunk = previous_chunk;
        if (chunk == highest_allocated_chunk_)
          highest_allocated_chunk_ = previous_chunk;

        // Since contiguous free chunks are always merged, the chunk before
        // the one just merged cannot itself be free.
        previous_chunk = free_chunk.previous_chunk;
        DCHECK(!address_to_free_chunk_map_.count(previous_chunk));
      }
    }

    // Merge with the next chunk if free and present.
    uintptr_t next_chunk = chunk + size;
    const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
    if (!next_free_chunk.is_null()) {
      new_free_chunk_size += next_free_chunk.size;
      if (next_free_chunk.start == highest_allocated_chunk_)
        highest_allocated_chunk_ = first_free_chunk;

      // Same as above.
      DCHECK(!address_to_free_chunk_map_.count(
          next_chunk + next_free_chunk.size));
    }

    const bool whole_ashmem_region_is_free =
        used_to_previous_chunk_map_.empty();
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }

    // The whole ashmem region is free thus it can be deleted.
    DCHECK_EQ(base_, first_free_chunk);
    DCHECK_EQ(base_, highest_allocated_chunk_);
    DCHECK(free_chunks_.empty());
    DCHECK(address_to_free_chunk_map_.empty());
    DCHECK(used_to_previous_chunk_map_.empty());
    highest_allocated_chunk_ = 0;
    allocator_->DeleteAshmemRegion_Locked(this);  // Deletes |this|.
  }

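  // Registers |free_chunk| in both |free_chunks_| (ordered by size) and
  // |address_to_free_chunk_map_| (keyed by start address).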
  void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
    // Update the next used contiguous chunk, if any, since its previous chunk
    // may have changed due to free chunks merging/splitting.
    uintptr_t const next_used_contiguous_chunk =
        free_chunk.start + free_chunk.size;
    hash_map<uintptr_t, uintptr_t>::iterator previous_it =
        used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
    if (previous_it != used_to_previous_chunk_map_.end())
      previous_it->second = free_chunk.start;
  }

  // Finds and removes the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
  // whose content is null if it was not found.
  FreeChunk RemoveFreeChunk_Locked(uintptr_t chunk_start) {
    allocator_->lock_.AssertAcquired();
    const hash_map<
        uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
            address_to_free_chunk_map_.find(chunk_start);
    if (it == address_to_free_chunk_map_.end())
      return FreeChunk();
    return RemoveFreeChunkFromIterator_Locked(it->second);
  }

  // Same as above but takes an iterator in.
  FreeChunk RemoveFreeChunkFromIterator_Locked(
      std::multiset<FreeChunk>::iterator free_chunk_it) {
    allocator_->lock_.AssertAcquired();
    if (free_chunk_it == free_chunks_.end())
      return FreeChunk();
    const FreeChunk free_chunk(*free_chunk_it);
    address_to_free_chunk_map_.erase(free_chunk_it->start);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  uintptr_t const base_;
  DiscardableMemoryAshmemAllocator* const allocator_;
  // Points to the chunk with the highest address in the region. This pointer
  // needs to be carefully updated when chunks are merged/split.
  uintptr_t highest_allocated_chunk_;
  // Points to the end of |highest_allocated_chunk_|.
  size_t offset_;
  // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
  // Note that FreeChunk values are indexed by their size and also note that
  // multiple free chunks can have the same size (which is why multiset<> is
  // used instead of e.g. set<>).
  std::multiset<FreeChunk> free_chunks_;
  // Used while merging contiguous free chunks to erase free chunks (given
  // their start address) in constant time. Note that multiset<>::{insert,
  // erase}() don't invalidate iterators (except the one for the element
  // being removed, obviously).
  hash_map<
      uintptr_t, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
  // Maps the address of *used* chunks to the address of their previous
  // contiguous chunk.
  hash_map<uintptr_t, uintptr_t> used_to_previous_chunk_map_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

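// Note that destroying a chunk also unpins it (if still locked) and hands its
// range back to the owning AshmemRegion, which may in turn delete the region
// once it is entirely free.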
DiscardableAshmemChunk::~DiscardableAshmemChunk() {
  if (locked_)
    UnlockAshmemRegion(fd_, offset_, size_);
  ashmem_region_->OnChunkDeletion(reinterpret_cast<uintptr_t>(address_), size_);
}

bool DiscardableAshmemChunk::Lock() {
  DCHECK(!locked_);
  locked_ = true;
  return LockAshmemRegion(fd_, offset_, size_);
}

void DiscardableAshmemChunk::Unlock() {
  DCHECK(locked_);
  locked_ = false;
  UnlockAshmemRegion(fd_, offset_, size_);
}

void* DiscardableAshmemChunk::Memory() const {
  return address_;
}
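
// Illustrative lock/unlock cycle (a sketch only; |chunk| stands for a chunk
// obtained from DiscardableMemoryAshmemAllocator::Allocate()):
//
//   chunk->Unlock();  // The chunk may now be purged under memory pressure.
//   if (chunk->Lock()) {
//     // Contents survived; chunk->Memory() is safe to read.
//   } else {
//     // The chunk was purged while unlocked; regenerate its contents.
//   }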

// Note that |ashmem_region| must outlive |this|.
DiscardableAshmemChunk::DiscardableAshmemChunk(AshmemRegion* ashmem_region,
                                               int fd,
                                               void* address,
                                               size_t offset,
                                               size_t size)
    : ashmem_region_(ashmem_region),
      fd_(fd),
      address_(address),
      offset_(offset),
      size_(size),
      locked_(true) {
}

DiscardableMemoryAshmemAllocator::DiscardableMemoryAshmemAllocator(
    const std::string& name,
    size_t ashmem_region_size)
    : name_(name),
      ashmem_region_size_(
          std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))),
      last_ashmem_region_size_(0) {
  DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize);
}

DiscardableMemoryAshmemAllocator::~DiscardableMemoryAshmemAllocator() {
  DCHECK(ashmem_regions_.empty());
}

scoped_ptr<DiscardableAshmemChunk> DiscardableMemoryAshmemAllocator::Allocate(
    size_t size) {
  const size_t aligned_size = AlignToNextPage(size);
  if (!aligned_size)
    return scoped_ptr<DiscardableAshmemChunk>();
  // TODO(pliard): make this function less naive by e.g. moving the free
  // chunks multiset to the allocator itself in order to further reduce
  // fragmentation and speed up allocation. Note that in practice there should
  // not be more than a handful (<= 5) of AshmemRegion instances anyway.
  AutoLock auto_lock(lock_);
  DCHECK_LE(ashmem_regions_.size(), 5U);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableAshmemChunk> memory(
        (*it)->Allocate_Locked(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  // The creation of the (large) ashmem region might fail if the address space
  // is too fragmented. If creation fails, the allocator retries with
  // successively halved region sizes.
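  // E.g. with |ashmem_region_size_| == 64 MB and a small |aligned_size|, the
  // loop below tries 64 MB then 32 MB and then gives up, since sizes below
  // max(kMinAshmemRegionSize, aligned_size) are never attempted.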
  const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size);
  for (size_t region_size = std::max(ashmem_region_size_, aligned_size);
       region_size >= min_region_size;
       region_size = AlignToNextPage(region_size / 2)) {
    scoped_ptr<AshmemRegion> new_region(
        AshmemRegion::Create(region_size, name_.c_str(), this));
    if (!new_region)
      continue;
    last_ashmem_region_size_ = region_size;
    ashmem_regions_.push_back(new_region.release());
    return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
  }
  // TODO(pliard): consider adding a histogram to see how often this happens.
  return scoped_ptr<DiscardableAshmemChunk>();
}

size_t DiscardableMemoryAshmemAllocator::last_ashmem_region_size() const {
  AutoLock auto_lock(lock_);
  return last_ashmem_region_size_;
}

void DiscardableMemoryAshmemAllocator::DeleteAshmemRegion_Locked(
    AshmemRegion* region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a handful of AshmemRegion
  // instances in |ashmem_regions_|.
  DCHECK_LE(ashmem_regions_.size(), 5U);
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region);
  DCHECK(ashmem_regions_.end() != it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.pop_back();
}

}  // namespace internal
}  // namespace base