// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <algorithm>
#include <cmath>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_android.h"
#include "base/memory/scoped_vector.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: the entry point for all allocations (through
// its Allocate() method), which are dispatched to the AshmemRegion instances
// it owns.
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: implements the DiscardableMemory interface; its
// instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.
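//
// A minimal usage sketch (illustrative only; |allocator| stands for a
// DiscardableMemoryAllocator instance, RegenerateContent() is a hypothetical
// client-side helper, and the DISCARDABLE_MEMORY_* enum values are assumed to
// come from base/memory/discardable_memory.h):
//
//   scoped_ptr<DiscardableMemory> memory = allocator.Allocate(1024);
//   Use(memory->Memory());  // Chunks are returned locked and ready to use.
//   memory->Unlock();       // May now be purged by the kernel under pressure.
//   if (memory->Lock() == DISCARDABLE_MEMORY_PURGED)
//     RegenerateContent(memory->Memory());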

namespace base {
namespace {

// Only tolerate fragmentation in used chunks *caused by the client* (as
// opposed to the allocator when a free chunk is reused). The client can cause
// such fragmentation by e.g. requesting 4097 bytes. This size would be rounded
// up to 8192 by the allocator, which would cause 4095 bytes of fragmentation
// (which is currently the maximum allowed). If the client requests 4096 bytes
// and a free chunk of 8192 bytes is available, then the free chunk gets split
// into two pieces to minimize fragmentation (since 8192 - 4096 = 4096, which
// is greater than 4095).
// TODO(pliard): tune this if splitting chunks too often leads to performance
// issues.
const size_t kMaxChunkFragmentationBytes = 4096 - 1;
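// Worked example (illustrative numbers, using the logic in
// ReuseFreeChunk_Locked() below): a client requests 4097 bytes, which is
// rounded up to an |actual_size| of 8192 bytes.
// - If an 8192-byte free chunk is found, the prospective waste is
//   8192 - 4097 = 4095 bytes, which does not exceed
//   kMaxChunkFragmentationBytes, so the whole chunk is reused as-is.
// - If the smallest suitable free chunk is 12288 bytes, the waste would be
//   12288 - 4097 = 8191 bytes, so the chunk is split: 8192 bytes are handed
//   out and the remaining 4096 bytes are re-added as a free chunk.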

}  // namespace

namespace internal {

class DiscardableMemoryAllocator::DiscardableAshmemChunk
    : public DiscardableMemory {
 public:
  // Note that |ashmem_region| must outlive |this|.
  DiscardableAshmemChunk(AshmemRegion* ashmem_region,
                         int fd,
                         void* address,
                         size_t offset,
                         size_t size)
      : ashmem_region_(ashmem_region),
        fd_(fd),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  // Implemented below AshmemRegion since this requires the full definition of
  // AshmemRegion.
  virtual ~DiscardableAshmemChunk();

  // DiscardableMemory:
  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  AshmemRegion* const ashmem_region_;
  const int fd_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

class DiscardableMemoryAllocator::AshmemRegion {
 public:
  // Note that |allocator| must outlive |this|.
  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      DiscardableMemoryAllocator* allocator) {
    int fd;
    void* base;
    if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
  }

  virtual ~AshmemRegion() {
    const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
    DCHECK(result);
  }

  // Returns a new instance of DiscardableMemory whose size is greater than or
  // equal to |actual_size| (which is itself expected to be greater than or
  // equal to |client_requested_size|).
  // Allocation works as follows:
  // 1) Reuse a previously freed chunk and return it if this succeeds. See
  // ReuseFreeChunk_Locked() below for more information.
  // 2) If no free chunk could be reused and the region does not have enough
  // room left for the requested size, then a null pointer is returned.
  // 3) If there is enough room in the ashmem region, then a new chunk is
  // returned. This new chunk starts at |offset_|, which is the end of the
  // previously highest chunk in the region.
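  //
  // For instance (illustrative numbers): in a brand new region, a first
  // Allocate_Locked(4096, 4096) call finds no free chunk to reuse (step 1),
  // the region has enough room (step 2 does not apply), so a new chunk
  // starting at offset 0 is returned and |offset_| is bumped to 4096 (step 3).
  // A second identical call returns a chunk starting at offset 4096, and so
  // on, until a freed chunk can be recycled or the region runs out of space.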
  scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
                                                size_t actual_size) {
    DCHECK_LE(client_requested_size, actual_size);
    allocator_->lock_.AssertAcquired();
    scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();
    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableMemory>();
    }
    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
    used_to_previous_chunk_map_.insert(
        std::make_pair(address, highest_allocated_chunk_));
    highest_allocated_chunk_ = address;
    offset_ += actual_size;
    DCHECK_LE(offset_, size_);
    return memory.Pass();
  }

  void OnChunkDeletion(void* chunk, size_t size) {
    AutoLock auto_lock(allocator_->lock_);
    MergeAndAddFreeChunk_Locked(chunk, size);
    // Note that |this| might be deleted beyond this point.
  }

 private:
  struct FreeChunk {
    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  // Note that |allocator| must outlive |this|.
  AshmemRegion(int fd,
               size_t size,
               void* base,
               DiscardableMemoryAllocator* allocator)
      : fd_(fd),
        size_(size),
        base_(base),
        allocator_(allocator),
        highest_allocated_chunk_(NULL),
        offset_(0) {
    DCHECK_GE(fd_, 0);
    DCHECK_GE(size, kMinAshmemRegionSize);
    DCHECK(base);
    DCHECK(allocator);
  }

  // Tries to reuse a previously freed chunk by doing a closest (best-fit) size
  // match on |free_chunks_|, as illustrated in the example below.
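  // For example (illustrative sizes): with free chunks of sizes
  // {4096, 12288, 16384} and an |actual_size| of 8192 bytes,
  // free_chunks_.lower_bound() selects the 12288-byte chunk, i.e. the smallest
  // free chunk that is large enough. Whether its unused tail is then split off
  // is decided against kMaxChunkFragmentationBytes (see the comment at the top
  // of this file).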
  scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked(
      size_t client_requested_size,
      size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size)));
    if (reused_chunk.is_null())
      return scoped_ptr<DiscardableMemory>();

    used_to_previous_chunk_map_.insert(
        std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
    size_t reused_chunk_size = reused_chunk.size;
    // |client_requested_size| is used below rather than |actual_size| to
    // reflect the number of bytes that would not be usable by the client
    // (i.e. wasted). Using |actual_size| instead would not allow us to detect
    // fragmentation caused by the client when it makes misaligned allocations.
    DCHECK_GE(reused_chunk.size, client_requested_size);
    const size_t fragmentation_bytes =
        reused_chunk.size - client_requested_size;
    if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
      // Split the free chunk being recycled so that its unused tail doesn't
      // get reused (i.e. locked), which would prevent it from being evicted
      // under memory pressure.
      reused_chunk_size = actual_size;
      void* const new_chunk_start =
          static_cast<char*>(reused_chunk.start) + actual_size;
      DCHECK_GT(reused_chunk.size, actual_size);
      const size_t new_chunk_size = reused_chunk.size - actual_size;
      // Note that merging is not needed here since there can't be contiguous
      // free chunks at this point.
      AddFreeChunk_Locked(
          FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size));
    }
    const size_t offset =
        static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_);
    internal::LockAshmemRegion(
        fd_, offset, reused_chunk_size, reused_chunk.start);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset,
                                   reused_chunk_size));
    return memory.Pass();
  }

  // Makes the chunk identified by the provided arguments free and possibly
  // merges this chunk with the previous and next contiguous ones.
  // If the provided chunk is the only one used (and going to be freed) in the
  // region, then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, therefore they can be reclaimed by the
  // kernel if needed (under memory pressure), but unfortunately they are not
  // released immediately, since madvise(MADV_REMOVE) and
  // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
  // change in kernel versions >= 3.5 though. The fact that free chunks are not
  // immediately released is the reason why we try to minimize fragmentation,
  // so as not to cause "artificial" memory pressure.
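  //
  // Illustration (hypothetical layout): if the region contains
  // [ free A | used B | free C | used D ] and chunk B is deleted, B is merged
  // with its free neighbours A and C into one free chunk spanning A+B+C, and
  // D's entry in |used_to_previous_chunk_map_| is updated to point at the
  // start of that merged chunk. If D had already been freed as well, the whole
  // region would be free and the AshmemRegion itself would be deleted.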
  void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) {
    allocator_->lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    // Merge with the previous chunk.
    void* first_free_chunk = chunk;
    DCHECK(!used_to_previous_chunk_map_.empty());
    const hash_map<void*, void*>::iterator previous_chunk_it =
        used_to_previous_chunk_map_.find(chunk);
    DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
    void* previous_chunk = previous_chunk_it->second;
    used_to_previous_chunk_map_.erase(previous_chunk_it);
    if (previous_chunk) {
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
      if (!free_chunk.is_null()) {
        new_free_chunk_size += free_chunk.size;
        first_free_chunk = previous_chunk;
        // There should not be another contiguous free chunk before
        // |previous_chunk| since free chunks are always merged on creation.
        DCHECK(!address_to_free_chunk_map_.count(free_chunk.previous_chunk));
      }
    }
    // Merge with the next chunk if free and present.
    void* next_chunk = static_cast<char*>(chunk) + size;
    const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
    if (!next_free_chunk.is_null()) {
      new_free_chunk_size += next_free_chunk.size;
      // Same as above.
      DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) +
                                               next_free_chunk.size));
    }
    const bool whole_ashmem_region_is_free =
        used_to_previous_chunk_map_.empty();
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }
    // The whole ashmem region is free, thus it can be deleted.
    DCHECK_EQ(base_, first_free_chunk);
    DCHECK(free_chunks_.empty());
    DCHECK(address_to_free_chunk_map_.empty());
    DCHECK(used_to_previous_chunk_map_.empty());
    allocator_->DeleteAshmemRegion_Locked(this);  // Deletes |this|.
  }

  void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
    // Update the next used contiguous chunk, if any, since its previous chunk
    // may have changed due to free chunks merging/splitting.
    void* const next_used_contiguous_chunk =
        static_cast<char*>(free_chunk.start) + free_chunk.size;
    hash_map<void*, void*>::iterator previous_it =
        used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
    if (previous_it != used_to_previous_chunk_map_.end())
      previous_it->second = free_chunk.start;
  }

  // Finds and removes the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk, or a null free
  // chunk (i.e. one whose is_null() returns true) if it was not found.
  FreeChunk RemoveFreeChunk_Locked(void* chunk_start) {
    allocator_->lock_.AssertAcquired();
    const hash_map<
        void*, std::multiset<FreeChunk>::iterator>::iterator it =
            address_to_free_chunk_map_.find(chunk_start);
    if (it == address_to_free_chunk_map_.end())
      return FreeChunk(NULL, NULL, 0U);
    return RemoveFreeChunkFromIterator_Locked(it->second);
  }

  // Same as above but takes an iterator into |free_chunks_| rather than a
  // start address.
  FreeChunk RemoveFreeChunkFromIterator_Locked(
      std::multiset<FreeChunk>::iterator free_chunk_it) {
    allocator_->lock_.AssertAcquired();
    if (free_chunk_it == free_chunks_.end())
      return FreeChunk(NULL, NULL, 0U);
    const FreeChunk free_chunk(*free_chunk_it);
    address_to_free_chunk_map_.erase(free_chunk_it->start);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  void* const base_;
  DiscardableMemoryAllocator* const allocator_;
  void* highest_allocated_chunk_;
  // Offset of the end of |highest_allocated_chunk_| relative to |base_|, i.e.
  // where the next brand new chunk will start (see Allocate_Locked()).
  size_t offset_;
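  //
  // Illustrative layout of a region (a sketch, not to scale):
  //
  //   [ used/free chunks | highest allocated chunk | untouched ]
  //   ^                  ^                         ^           ^
  //   base_              highest_allocated_chunk_  |           base_ + size_
  //                                                base_ + offset_
  //
  // Everything below |offset_| has been carved into chunks, tracked either in
  // |used_to_previous_chunk_map_| or in |free_chunks_|; the tail in
  // [base_ + offset_, base_ + size_) has never been handed out.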
  // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
  // Note that FreeChunk values are indexed by their size and also note that
  // multiple free chunks can have the same size (which is why multiset<> is
  // used instead of e.g. set<>).
  std::multiset<FreeChunk> free_chunks_;
  // Used while merging contiguous free chunks to erase free chunks (by their
  // start address) in constant time. Note that multiset<>::{insert,erase}()
  // don't invalidate iterators (except, of course, the iterator of the element
  // being erased).
  hash_map<
      void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
  // Maps the address of *used* chunks to the address of their previous
  // contiguous chunk.
  hash_map<void*, void*> used_to_previous_chunk_map_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
  if (locked_)
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  ashmem_region_->OnChunkDeletion(address_, size_);
}

DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
    : name_(name) {
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(ashmem_regions_.empty());
}

scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  const size_t aligned_size = internal::AlignToNextPage(size);
  // TODO(pliard): make this function less naive by e.g. moving the free chunks
  // multiset to the allocator itself in order to further reduce fragmentation
  // and speed up allocation. Note that in practice there should not be more
  // than a couple (<= 5) of AshmemRegion instances anyway.
  AutoLock auto_lock(lock_);
  DCHECK_LE(ashmem_regions_.size(), 5U);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory(
        (*it)->Allocate_Locked(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  scoped_ptr<AshmemRegion> new_region(
      AshmemRegion::Create(
          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
          name_.c_str(), this));
  if (!new_region) {
    // TODO(pliard): consider adding a histogram to see how often this happens.
    return scoped_ptr<DiscardableMemory>();
  }
  ashmem_regions_.push_back(new_region.release());
  return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
}

void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
    AshmemRegion* region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a couple of ashmem region
  // instances in |ashmem_regions_|.
  DCHECK_LE(ashmem_regions_.size(), 5U);
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region);
  DCHECK_NE(ashmem_regions_.end(), it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.pop_back();
}

}  // namespace internal
}  // namespace base