// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <algorithm>
#include <cmath>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_android.h"
#include "base/memory/scoped_vector.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: entry point of all allocations (through its
// Allocate() method) that are dispatched to the AshmemRegion instances (which
// it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
// whose instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.

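// Example usage (illustrative sketch; the allocator name and allocation size
// below are arbitrary values chosen for the example, not taken from this
// change):
//
//   base::internal::DiscardableMemoryAllocator allocator("ExampleAllocator");
//   scoped_ptr<base::DiscardableMemory> memory(allocator.Allocate(64 * 1024));
//   if (memory) {
//     // Chunks are returned locked, so the memory is usable right away.
//     void* const data = memory->Memory();
//     // ... use |data| ...
//     memory->Unlock();  // The kernel may now purge the backing pages.
//   }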
namespace base {
namespace {

// Allow 8 KBytes of fragmentation inside used chunks.
const size_t kMaxChunkFragmentationBytes = 8192;

}  // namespace

namespace internal {

class DiscardableMemoryAllocator::DiscardableAshmemChunk
    : public DiscardableMemory {
 public:
  // Note that |ashmem_region| must outlive |this|.
  DiscardableAshmemChunk(AshmemRegion* ashmem_region,
                         int fd,
                         void* address,
                         size_t offset,
                         size_t size)
      : ashmem_region_(ashmem_region),
        fd_(fd),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  // Implemented below AshmemRegion since this requires the full definition of
  // AshmemRegion.
  virtual ~DiscardableAshmemChunk();

  // DiscardableMemory:
  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  AshmemRegion* const ashmem_region_;
  const int fd_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

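// Illustrative sketch of the unlock/re-lock cycle (assuming the
// LockDiscardableMemoryStatus values declared in
// base/memory/discardable_memory.h, e.g. DISCARDABLE_MEMORY_SUCCESS and
// DISCARDABLE_MEMORY_PURGED):
//
//   memory->Unlock();  // The chunk may now be purged under memory pressure.
//   const LockDiscardableMemoryStatus status = memory->Lock();
//   if (status == DISCARDABLE_MEMORY_SUCCESS) {
//     // The previous contents are still there.
//   } else if (status == DISCARDABLE_MEMORY_PURGED) {
//     // The pages were reclaimed; the client must regenerate the data.
//   }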
class DiscardableMemoryAllocator::AshmemRegion {
 public:
  // Note that |allocator| must outlive |this|.
  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      DiscardableMemoryAllocator* allocator) {
    int fd;
    void* base;
    if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
  }

  virtual ~AshmemRegion() {
    const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
    DCHECK(result);
  }

  // Returns a new instance of DiscardableMemory whose size is greater than or
  // equal to |actual_size| (which is expected to be greater than or equal to
  // |client_requested_size|).
willchan (no longer on Chromium) 2013/11/27 06:52:07:
Can you add a DCHECK to enforce this invariant?

Philippe 2013/11/27 14:22:10:
Done.
  // Allocation works as follows:
  // 1) Recycle a previously freed chunk and return it if this succeeds. See
  // RecycleFreeChunk_Locked() below for more information.
  // 2) If no free chunk could be recycled and the region is not big enough for
  // the requested size then NULL is returned.
  // 3) If there is enough room in the ashmem region then a new chunk is
  // returned. This new chunk starts at |offset_| which is the end of the
  // previously highest chunk in the region.
  scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
                                                size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk_Locked(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();
    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableMemory>();
    }
    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
    previous_chunk_for_used_chunk_.insert(
        std::make_pair(address, highest_allocated_chunk_));
    highest_allocated_chunk_ = address;
    offset_ += actual_size;
    return memory.Pass();
  }

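  // Worked example (illustrative sketch, assuming a fresh region and
  // page-aligned |actual_size| values): two successive calls
  //
  //   Allocate_Locked(3000, 4096);  // Chunk at offset 0; |offset_| -> 4096.
  //   Allocate_Locked(6000, 8192);  // Chunk at offset 4096; |offset_| -> 12288.
  //
  // The second chunk's entry in |previous_chunk_for_used_chunk_| points at the
  // first chunk's address, which is what later allows the two chunks to be
  // merged once both have been freed.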
  void OnChunkDeletion(void* chunk, size_t size) {
    base::AutoLock auto_lock(allocator_->lock_);
    MergeAndAddFreeChunk_Locked(chunk, size);
  }

 private:
  struct FreeChunk {
    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  AshmemRegion(int fd,
               size_t size,
               void* base,
               DiscardableMemoryAllocator* allocator)
      : fd_(fd),
        size_(size),
        base_(base),
        offset_(0),
        allocator_(allocator),
        highest_allocated_chunk_(NULL) {
  }

  // Tries to reuse a previously freed chunk by doing a closest size match.
  scoped_ptr<DiscardableMemory> RecycleFreeChunk_Locked(
willchan (no longer on Chromium) 2013/11/27 06:52:07:
Nit: WDYT about s/Recycle/Reuse/? My mental model

Philippe 2013/11/27 14:22:10:
Done. Egor wasn't very comfortable either with the
      size_t client_requested_size,
      size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator chunk_it =
        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
    if (chunk_it == free_chunks_.end())
      return scoped_ptr<DiscardableMemory>();
    size_t recycled_chunk_size = chunk_it->size;
    const size_t fragmentation_bytes = chunk_it->size - client_requested_size;
    if (fragmentation_bytes >= kMaxChunkFragmentationBytes) {
      SplitFreeChunk_Locked(*chunk_it, actual_size);
      recycled_chunk_size = actual_size;
    }
    const size_t offset =
        static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
    internal::LockAshmemRegion(
        fd_, offset, recycled_chunk_size, chunk_it->start);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(this, fd_, chunk_it->start, offset,
                                   recycled_chunk_size));
    previous_chunk_for_used_chunk_.insert(
        std::make_pair(chunk_it->start, chunk_it->previous_chunk));
willchan (no longer on Chromium) 2013/11/27 06:52:07:
I'm trying to understand this. It looks like the p

Philippe 2013/11/27 14:22:10:
I added your unit test (please make sure that it f

willchan (no longer on Chromium) 2013/11/28 06:16:37:
I think I was too jetlagged last night and wrong :
    free_chunk_for_address_.erase(chunk_it->start);
    free_chunks_.erase(chunk_it);
    return memory.Pass();
  }

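  // Worked example (illustrative sketch): with free chunks of sizes
  // {4096, 12288, 16384} in |free_chunks_|, a request with
  // |actual_size| == 8192 makes lower_bound() return the 12288-byte chunk,
  // i.e. the smallest free chunk whose size is >= 8192. For a
  // |client_requested_size| of, say, 8000 bytes the fragmentation
  // (12288 - 8000 = 4288 bytes) stays below kMaxChunkFragmentationBytes, so
  // the chunk is reused whole. If only the 16384-byte chunk were free, the
  // fragmentation (16384 - 8000 = 8384 bytes) would trigger
  // SplitFreeChunk_Locked() and only 8192 bytes would be reused.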
  // Splits the free chunk being recycled so that its unused tail doesn't get
  // recycled (i.e. locked), which would prevent it from being evicted under
  // memory pressure.
  void SplitFreeChunk_Locked(const FreeChunk& free_chunk,
                             size_t allocation_size) {
    allocator_->lock_.AssertAcquired();
    void* const previous_chunk = free_chunk.start;
    void* const new_chunk_start =
        static_cast<char*>(free_chunk.start) + allocation_size;
    const size_t new_chunk_size = free_chunk.size - allocation_size;
    // Note that merging is not needed here since there can't be contiguous
    // free chunks at this point.
    AddFreeChunk_Locked(
        FreeChunk(previous_chunk, new_chunk_start, new_chunk_size));
    // Update the next used contiguous chunk, if any, since its previous chunk
    // is no longer |free_chunk.start|.
    void* const next_used_contiguous_chunk =
        static_cast<char*>(free_chunk.start) + free_chunk.size;
    base::hash_map<void*, void*>::iterator previous_it =
        previous_chunk_for_used_chunk_.find(next_used_contiguous_chunk);
    if (previous_it != previous_chunk_for_used_chunk_.end())
      previous_it->second = new_chunk_start;
  }

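  // Worked example (illustrative sketch): reusing 8192 bytes out of a
  // 20480-byte free chunk F leaves an 8192-byte chunk (returned to the client)
  // followed by a new 12288-byte free chunk starting at F.start + 8192 whose
  // |previous_chunk| is F.start. If a used chunk immediately follows F, its
  // entry in |previous_chunk_for_used_chunk_| is updated to point at the new
  // free chunk instead of F.start.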
  // Makes the chunk identified by the provided arguments free and possibly
  // merges it with the previous and next contiguous chunks.
  // If the provided chunk is the only one used (and going to be freed) in the
  // region then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, therefore they can be reclaimed by the
  // kernel if needed (under memory pressure). Unfortunately they are not
  // immediately released, since madvise(MADV_REMOVE) and
  // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
  // change in kernel versions >= 3.5 though. The fact that free chunks are not
  // immediately released is the reason why we try to minimize fragmentation,
  // in order not to cause "artificial" memory pressure.
  void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) {
    allocator_->lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    // Merge with the previous chunks.
    void* first_free_chunk = chunk;
    const base::hash_map<void*, void*>::iterator previous_chunk_it =
        previous_chunk_for_used_chunk_.find(chunk);
    DCHECK(previous_chunk_it != previous_chunk_for_used_chunk_.end());
    void* previous_chunk = previous_chunk_it->second;
    previous_chunk_for_used_chunk_.erase(previous_chunk_it);
    while (previous_chunk) {
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
      if (free_chunk.is_null())
        break;
      new_free_chunk_size += free_chunk.size;
      first_free_chunk = previous_chunk;
      previous_chunk = free_chunk.previous_chunk;
    }
    // Merge with the next chunks.
    void* next_chunk = static_cast<char*>(chunk) + size;
    while (true) {
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(next_chunk);
      if (free_chunk.is_null())
        break;
      new_free_chunk_size += free_chunk.size;
      next_chunk = static_cast<char*>(next_chunk) + free_chunk.size;
    }
    const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }
    // The whole ashmem region is free thus it can be deleted.
    DCHECK_EQ(size_, new_free_chunk_size);
    DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());
    allocator_->DeleteAshmemRegion_Locked(this);
  }

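  // Worked example (illustrative sketch): assume three contiguous chunks were
  // allocated in order, [A][B][C], and that B has already been freed. When C
  // is deleted, the backward loop finds B in |free_chunk_for_address_| and
  // absorbs it (|first_free_chunk| becomes B's address, |previous_chunk|
  // becomes A's), then stops at the still-used chunk A; the forward loop finds
  // no free chunk after C. The result is a single free chunk
  // FreeChunk(A, B, size(B) + size(C)). The region itself is deleted only if
  // the merged free chunk ends up covering the whole region (i.e. its size
  // equals |size_|).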
  void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    free_chunk_for_address_.insert(std::make_pair(free_chunk.start, it));
  }

  // Finds and removes the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
  // whose content is null if it was not found.
  FreeChunk RemoveFreeChunk_Locked(void* chunk_start) {
    allocator_->lock_.AssertAcquired();
    const base::hash_map<
        void*, std::multiset<FreeChunk>::iterator>::iterator it =
            free_chunk_for_address_.find(chunk_start);
    if (it == free_chunk_for_address_.end())
      return FreeChunk(NULL, NULL, 0U);
    const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
    const FreeChunk free_chunk(*free_chunk_it);
    DCHECK_EQ(chunk_start, free_chunk.start);
    free_chunk_for_address_.erase(it);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  void* const base_;
  size_t offset_;
  DiscardableMemoryAllocator* const allocator_;
  void* highest_allocated_chunk_;
  // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
  // Note that FreeChunk values are indexed by their size and also note that
  // multiple free chunks can have the same size (which is why multiset<> is
  // used instead of e.g. set<>).
  std::multiset<FreeChunk> free_chunks_;
  // Used while merging free contiguous chunks to erase free chunks (from their
  // start address) in constant time. Note that multiset<>::{insert,erase}()
  // don't invalidate iterators (except the one for the element being removed
  // obviously).
  base::hash_map<
      void*, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;
  // Maps the address of *used* chunks to the address of their previous
  // contiguous chunk.
  base::hash_map<void*, void*> previous_chunk_for_used_chunk_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};

DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
  if (locked_)
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  ashmem_region_->OnChunkDeletion(address_, size_);
}

DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
    : name_(name) {
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());
}

scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  // TODO(pliard): make this function less naive by e.g. moving the free chunks
  // multiset to the allocator itself in order to further decrease
  // fragmentation and speed up allocation. Note that there should not be more
  // than a couple of AshmemRegion instances in practice though.
  const size_t aligned_size = internal::AlignToNextPage(size);
  base::AutoLock auto_lock(lock_);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory(
        (*it)->Allocate_Locked(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  scoped_ptr<AshmemRegion> new_region(
      AshmemRegion::Create(
          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
          name_.c_str(), this));
  if (!new_region) {
    // TODO(pliard): consider adding a histogram to see how often this happens.
    return scoped_ptr<DiscardableMemory>();
  }
  ashmem_regions_.push_back(new_region.release());
  return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
}

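// Worked example (illustrative sketch, assuming kMinAshmemRegionSize is
// 32 MBytes as the comment at the top of this file suggests): a first
// Allocate(64 * 1024) call finds no existing region and creates a 32 MByte
// one; subsequent small allocations are served from that same region until it
// fills up. A single allocation larger than 32 MBytes would instead get its
// own dedicated region of the (page-aligned) requested size.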
void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
    AshmemRegion* region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a couple of ashmem region
  // instances in |ashmem_regions_|.
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region);
  DCHECK_NE(ashmem_regions_.end(), it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.resize(ashmem_regions_.size() - 1);
}

}  // namespace internal
}  // namespace base