// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_allocator_android.h"

#include <algorithm>
#include <cmath>
#include <set>
#include <utility>

#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_android.h"
#include "base/memory/scoped_vector.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"

// The allocator consists of three parts (classes):
// - DiscardableMemoryAllocator: entry point of all allocations (through its
// Allocate() method) that are dispatched to the AshmemRegion instances (which
// it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
// whose instances are returned to the client. DiscardableAshmemChunk lets the
// client seamlessly operate on a subrange of the ashmem region managed by
// AshmemRegion.

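A minimal usage sketch of this three-part design from the client's side (the allocator name, sizes, and the purged-status enum value are assumptions for illustration, not taken from this file):

    DiscardableMemoryAllocator allocator("example-allocator");
    scoped_ptr<DiscardableMemory> memory = allocator.Allocate(64 * 1024);
    // Chunks start out locked; unlock to let the kernel reclaim the pages.
    memory->Unlock();
    // ... later ...
    if (memory->Lock() == DISCARDABLE_MEMORY_PURGED) {
      // The pages were reclaimed under memory pressure; the data must be
      // regenerated before use.
    }
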
namespace base {
namespace {

// Allow 8 KBytes of fragmentation inside used chunks.
const size_t kMaxChunkFragmentationBytes = 8192;

}  // namespace

namespace internal {

class DiscardableMemoryAllocator::DiscardableAshmemChunk
    : public DiscardableMemory {
 public:
  // Note that |ashmem_region| must outlive |this|.
  DiscardableAshmemChunk(AshmemRegion* ashmem_region,
                         int fd,
                         void* address,
                         size_t offset,
                         size_t size)
      : ashmem_region_(ashmem_region),
        fd_(fd),
        address_(address),
        offset_(offset),
        size_(size),
        locked_(true) {
  }

  // Implemented below AshmemRegion since this requires the full definition of
  // AshmemRegion.
  virtual ~DiscardableAshmemChunk();

  // DiscardableMemory:
  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    DCHECK(!locked_);
    locked_ = true;
    return internal::LockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void Unlock() OVERRIDE {
    DCHECK(locked_);
    locked_ = false;
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  }

  virtual void* Memory() const OVERRIDE {
    return address_;
  }

 private:
  AshmemRegion* const ashmem_region_;
  const int fd_;
  void* const address_;
  const size_t offset_;
  const size_t size_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
};

class DiscardableMemoryAllocator::AshmemRegion {
 public:
  // Note that |allocator| must outlive |this|.
  static scoped_ptr<AshmemRegion> Create(
      size_t size,
      const std::string& name,
      DiscardableMemoryAllocator* allocator) {
    int fd;
    void* base;
    if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
      return scoped_ptr<AshmemRegion>();
    return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
  }

  virtual ~AshmemRegion() {
    const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
    DCHECK(result);
  }

  // Returns a new instance of DiscardableMemory whose size is greater than or
  // equal to |actual_size| (which is expected to be greater than or equal to
  // |client_requested_size|).
  // Allocation works as follows:
  // 1) Reuse a previously freed chunk and return it if this succeeds. See
  // ReuseFreeChunk_Locked() below for more information.
  // 2) If no free chunk could be reused and the region is not big enough for
  // the requested size then NULL is returned.
  // 3) If there is enough room in the ashmem region then a new chunk is
  // returned. This new chunk starts at |offset_| which is the end of the
  // previously highest chunk in the region.
  // A worked example of these steps follows the method below.
  scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
                                                size_t actual_size) {
    DCHECK_LE(client_requested_size, actual_size);
    allocator_->lock_.AssertAcquired();
    scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
        client_requested_size, actual_size);
    if (memory)
      return memory.Pass();
    if (size_ - offset_ < actual_size) {
      // This region does not have enough space left to hold the requested
      // size.
      return scoped_ptr<DiscardableMemory>();
    }
    void* const address = static_cast<char*>(base_) + offset_;
    memory.reset(
        new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
    previous_chunk_for_used_chunk_.insert(

willchan (no longer on Chromium)
2013/11/28 06:16:37
Nit: I don't know if my suggestion will be any bet…
Philippe
2013/11/28 16:42:53
Yeah, I also renamed |free_chunk_for_address_| acc…

        std::make_pair(address, highest_allocated_chunk_));
    highest_allocated_chunk_ = address;
    offset_ += actual_size;
    return memory.Pass();
  }
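
A worked example of the three allocation steps, assuming 4 KByte pages (all numbers are illustrative, not from the file):

    // Allocate_Locked(10000, 12288):
    // 1) If a 16384-byte free chunk exists, it is the closest size >= 12288
    //    and gets reused (and possibly split; see ReuseFreeChunk_Locked()).
    // 2) Otherwise, if size_ - offset_ < 12288, this region is too full and
    //    NULL is returned so the allocator can try another region.
    // 3) Otherwise a new 12288-byte chunk is carved at base_ + offset_,
    //    recorded as the new highest_allocated_chunk_, and offset_ advances
    //    by 12288.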

  void OnChunkDeletion(void* chunk, size_t size) {
    base::AutoLock auto_lock(allocator_->lock_);
    MergeAndAddFreeChunk_Locked(chunk, size);
  }

 private:
  struct FreeChunk {
    FreeChunk(void* previous_chunk, void* start, size_t size)
        : previous_chunk(previous_chunk),
          start(start),
          size(size) {
    }

    void* const previous_chunk;
    void* const start;
    const size_t size;

    bool is_null() const { return !start; }

    bool operator<(const FreeChunk& other) const {
      return size < other.size;
    }
  };

  AshmemRegion(int fd,
               size_t size,
               void* base,
               DiscardableMemoryAllocator* allocator)
      : fd_(fd),
        size_(size),
        base_(base),
        offset_(0),
        allocator_(allocator),
        highest_allocated_chunk_(NULL) {

willchan (no longer on Chromium)
2013/11/28 06:16:37
Can you DCHECK the parameters and comment that |al…
Philippe
2013/11/28 16:42:53
Sure. One of the good points about immutability is…

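A sketch of the parameter checks the reviewer asks for, as they might read at the top of the constructor body (the exact predicates are an assumption about the follow-up patchset):

    DCHECK_GE(fd, 0);
    DCHECK_GT(size, 0U);
    DCHECK(base);
    DCHECK(allocator);  // |allocator| must outlive |this|.
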
  }

  // Tries to reuse a previously freed chunk by doing a closest size match.
  scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked(
      size_t client_requested_size,
      size_t actual_size) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator chunk_it =
        free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
    if (chunk_it == free_chunks_.end())
      return scoped_ptr<DiscardableMemory>();
    size_t reused_chunk_size = chunk_it->size;
    const size_t fragmentation_bytes = chunk_it->size - client_requested_size;

willchan (no longer on Chromium)
2013/11/28 06:16:37
I'm trying to understand why you're subtracting th…
Philippe
2013/11/28 16:42:53
I might be missing something here but my initial i…
willchan (no longer on Chromium)
2013/11/28 17:44:19
I agree that the current calculation is more accur…
Philippe
2013/11/29 12:41:05
Right, I understand now your point of view. I will…

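A worked instance of the computation under discussion, with illustrative numbers (client_requested_size = 10000, actual_size = 12288, best-fit free chunk of 24576 bytes):

    fragmentation_bytes = 24576 - 10000 = 14576  // >= 8192, so split.
    // Subtracting actual_size instead would give 24576 - 12288 = 12288;
    // the two choices can disagree near the 8192-byte threshold, which is
    // what the thread above is weighing.
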
    if (fragmentation_bytes >= kMaxChunkFragmentationBytes) {
      SplitFreeChunk_Locked(*chunk_it, actual_size);

willchan (no longer on Chromium)
2013/11/28 06:16:37
OK, I thought about it. Keep it if you prefer, but…
Philippe
2013/11/28 16:42:53
Agreed. I had externalized free chunk splitting to…

      reused_chunk_size = actual_size;
    }
    const size_t offset =
        static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
    internal::LockAshmemRegion(
        fd_, offset, reused_chunk_size, chunk_it->start);
    scoped_ptr<DiscardableMemory> memory(
        new DiscardableAshmemChunk(this, fd_, chunk_it->start, offset,
                                   reused_chunk_size));
    previous_chunk_for_used_chunk_.insert(
        std::make_pair(chunk_it->start, chunk_it->previous_chunk));
    free_chunk_for_address_.erase(chunk_it->start);
    free_chunks_.erase(chunk_it);
    return memory.Pass();
  }
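
The closest-size match above is plain std::multiset best-fit; a self-contained sketch of the idiom with simplified element types (all values illustrative):

    #include <set>

    std::multiset<size_t> sizes;  // Stand-in for |free_chunks_|.
    sizes.insert(4096);
    sizes.insert(12288);
    sizes.insert(32768);
    // lower_bound() finds the smallest element >= the request in O(log N).
    std::multiset<size_t>::const_iterator it = sizes.lower_bound(8192);
    // *it == 12288: the smallest free chunk that can hold the request.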

  // Splits the free chunk being reused so that its unused tail doesn't get
  // reused (i.e. locked), which would prevent it from being evicted under
  // memory pressure.
  void SplitFreeChunk_Locked(const FreeChunk& free_chunk,
                             size_t allocation_size) {
    allocator_->lock_.AssertAcquired();
    void* const previous_chunk = free_chunk.start;
    void* const new_chunk_start =
        static_cast<char*>(free_chunk.start) + allocation_size;
    const size_t new_chunk_size = free_chunk.size - allocation_size;

willchan (no longer on Chromium)
2013/11/28 06:16:37
Please add a DCHECK_GT(free_chunk.size, allocation…
Philippe
2013/11/28 16:42:53
Yeah good point.

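The requested check, as it would slot in right after the subtraction above (a sketch of the agreed follow-up; a zero-sized tail must never be split off):

    DCHECK_GT(free_chunk.size, allocation_size);
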
    // Note that merging is not needed here since there can't be contiguous
    // free chunks at this point.
    AddFreeChunk_Locked(
        FreeChunk(previous_chunk, new_chunk_start, new_chunk_size));
    // Update the next used contiguous chunk, if any, since its previous chunk
    // is no longer |free_chunk.start|.
    void* const next_used_contiguous_chunk =
        static_cast<char*>(free_chunk.start) + free_chunk.size;
    base::hash_map<void*, void*>::iterator previous_it =
        previous_chunk_for_used_chunk_.find(next_used_contiguous_chunk);
    if (previous_it != previous_chunk_for_used_chunk_.end())
      previous_it->second = new_chunk_start;
  }

  // Makes the chunk identified with the provided arguments free and possibly
  // merges this chunk with the previous and next contiguous ones.
  // If the provided chunk is the only one used (and going to be freed) in the
  // region then the internal ashmem region is closed so that the underlying
  // physical pages are immediately released.
  // Note that free chunks are unlocked, therefore they can be reclaimed by
  // the kernel if needed (under memory pressure), but unfortunately they are
  // not immediately released since madvise(MADV_REMOVE) and
  // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
  // change in kernel versions >= 3.5 though. The fact that free chunks are
  // not immediately released is why we try to minimize fragmentation, in
  // order not to cause "artificial" memory pressure.
  void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) {
    allocator_->lock_.AssertAcquired();
    size_t new_free_chunk_size = size;
    // Merge with the previous chunks.
    void* first_free_chunk = chunk;
    const base::hash_map<void*, void*>::iterator previous_chunk_it =
        previous_chunk_for_used_chunk_.find(chunk);
    DCHECK(previous_chunk_it != previous_chunk_for_used_chunk_.end());
    void* previous_chunk = previous_chunk_it->second;
    previous_chunk_for_used_chunk_.erase(previous_chunk_it);
    while (previous_chunk) {

willchan (no longer on Chromium)
2013/11/28 06:16:37
I don't understand this while loop. There shouldn'…
Philippe
2013/11/28 16:42:53
Yeah, you're right. I added a DCHECK though :)

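A sketch of the loop-free version the thread converges on (the exact DCHECK is an assumption about the follow-up patchset; since contiguous free chunks are merged eagerly, at most one predecessor can be free):

    const FreeChunk previous_free_chunk =
        RemoveFreeChunk_Locked(previous_chunk);
    if (!previous_free_chunk.is_null()) {
      new_free_chunk_size += previous_free_chunk.size;
      first_free_chunk = previous_chunk;
      previous_chunk = previous_free_chunk.previous_chunk;
      // The chunk before a free chunk must be used (or not exist).
      DCHECK(!free_chunk_for_address_.count(previous_chunk));
    }
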
      const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
      if (free_chunk.is_null())
        break;
      new_free_chunk_size += free_chunk.size;
      first_free_chunk = previous_chunk;
      previous_chunk = free_chunk.previous_chunk;
    }
    // Merge with the next chunks.
    void* next_chunk = static_cast<char*>(chunk) + size;
    while (true) {

willchan (no longer on Chromium)
2013/11/28 06:16:37
Ditto, unsure if the while loop is necessary.
Philippe
2013/11/28 16:42:53
Done.

      const FreeChunk free_chunk = RemoveFreeChunk_Locked(next_chunk);
      if (free_chunk.is_null())
        break;
      new_free_chunk_size += free_chunk.size;
      next_chunk = static_cast<char*>(next_chunk) + free_chunk.size;
    }
    const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
    if (!whole_ashmem_region_is_free) {
      AddFreeChunk_Locked(
          FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
      return;
    }
    // The whole ashmem region is free thus it can be deleted.
    DCHECK_EQ(size_, new_free_chunk_size);

willchan (no longer on Chromium)
2013/11/28 06:16:37
Unneeded, this was just checked above. It'd be goo…
Philippe
2013/11/28 16:42:53
Yeah, good point. The DCHECK you suggested actuall…

    DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());

willchan (no longer on Chromium)
2013/11/28 06:16:37
previous_chunk_for_used_chunk_ should be empty too…
Philippe
2013/11/28 16:42:53
Yes, I had forgotten to update this code when I ad…

    allocator_->DeleteAshmemRegion_Locked(this);
  }

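How a deletion coalesces with both neighbors, on an illustrative layout (addresses grow left to right; none of this is from the file):

    Before OnChunkDeletion(C): [A used][B free][C used][D free][E used]
    After:                     [A used][   B+C+D free  ][E used]
    // B is found through previous_chunk_for_used_chunk_[C] and D through
    // C's end address; both are unlinked via RemoveFreeChunk_Locked() before
    // the single merged chunk is re-added via AddFreeChunk_Locked().
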
  void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
    allocator_->lock_.AssertAcquired();
    const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
        free_chunk);
    free_chunk_for_address_.insert(std::make_pair(free_chunk.start, it));
  }

  // Finds and removes the free chunk, if any, whose start address is
  // |chunk_start|. Returns a copy of the unlinked free chunk, or a free chunk
  // whose content is null if it was not found.
  FreeChunk RemoveFreeChunk_Locked(void* chunk_start) {
    allocator_->lock_.AssertAcquired();
    const base::hash_map<
        void*, std::multiset<FreeChunk>::iterator>::iterator it =
            free_chunk_for_address_.find(chunk_start);
    if (it == free_chunk_for_address_.end())
      return FreeChunk(NULL, NULL, 0U);
    const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
    const FreeChunk free_chunk(*free_chunk_it);
    DCHECK_EQ(chunk_start, free_chunk.start);
    free_chunk_for_address_.erase(it);
    free_chunks_.erase(free_chunk_it);
    return free_chunk;
  }

  const int fd_;
  const size_t size_;
  void* const base_;
  size_t offset_;

willchan (no longer on Chromium)
2013/11/28 06:16:37
Can you move this next to highest_allocated_chunk_…
Philippe
2013/11/28 16:42:53
Done.

  DiscardableMemoryAllocator* const allocator_;
  void* highest_allocated_chunk_;
  // Allows free chunk recycling (lookup, insertion and removal) in O(log N).
  // Note that FreeChunk values are indexed by their size and also note that
  // multiple free chunks can have the same size (which is why multiset<> is
  // used instead of e.g. set<>).
  std::multiset<FreeChunk> free_chunks_;
  // Used while merging free contiguous chunks to erase free chunks (from
  // their start address) in constant time. Note that
  // multiset<>::{insert,erase}() don't invalidate iterators (except the one
  // for the element being removed, obviously).
  base::hash_map<
      void*, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;
  // Maps the address of *used* chunks to the address of their previous
  // contiguous chunk.
  base::hash_map<void*, void*> previous_chunk_for_used_chunk_;

  DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};
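
|free_chunks_| and |free_chunk_for_address_| together form a size-ordered free list with constant-time address lookup; a self-contained sketch of the pattern with simplified types (names hypothetical):

    std::multiset<FreeChunk> by_size;
    base::hash_map<void*, std::multiset<FreeChunk>::iterator> by_address;

    void Add(const FreeChunk& chunk) {
      // multiset insertion returns a stable iterator; index it by address.
      by_address.insert(std::make_pair(chunk.start, by_size.insert(chunk)));
    }

    void RemoveByAddress(void* start) {
      by_size.erase(by_address[start]);
      by_address.erase(start);
    }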

DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
  if (locked_)
    internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
  ashmem_region_->OnChunkDeletion(address_, size_);
}

DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name)
    : name_(name) {
}

DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
  DCHECK(thread_checker_.CalledOnValidThread());

willchan (no longer on Chromium)
2013/11/28 06:16:37
I think it'd be good to DCHECK(ashmem_regions_.emp…
Philippe
2013/11/28 16:42:53
Great idea! This might also catch uses after free…

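The suggested check, as it would slot in here before the closing brace (a sketch of the agreed follow-up):

    // All DiscardableMemory instances must have been destroyed by now, which
    // in turn deletes their (then fully free) regions.
    DCHECK(ashmem_regions_.empty());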
}

scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
    size_t size) {
  // TODO(pliard): make this function less naive by e.g. moving the free
  // chunks multiset to the allocator itself in order to further decrease
  // fragmentation and speed up allocation. Note that there should not be
  // more than a couple of AshmemRegion instances in practice though.

willchan (no longer on Chromium)
2013/11/28 06:16:37
Can we throw up a DCHECK() for this, so people hit…
Philippe
2013/11/28 16:42:53
Good idea!

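A sketch of such a check (the bound of 5 is a hypothetical choice, not something settled in the review):

    // A handful of regions should suffice; growth beyond this hints at a
    // fragmentation or accounting bug.
    DCHECK_LE(ashmem_regions_.size(), 5U);
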
  const size_t aligned_size = internal::AlignToNextPage(size);
  base::AutoLock auto_lock(lock_);
  for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
       it != ashmem_regions_.end(); ++it) {
    scoped_ptr<DiscardableMemory> memory(
        (*it)->Allocate_Locked(size, aligned_size));
    if (memory)
      return memory.Pass();
  }
  scoped_ptr<AshmemRegion> new_region(
      AshmemRegion::Create(
          std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size),
          name_.c_str(), this));
  if (!new_region) {
    // TODO(pliard): consider adding a histogram to see how often this
    // happens.
    return scoped_ptr<DiscardableMemory>();
  }
  ashmem_regions_.push_back(new_region.release());
  return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
}
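
internal::AlignToNextPage() is defined elsewhere (in discardable_memory_android.cc); a plausible page-rounding sketch, assuming 4096-byte pages (the real implementation may differ):

    const size_t kPageSize = 4096;

    size_t AlignToNextPage(size_t size) {
      // Round up to the next multiple of the page size.
      return (size + kPageSize - 1) & ~(kPageSize - 1);
    }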
void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
    AshmemRegion* region) {
  lock_.AssertAcquired();
  // Note that there should not be more than a couple of ashmem region
  // instances in |ashmem_regions_|.
  const ScopedVector<AshmemRegion>::iterator it = std::find(
      ashmem_regions_.begin(), ashmem_regions_.end(), region);
  DCHECK_NE(ashmem_regions_.end(), it);
  std::swap(*it, ashmem_regions_.back());
  ashmem_regions_.resize(ashmem_regions_.size() - 1);

willchan (no longer on Chromium)
2013/11/28 06:16:37
why not pop_back() instead?
Philippe
2013/11/28 16:42:53
Yeah good point :) I added ScopedVector::pop_back(…

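The ending the thread converges on, assuming ScopedVector gains a pop_back() that also deletes the owned element (per the reply above):

    std::swap(*it, ashmem_regions_.back());
    ashmem_regions_.pop_back();  // Also deletes |region|.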
}

}  // namespace internal
}  // namespace base