Chromium Code Reviews

Side by Side Diff: base/memory/discardable_memory_allocator_android.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address reviewers' comments Created 7 years, 2 months ago
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/discardable_memory_allocator.h"
6
7 #include <cmath>
8 #include <set>
9 #include <utility>
10
11 #include "base/basictypes.h"
12 #include "base/compiler_specific.h"
13 #include "base/containers/hash_tables.h"
14 #include "base/logging.h"
15 #include "base/memory/discardable_memory.h"
16 #include "base/memory/discardable_memory_android.h"
17 #include "base/memory/scoped_vector.h"
18 #include "base/strings/stringprintf.h"
19 #include "base/synchronization/lock.h"
20 #include "base/threading/thread_checker.h"
21
22 namespace base {
23 namespace {
24
25 const size_t kDefaultAshmemRegionSize = 32 * 1024 * 1024;
26 const int kInvalidFD = -1;
27
28 class DiscardableMemoryChunk : public DiscardableMemory {
pasko 2013/10/22 20:14:29 A top-level comment would be good since it is not
Philippe 2013/10/23 11:46:56 I added a small top-level comment introducing very
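For reference, a minimal sketch of the kind of brief top-level comment being discussed (the wording below is illustrative, not the text actually added in the later patch set):

// A DiscardableMemory implementation backed by a sub-range of a larger ashmem
// region. The chunk is created locked and notifies its DeletionObserver (the
// owning AshmemRegion) when it is destroyed so that its range can be reused.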
29 public:
30 struct DeletionObserver {
31 virtual void OnChunkDeletion(int fd,
32 void* previous_chunk,
33 void* chunk,
34 size_t size) = 0;
35
36 protected:
37 ~DeletionObserver() {}
38 };
39
40 DiscardableMemoryChunk(DeletionObserver* deletion_observer,
41 int fd,
42 void* previous_chunk,
43 void* address,
44 size_t offset,
45 size_t size)
46 : deletion_observer_(deletion_observer),
47 fd_(fd),
48 previous_chunk_(previous_chunk),
49 address_(address),
50 offset_(offset),
51 size_(size),
52 locked_(true) {
53 }
54
55 virtual ~DiscardableMemoryChunk() {
56 if (locked_)
57 internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
58 deletion_observer_->OnChunkDeletion(fd_, previous_chunk_, address_, size_);
59 }
60
61 // DiscardableMemory:
62 virtual size_t Size() const OVERRIDE {
63 return size_;
64 }
65
66 virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
67 DCHECK(!locked_);
68 locked_ = true;
69 return internal::LockAshmemRegion(fd_, offset_, size_, address_);
70 }
71
72 virtual void Unlock() OVERRIDE {
73 DCHECK(locked_);
74 locked_ = false;
75 internal::UnlockAshmemRegion(fd_, offset_, size_, address_);
76 }
77
78 virtual void* Memory() const OVERRIDE {
79 return address_;
80 }
81
82 private:
83 DeletionObserver* const deletion_observer_;
84 const int fd_;
85 void* const previous_chunk_;
86 void* const address_;
87 const size_t offset_;
88 const size_t size_;
89 bool locked_;
90
91 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryChunk);
92 };
93
94 class AshmemRegion : public DiscardableMemoryChunk::DeletionObserver {
95 public:
96 AshmemRegion(size_t size, const std::string& name)
97 : fd_(kInvalidFD),
98 base_(NULL),
99 size_(size),
100 offset_(0),
101 name_(name),
102 last_allocated_chunk_(NULL) {
103 }
104
105 ~AshmemRegion() {
106 DCHECK(thread_checker_.CalledOnValidThread());
107 base::AutoLock auto_lock(lock_);
108 if (!AshmemRegionClosed())
109 CloseAshmemRegion();
110 }
111
112 scoped_ptr<DiscardableMemory> Allocate(size_t client_requested_size,
113 size_t actual_size) {
114 base::AutoLock auto_lock(lock_);
115 if (AshmemRegionClosed())
116 if (!OpenAshmemRegion())
pasko 2013/10/22 20:14:29 did you mean: if (AshmemRegionClosed() && !OpenAshmemRegion())
Philippe 2013/10/23 11:46:56 Yeah, it's the same, right? :)
117 return scoped_ptr<DiscardableMemory>();
118 scoped_ptr<DiscardableMemory> memory = RecycleFreeChunk(
119 client_requested_size, actual_size);
120 if (memory)
121 return memory.Pass();
122 if (size_ - offset_ < actual_size) {
123 // This region does not have enough space left to hold the requested size.
124 return scoped_ptr<DiscardableMemory>();
125 }
126 void* const address = static_cast<char*>(base_) + offset_;
127 memory.reset(
128 new DiscardableMemoryChunk(this, fd_, last_allocated_chunk_, address,
129 offset_, actual_size));
130 last_allocated_chunk_ = address;
131 offset_ += actual_size;
132 return memory.Pass();
133 }
134
135 private:
136 struct FreeChunk {
137 FreeChunk(void* previous_chunk, void* start, size_t size)
138 : previous_chunk(previous_chunk),
139 start(start),
140 size(size) {
141 }
142
143 void* const previous_chunk;
144 void* const start;
145 const size_t size;
146
147 bool is_null() const { return !start; }
148
149 bool operator<(const FreeChunk& other) const {
150 return size < other.size;
151 }
152 };
153
154 // DiscardableMemoryChunk::DeletionObserver:
155 virtual void OnChunkDeletion(int fd,
156 void* previous_chunk,
157 void* chunk,
158 size_t size) OVERRIDE {
159 base::AutoLock auto_lock(lock_);
160 AddFreeChunk(fd, previous_chunk, chunk, size,
161 MERGE_PREVIOUS_CHUNKS | MERGE_NEXT_CHUNKS);
162 }
163
164 // Tries to recycle a previously freed chunk by doing a closest size match.
pasko 2013/10/22 20:14:29 nit: A synonym for "recycle" would help in the comment.
Philippe 2013/10/23 11:46:56 Done.
165 scoped_ptr<DiscardableMemory> RecycleFreeChunk(size_t client_requested_size,
166 size_t actual_size) {
167 lock_.AssertAcquired();
168 const std::multiset<FreeChunk>::iterator chunk_it =
169 free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size));
170 if (chunk_it == free_chunks_.end())
171 return scoped_ptr<DiscardableMemory>();
172 size_t recycled_chunk_size = chunk_it->size;
173 const size_t fragmentation_kbytes =
174 (chunk_it->size - client_requested_size) / 1024;
175 if (fragmentation_kbytes >= 16) {
pasko 2013/10/22 20:14:29 the code has quite a few heuristics, how about mak
Philippe 2013/10/23 11:46:56 I'm not a huge fan generally of externalizing constants.
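As an illustration of the suggestion above, the 16 KiB threshold tested a few lines below could be pulled out into a named constant; the constant name here is hypothetical, not taken from the patch:

  // Split off the unused tail of a reused free chunk when it is at least this
  // large (otherwise the tail stays locked together with the reused part).
  const size_t kMaxChunkFragmentationBytes = 16 * 1024;
  ...
  if (chunk_it->size - client_requested_size >= kMaxChunkFragmentationBytes) {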
176 // Split the free chunk being recycled if it's too large so that its
177 // unused tail doesn't get recycled (i.e. locked) which would prevent it
178 // from being evicted under memory pressure.
179 const int fd = -1;
180 void* const previous_chunk = chunk_it->start;
181 AddFreeChunk(
182 fd, previous_chunk, static_cast<char*>(chunk_it->start) + actual_size,
183 chunk_it->size - actual_size, MERGE_NEXT_CHUNKS);
184 recycled_chunk_size = actual_size;
185 }
186 const size_t offset =
187 static_cast<char*>(chunk_it->start) - static_cast<char*>(base_);
188 internal::LockAshmemRegion(
189 fd_, offset, recycled_chunk_size, chunk_it->start);
190 scoped_ptr<DiscardableMemory> memory(
191 new DiscardableMemoryChunk(
192 this, fd_, chunk_it->previous_chunk, chunk_it->start, offset,
193 recycled_chunk_size));
194 free_chunk_for_address_.erase(reinterpret_cast<uintptr_t>(chunk_it->start));
195 free_chunks_.erase(chunk_it);
196 return memory.Pass();
197 }
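Since FreeChunk::operator<() compares free chunks by size only, free_chunks_ behaves as a size-ordered free list and the lower_bound() call above performs a best-fit lookup: it returns the smallest free chunk whose size is at least actual_size. For example, with free chunks of 8 KiB, 64 KiB and 1 MiB and a page-aligned 40 KiB request, lower_bound() selects the 64 KiB chunk; the 24 KiB left over exceed the 16 KiB fragmentation threshold, so the unused tail is split off and re-added as a free chunk instead of being locked along with the reused part.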
198
199 enum ContiguousChunksMergingFlags {
pasko 2013/10/22 20:14:29 OK, splitting chunks is probably a good idea, but
Philippe 2013/10/23 11:46:56 I did see that merging was happening in production
Philippe 2013/10/24 08:35:53 For the record Egor and I observed offline that we
200 MERGE_PREVIOUS_CHUNKS = 1,
201 MERGE_NEXT_CHUNKS = 2,
202 };
203
204 // Marks the chunk identified by the provided arguments as free and possibly
205 // merges it with the previous and next contiguous chunks according to
206 // the value of |chunk_merging_flags|.
207 // If the provided chunk is the only one used (and going to be freed) in the
208 // region then the internal ashmem region is closed so that the underlying
209 // physical pages are immediately released.
210 // Note that free chunks are unlocked, and can therefore be reclaimed by the
211 // kernel if needed (under memory pressure), but unfortunately they are not
212 // immediately released since madvise(MADV_REMOVE) and
213 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
214 // change with kernel versions >= 3.5 though.
215 void AddFreeChunk(int fd,
216 void* previous_chunk,
217 void* chunk,
218 size_t size,
219 int chunk_merging_flags) {
220 lock_.AssertAcquired();
221 size_t new_free_chunk_size = size;
222 void* first_free_chunk = chunk;
223 if (chunk_merging_flags & MERGE_PREVIOUS_CHUNKS) {
224 while (previous_chunk) {
225 const FreeChunk free_chunk = UnlinkFreeChunk(previous_chunk);
226 if (free_chunk.is_null())
227 break;
228 new_free_chunk_size += free_chunk.size;
229 first_free_chunk = previous_chunk;
230 previous_chunk = free_chunk.previous_chunk;
231 }
232 }
233 const void* next_chunk = static_cast<const char*>(chunk) + size;
234 if (chunk_merging_flags & MERGE_NEXT_CHUNKS) {
235 while (true) {
236 const FreeChunk free_chunk = UnlinkFreeChunk(next_chunk);
237 if (free_chunk.is_null())
238 break;
239 new_free_chunk_size += free_chunk.size;
240 next_chunk = static_cast<const char*>(next_chunk) + free_chunk.size;
241 }
242 }
243 const bool whole_ashmem_region_is_free = new_free_chunk_size == size_;
244 if (!whole_ashmem_region_is_free) {
245 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
246 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
247 free_chunk_for_address_.insert(
248 std::make_pair(reinterpret_cast<uintptr_t>(first_free_chunk), it));
249 return;
250 }
251 // The whole ashmem region is free, thus it can be closed. Note that deleting
252 // the instance and notifying the allocator would be cleaner (it would allow
253 // |fd_| and |base_| to be immutable in particular) but this would imply
254 // some non-trivial threading interactions since this method can be called
255 // on any thread and the allocator has its own lock.
256 DCHECK_EQ(size_, new_free_chunk_size);
257 DCHECK(free_chunks_.empty() && free_chunk_for_address_.empty());
258 DCHECK(!AshmemRegionClosed());
259 CloseAshmemRegion();
260 }
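To make the merging concrete, here is a small worked example of AddFreeChunk() above: suppose three contiguous chunks A, B and C were handed out in that order and A and C have already been freed. Deleting B calls OnChunkDeletion() with both merge flags set; the backward loop follows B's previous_chunk pointer and unlinks A, the forward loop finds C at B's end address and unlinks it, and a single FreeChunk spanning A, B and C (recorded under A's start address) is inserted. If those three chunks happened to cover the whole region, new_free_chunk_size would equal size_ and the ashmem region would be closed instead.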
261
262 // Finds and unlinks the free chunk, if any, whose start address is
263 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
264 // whose content is null if it was not found.
265 FreeChunk UnlinkFreeChunk(const void* chunk_start) {
266 lock_.AssertAcquired();
267 const base::hash_map<
268 uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
269 free_chunk_for_address_.find(
270 reinterpret_cast<uintptr_t>(chunk_start));
271 if (it == free_chunk_for_address_.end())
272 return FreeChunk(NULL, NULL, 0U);
273 const std::multiset<FreeChunk>::iterator free_chunk_it = it->second;
274 const FreeChunk free_chunk(*free_chunk_it);
275 DCHECK_EQ(chunk_start, free_chunk.start);
276 free_chunk_for_address_.erase(it);
277 free_chunks_.erase(free_chunk_it);
278 return free_chunk;
279 }
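The free_chunk_for_address_ map is what keeps this lookup cheap: it maps each free chunk's start address to its iterator in free_chunks_, so UnlinkFreeChunk() runs in expected constant time instead of scanning the multiset. Storing the iterators is safe because std::multiset iterators stay valid until the element they refer to is erased.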
280
281 bool AshmemRegionClosed() const {
282 lock_.AssertAcquired();
283 DCHECK((fd_ == kInvalidFD && !base_) || (fd_ != kInvalidFD && base_));
284 return fd_ == kInvalidFD;
285 }
286
287 void CloseAshmemRegion() {
288 lock_.AssertAcquired();
289 DCHECK(fd_ != kInvalidFD && base_);
290 const bool result = internal::DeleteAshmemRegion(fd_, size_, base_);
291 DCHECK(result);
292 fd_ = kInvalidFD;
293 base_ = NULL;
294 offset_ = 0U;
295 last_allocated_chunk_ = NULL;
296 }
297
298 bool OpenAshmemRegion() {
299 lock_.AssertAcquired();
300 DCHECK(fd_ == kInvalidFD && !base_ && !last_allocated_chunk_);
301 int fd;
302 void* address;
303 if (!internal::CreateAshmemRegion(name_.c_str(), size_, &fd, &address))
304 return false;
305 fd_ = fd;
306 base_ = address;
307 return true;
308 }
309
310 base::ThreadChecker thread_checker_;
311 base::Lock lock_; // Protects the state below.
312 int fd_;
313 void* base_;
314 const size_t size_;
315 size_t offset_;
316 const std::string name_;
317 void* last_allocated_chunk_;
318 std::multiset<FreeChunk> free_chunks_;
319 base::hash_map<
320 uintptr_t, std::multiset<FreeChunk>::iterator> free_chunk_for_address_;
321
322 DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
323 };
324
325 class DiscardableMemoryAllocatorAndroid : public DiscardableMemoryAllocator {
326 public:
327 DiscardableMemoryAllocatorAndroid(const std::string& name)
328 : name_(name),
329 force_use_allocator_(false) {
330 }
331
332 // Constructor used for testing. |force_use_allocator| specifies whether the
333 // allocator should unconditionally be used (i.e. not only once a certain
334 // number of open ashmem file descriptors has been reached). This is used for
335 // testing to make sure that the unit tests don't only exercise the fast path
336 // that doesn't use the allocator.
337 DiscardableMemoryAllocatorAndroid(const std::string& name,
338 bool force_use_allocator)
339 : name_(name),
340 force_use_allocator_(force_use_allocator) {
341 }
342
343 virtual ~DiscardableMemoryAllocatorAndroid() {
344 DCHECK(thread_checker_.CalledOnValidThread());
345 }
346
347 // DiscardableMemoryAllocator:
348 virtual scoped_ptr<DiscardableMemory> Allocate(size_t size) OVERRIDE {
349 // Use the actual allocator only once we are past a certain number of open
pasko 2013/10/22 20:14:29 I find this comment a bit confusing. It requires s
Philippe 2013/10/23 11:46:56 Thanks. This part is now in discardable_memory_and
350 // ashmem file descriptors (=90% of the ashmem fd limit). Not using the
351 // allocator allows us to immediately release the pages backing allocated
352 // ashmem regions (by closing the ashmem fd) when the client requests to
353 // delete them as opposed to only unlocking (=unpinning) them as the
354 // allocator does. Only unlocking them means that they will only be released
355 // under memory pressure, which could itself cause memory pressure.
356 const bool use_allocator = force_use_allocator_ ||
357 internal::GetCurrentNumberOfAshmemFDs() >
358 (0.9 * internal::GetAshmemFDLimit());
359 if (!use_allocator)
360 return DiscardableMemory::CreateLockedMemory(size);
361 const size_t aligned_size = internal::PageAlign(size);
362 base::AutoLock auto_lock(lock_);
363 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
364 it != ashmem_regions_.end(); ++it) {
365 scoped_ptr<DiscardableMemory> memory((*it)->Allocate(size, aligned_size));
366 if (memory)
367 return memory.Pass();
368 }
369 ashmem_regions_.push_back(
370 new AshmemRegion(
371 std::max(kDefaultAshmemRegionSize, aligned_size), name_.c_str()));
pasko 2013/10/22 20:14:29 The "Default" part in the name is confusing, shoul
Philippe 2013/10/23 11:46:56 Done.
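Because std::max() is applied above, the constant effectively acts as a lower bound on the region size rather than a fixed default, so a name along the lines of kMinAshmemRegionSize (illustrative; the renamed constant is not visible in this diff) would describe it more directly.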
372 return ashmem_regions_.back()->Allocate(size, aligned_size);
373 }
374
375 private:
376 base::ThreadChecker thread_checker_;
377 const std::string name_;
378 const bool force_use_allocator_; // Used for testing.
379 base::Lock lock_; // Protects the state below.
380 ScopedVector<AshmemRegion> ashmem_regions_;
381 };
382
383 } // namespace
384
385 scoped_ptr<DiscardableMemoryAllocator> DiscardableMemoryAllocator::Create(
386 const std::string& name) {
387 return scoped_ptr<DiscardableMemoryAllocator>(
388 new DiscardableMemoryAllocatorAndroid(name));
389 }
390
391 scoped_ptr<DiscardableMemoryAllocator>
392 CreateDiscardableMemoryAllocatorForTesting(const std::string& name) {
393 const bool force_use_allocator = true;
394 return scoped_ptr<DiscardableMemoryAllocator>(
395 new DiscardableMemoryAllocatorAndroid(name, force_use_allocator));
396 }
397
398 } // namespace base
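A minimal usage sketch of the allocator defined above, assuming the DiscardableMemoryAllocator interface declared in base/memory/discardable_memory_allocator.h and the DiscardableMemory interface from base/memory/discardable_memory.h. The function name and allocator name are made up for illustration, and the DISCARDABLE_MEMORY_SUCCESS enum value is assumed from that era's base/memory/discardable_memory.h rather than shown in this diff:

#include <string.h>

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/scoped_ptr.h"

// Illustrative only; not part of the patch.
void AllocateAndUseDiscardableMemory() {
  scoped_ptr<base::DiscardableMemoryAllocator> allocator =
      base::DiscardableMemoryAllocator::Create("example-allocator");
  // Allocations are returned locked, i.e. pinned and immediately usable.
  scoped_ptr<base::DiscardableMemory> memory = allocator->Allocate(64 * 1024);
  if (!memory)
    return;
  memset(memory->Memory(), 0, memory->Size());
  // Unlocking makes the pages eligible for reclaim under memory pressure.
  memory->Unlock();
  // Relock before touching the memory again; the contents may have been purged
  // in the meantime (enum value name assumed, see lead-in above).
  if (memory->Lock() == base::DISCARDABLE_MEMORY_SUCCESS) {
    // Contents were preserved.
    memory->Unlock();
  }
}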