Chromium Code Reviews

Side by Side Diff: base/memory/discardable_memory_allocation_ashmem_factory.cc

Issue 195863005: Use DiscardableMemoryManager on Android. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Use DiscardableMemoryController on Android Created 6 years, 9 months ago
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/memory/discardable_memory_allocator_android.h" 5 #include "base/memory/discardable_memory_allocation_ashmem_factory.h"
6 6
7 #include <sys/mman.h> 7 #include <sys/mman.h>
8 #include <unistd.h> 8 #include <unistd.h>
9 9
10 #include <algorithm> 10 #include <algorithm>
11 #include <cmath> 11 #include <cmath>
12 #include <limits> 12 #include <limits>
13 #include <set> 13 #include <set>
14 #include <utility> 14 #include <utility>
15 15
16 #include "base/basictypes.h" 16 #include "base/basictypes.h"
17 #include "base/containers/hash_tables.h" 17 #include "base/containers/hash_tables.h"
18 #include "base/file_util.h" 18 #include "base/file_util.h"
19 #include "base/files/scoped_file.h" 19 #include "base/files/scoped_file.h"
20 #include "base/logging.h" 20 #include "base/logging.h"
21 #include "base/memory/discardable_memory.h" 21 #include "base/memory/discardable_memory.h"
22 #include "base/memory/scoped_vector.h" 22 #include "base/memory/scoped_vector.h"
23 #include "base/synchronization/lock.h"
24 #include "base/threading/thread_checker.h" 23 #include "base/threading/thread_checker.h"
25 #include "third_party/ashmem/ashmem.h" 24 #include "third_party/ashmem/ashmem.h"
26 25
27 // The allocator consists of three parts (classes): 26 // This file consists of the following parts:
28 // - DiscardableMemoryAllocator: entry point of all allocations (through its 27 // - DiscardableMemoryAllocationAshmemFactory: entry point of all allocations
29 // Allocate() method) that are dispatched to the AshmemRegion instances (which 28 // (through its Create() method) that are dispatched to the AshmemRegion
30 // it owns). 29 // instances (which it owns).
31 // - AshmemRegion: manages allocations and destructions inside a single large 30 // - AshmemRegion: manages allocations and destructions inside a single large
32 // (e.g. 32 MBytes) ashmem region. 31 // (e.g. 32 MBytes) ashmem region.
33 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface 32 // - DiscardableAshmemChunk: class implementing the DiscardableMemoryAllocation
34 // whose instances are returned to the client. DiscardableAshmemChunk lets the 33 // interface whose instances are returned to the client. DiscardableAshmemChunk
35 // client seamlessly operate on a subrange of the ashmem region managed by 34 // lets the client seamlessly operate on a subrange of the ashmem region managed
36 // AshmemRegion. 35 // by AshmemRegion.
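In rough outline, the intended use of the new interface looks like this (a minimal sketch against the names this patch introduces; namespace qualifiers and error handling elided, sizes hypothetical):

  // Sketch only: the factory owns the large AshmemRegions; every allocation
  // is a DiscardableAshmemChunk covering a subrange of one of them.
  DiscardableMemoryAllocationAshmemFactory factory("sketch", 32 * 1024 * 1024);
  scoped_ptr<DiscardableMemoryAllocation> allocation(
      factory.CreateLockedAllocation(1024));
  if (allocation) {
    void* data = allocation->Memory();  // valid while locked
    allocation->Unlock();               // contents may be purged from here on
    if (allocation->Lock()) {
      // Lock() returned true: contents survived and |data| is usable again.
    }
  }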
37 36
38 namespace base { 37 namespace base {
39 namespace { 38 namespace {
40 39
41 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed 40 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed
42 // to the allocator when a free chunk is reused). The client can cause such 41 // to the allocator when a free chunk is reused). The client can cause such
43 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to 42 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to
44 // 8192 by the allocator, which would cause 4095 bytes of fragmentation (which is 43 // 8192 by the allocator, which would cause 4095 bytes of fragmentation (which is
45 // currently the maximum allowed). If the client requests 4096 bytes and a free 44 // currently the maximum allowed). If the client requests 4096 bytes and a free
46 // chunk of 8192 bytes is available then the free chunk gets split into two 45 // chunk of 8192 bytes is available then the free chunk gets split into two
(...skipping 48 matching lines...)
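For concreteness, the arithmetic behind this fragmentation policy (a sketch assuming 4096-byte pages; kMaxChunkFragmentationBytes itself is defined in the elided lines):

  const size_t kPageSize = 4096;
  // Page rounding as the allocator performs it (overflow handling elided):
  size_t aligned = (4097 + kPageSize - 1) & ~(kPageSize - 1);  // == 8192
  size_t wasted = aligned - 4097;                              // == 4095
  // 4095 bytes is thus the worst waste the client alone can cause.
  // Serving a 4096-byte request from an 8192-byte free chunk would instead
  // waste 8192 - 4096 = 4096 > 4095 bytes, so the free chunk is split into
  // two 4096-byte chunks and only the first one is handed out.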
95 94
96 bool CloseAshmemRegion(int fd, size_t size, void* address) { 95 bool CloseAshmemRegion(int fd, size_t size, void* address) {
97 if (munmap(address, size) == -1) { 96 if (munmap(address, size) == -1) {
98 DPLOG(ERROR) << "Failed to unmap memory."; 97 DPLOG(ERROR) << "Failed to unmap memory.";
99 close(fd); 98 close(fd);
100 return false; 99 return false;
101 } 100 }
102 return close(fd) == 0; 101 return close(fd) == 0;
103 } 102 }
104 103
105 DiscardableMemoryLockStatus LockAshmemRegion(int fd, 104 bool LockAshmemRegion(int fd, size_t off, size_t size, const void* address) {
106 size_t off,
107 size_t size,
108 const void* address) {
109 const int result = ashmem_pin_region(fd, off, size); 105 const int result = ashmem_pin_region(fd, off, size);
110 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); 106 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE));
111 return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED 107 return result != ASHMEM_WAS_PURGED;
112 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
113 } 108 }
114 109
115 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { 110 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) {
116 const int failed = ashmem_unpin_region(fd, off, size); 111 const int failed = ashmem_unpin_region(fd, off, size);
117 if (failed) 112 if (failed)
118 DLOG(ERROR) << "Failed to unpin memory."; 113 DLOG(ERROR) << "Failed to unpin memory.";
119 // This allows us to catch accesses to unlocked memory. 114 // This allows us to catch accesses to unlocked memory.
120 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); 115 DCHECK_EQ(0, mprotect(address, size, PROT_NONE));
121 return !failed; 116 return !failed;
122 } 117 }
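For reference, the pin/unpin contract the two helpers above rely on (a sketch of the ashmem semantics, not part of the patch):

  // ashmem_unpin_region() marks a range purgeable: the kernel may reclaim
  // its pages at any time. ashmem_pin_region() makes the range resident
  // again and reports whether a purge happened in between:
  bool LockedContentsIntact(int fd, size_t off, size_t size) {
    return ashmem_pin_region(fd, off, size) != ASHMEM_WAS_PURGED;
  }
  // The mprotect() calls are purely a debugging aid: PROT_NONE while
  // unlocked turns a stray access into an immediate crash instead of a
  // silent read of purged data.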
123 118
124 } // namespace 119 } // namespace
125 120
126 namespace internal { 121 namespace internal {
127 122
128 class DiscardableMemoryAllocator::DiscardableAshmemChunk 123 class DiscardableMemoryAllocationAshmemFactory::DiscardableAshmemChunk
129 : public DiscardableMemory { 124 : public DiscardableMemoryAllocation {
130 public: 125 public:
131 // Note that |ashmem_region| must outlive |this|. 126 // Note that |ashmem_region| must outlive |this|.
132 DiscardableAshmemChunk(AshmemRegion* ashmem_region, 127 DiscardableAshmemChunk(AshmemRegion* ashmem_region,
133 int fd, 128 int fd,
134 void* address, 129 void* address,
135 size_t offset, 130 size_t offset,
136 size_t size) 131 size_t size)
137 : ashmem_region_(ashmem_region), 132 : ashmem_region_(ashmem_region),
138 fd_(fd), 133 fd_(fd),
139 address_(address), 134 address_(address),
140 offset_(offset), 135 offset_(offset),
141 size_(size), 136 size_(size),
142 locked_(true) { 137 locked_(true) {
143 } 138 }
144 139
145 // Implemented below AshmemRegion since this requires the full definition of 140 // Implemented below AshmemRegion since this requires the full definition of
146 // AshmemRegion. 141 // AshmemRegion.
147 virtual ~DiscardableAshmemChunk(); 142 virtual ~DiscardableAshmemChunk();
148 143
149 // DiscardableMemory: 144 // DiscardableMemoryAllocation:
150 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { 145 virtual bool Lock() OVERRIDE {
151 DCHECK(!locked_); 146 DCHECK(!locked_);
152 locked_ = true; 147 locked_ = true;
153 return LockAshmemRegion(fd_, offset_, size_, address_); 148 return LockAshmemRegion(fd_, offset_, size_, address_);
154 } 149 }
155 150
156 virtual void Unlock() OVERRIDE { 151 virtual void Unlock() OVERRIDE {
157 DCHECK(locked_); 152 DCHECK(locked_);
158 locked_ = false; 153 locked_ = false;
159 UnlockAshmemRegion(fd_, offset_, size_, address_); 154 UnlockAshmemRegion(fd_, offset_, size_, address_);
160 } 155 }
161 156
162 virtual void* Memory() const OVERRIDE { 157 virtual void* Memory() OVERRIDE {
163 return address_; 158 return address_;
164 } 159 }
165 160
166 private: 161 private:
167 AshmemRegion* const ashmem_region_; 162 AshmemRegion* const ashmem_region_;
168 const int fd_; 163 const int fd_;
169 void* const address_; 164 void* const address_;
170 const size_t offset_; 165 const size_t offset_;
171 const size_t size_; 166 const size_t size_;
172 bool locked_; 167 bool locked_;
173 168
174 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); 169 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
175 }; 170 };
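A chunk's lifetime spelled out (illustrative fragment, continuing the factory sketch above):

  // DiscardableAshmemChunk is born locked (locked_ == true), so the typical
  // pattern is: use, Unlock(), later Lock() again and check the result.
  scoped_ptr<DiscardableMemoryAllocation> chunk(
      factory.CreateLockedAllocation(4096));
  memset(chunk->Memory(), 0, 4096);  // safe: the chunk starts locked
  chunk->Unlock();                   // eligible for purging from here on
  bool intact = chunk->Lock();       // false: contents were purged meanwhile
  chunk.reset();                     // ~DiscardableAshmemChunk() unlocks if
                                     // needed, then OnChunkDeletion() returns
                                     // the range to the owning AshmemRegion.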
176 171
177 class DiscardableMemoryAllocator::AshmemRegion { 172 class DiscardableMemoryAllocationAshmemFactory::AshmemRegion {
178 public: 173 public:
179 // Note that |allocator| must outlive |this|. 174 // Note that |allocator| must outlive |this|.
180 static scoped_ptr<AshmemRegion> Create( 175 static scoped_ptr<AshmemRegion> Create(
181 size_t size, 176 size_t size,
182 const std::string& name, 177 const std::string& name,
183 DiscardableMemoryAllocator* allocator) { 178 DiscardableMemoryAllocationAshmemFactory* allocator) {
184 DCHECK_EQ(size, AlignToNextPage(size)); 179 DCHECK_EQ(size, AlignToNextPage(size));
185 int fd; 180 int fd;
186 void* base; 181 void* base;
187 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) 182 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base))
188 return scoped_ptr<AshmemRegion>(); 183 return scoped_ptr<AshmemRegion>();
189 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); 184 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
190 } 185 }
191 186
192 ~AshmemRegion() { 187 ~AshmemRegion() {
193 const bool result = CloseAshmemRegion(fd_, size_, base_); 188 const bool result = CloseAshmemRegion(fd_, size_, base_);
194 DCHECK(result); 189 DCHECK(result);
195 DCHECK(!highest_allocated_chunk_); 190 DCHECK(!highest_allocated_chunk_);
196 } 191 }
197 192
198 // Returns a new instance of DiscardableMemory whose size is greater than or 193 // Returns a new instance of DiscardableMemoryAllocation whose size is greater
199 // equal to |actual_size| (which is expected to be greater than or equal to 194 // than or equal to |actual_size| (which is expected to be greater than or
200 // |client_requested_size|). 195 // equal to |client_requested_size|).
201 // Allocation works as follows: 196 // Allocation works as follows:
202 // 1) Reuse a previously freed chunk and return it if it succeeded. See 197 // 1) Reuse a previously freed chunk and return it if it succeeded. See
203 // ReuseFreeChunk_Locked() below for more information. 198 // ReuseFreeChunk() below for more information.
204 // 2) If no free chunk could be reused and the region is not big enough for 199 // 2) If no free chunk could be reused and the region is not big enough for
205 // the requested size then NULL is returned. 200 // the requested size then NULL is returned.
206 // 3) If there is enough room in the ashmem region then a new chunk is 201 // 3) If there is enough room in the ashmem region then a new chunk is
207 // returned. This new chunk starts at |offset_| which is the end of the 202 // returned. This new chunk starts at |offset_| which is the end of the
208 // previously highest chunk in the region. 203 // previously highest chunk in the region.
209 scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size, 204 scoped_ptr<DiscardableMemoryAllocation> Allocate(size_t client_requested_size,
210 size_t actual_size) { 205 size_t actual_size) {
211 DCHECK_LE(client_requested_size, actual_size); 206 DCHECK_LE(client_requested_size, actual_size);
212 allocator_->lock_.AssertAcquired();
213 207
214 // Check that the |highest_allocated_chunk_| field doesn't contain a stale 208 // Check that the |highest_allocated_chunk_| field doesn't contain a stale
215 // pointer. It should point to either a free chunk or a used chunk. 209 // pointer. It should point to either a free chunk or a used chunk.
216 DCHECK(!highest_allocated_chunk_ || 210 DCHECK(!highest_allocated_chunk_ ||
217 address_to_free_chunk_map_.find(highest_allocated_chunk_) != 211 address_to_free_chunk_map_.find(highest_allocated_chunk_) !=
218 address_to_free_chunk_map_.end() || 212 address_to_free_chunk_map_.end() ||
219 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != 213 used_to_previous_chunk_map_.find(highest_allocated_chunk_) !=
220 used_to_previous_chunk_map_.end()); 214 used_to_previous_chunk_map_.end());
221 215
222 scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked( 216 scoped_ptr<DiscardableMemoryAllocation> memory = ReuseFreeChunk(
223 client_requested_size, actual_size); 217 client_requested_size, actual_size);
224 if (memory) 218 if (memory)
225 return memory.Pass(); 219 return memory.Pass();
226 220
227 if (size_ - offset_ < actual_size) { 221 if (size_ - offset_ < actual_size) {
228 // This region does not have enough space left to hold the requested size. 222 // This region does not have enough space left to hold the requested size.
229 return scoped_ptr<DiscardableMemory>(); 223 return scoped_ptr<DiscardableMemoryAllocation>();
230 } 224 }
231 225
232 void* const address = static_cast<char*>(base_) + offset_; 226 void* const address = static_cast<char*>(base_) + offset_;
233 memory.reset( 227 memory.reset(
234 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); 228 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
235 229
236 used_to_previous_chunk_map_.insert( 230 used_to_previous_chunk_map_.insert(
237 std::make_pair(address, highest_allocated_chunk_)); 231 std::make_pair(address, highest_allocated_chunk_));
238 highest_allocated_chunk_ = address; 232 highest_allocated_chunk_ = address;
239 offset_ += actual_size; 233 offset_ += actual_size;
240 DCHECK_LE(offset_, size_); 234 DCHECK_LE(offset_, size_);
241 return memory.Pass(); 235 return memory.Pass();
242 } 236 }
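Step 3 above is a plain bump-pointer allocation; its essentials in isolation (sketch):

  // [base_, base_ + offset_) is already carved into used/free chunks;
  // [base_ + offset_, base_ + size_) is untouched space. A brand-new chunk
  // simply advances the cursor:
  void* address = static_cast<char*>(base_) + offset_;  // new chunk start
  offset_ += actual_size;                               // bump the cursor
  // Recording highest_allocated_chunk_ as |address|'s previous chunk lets
  // chunks freed in any order be stitched back together later.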
243 237
244 void OnChunkDeletion(void* chunk, size_t size) { 238 void OnChunkDeletion(void* chunk, size_t size) {
245 AutoLock auto_lock(allocator_->lock_); 239 MergeAndAddFreeChunk(chunk, size);
246 MergeAndAddFreeChunk_Locked(chunk, size);
247 // Note that |this| might be deleted beyond this point. 240 // Note that |this| might be deleted beyond this point.
248 } 241 }
249 242
250 private: 243 private:
251 struct FreeChunk { 244 struct FreeChunk {
252 FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {} 245 FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {}
253 246
254 explicit FreeChunk(size_t size) 247 explicit FreeChunk(size_t size)
255 : previous_chunk(NULL), 248 : previous_chunk(NULL),
256 start(NULL), 249 start(NULL),
(...skipping 15 matching lines...)
272 265
273 bool operator<(const FreeChunk& other) const { 266 bool operator<(const FreeChunk& other) const {
274 return size < other.size; 267 return size < other.size;
275 } 268 }
276 }; 269 };
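Because operator<() orders FreeChunk by size, free_chunks_.lower_bound() is a best-fit lookup in O(log N). The same mechanics with bare sizes (self-contained illustration, std::multiset only):

  #include <cstddef>
  #include <set>

  int main() {
    std::multiset<size_t> free_sizes;  // stand-in for free_chunks_
    free_sizes.insert(4096);
    free_sizes.insert(8192);
    free_sizes.insert(8192);           // duplicate sizes: hence multiset<>
    free_sizes.insert(32768);
    // Best fit for a 5000-byte request: the smallest element >= 5000.
    std::multiset<size_t>::iterator it = free_sizes.lower_bound(5000);
    if (it != free_sizes.end())        // here *it == 8192
      free_sizes.erase(it);            // erase(iterator) removes one element
    return 0;
  }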
277 270
278 // Note that |allocator| must outlive |this|. 271 // Note that |allocator| must outlive |this|.
279 AshmemRegion(int fd, 272 AshmemRegion(int fd,
280 size_t size, 273 size_t size,
281 void* base, 274 void* base,
282 DiscardableMemoryAllocator* allocator) 275 DiscardableMemoryAllocationAshmemFactory* allocator)
283 : fd_(fd), 276 : fd_(fd),
284 size_(size), 277 size_(size),
285 base_(base), 278 base_(base),
286 allocator_(allocator), 279 allocator_(allocator),
287 highest_allocated_chunk_(NULL), 280 highest_allocated_chunk_(NULL),
288 offset_(0) { 281 offset_(0) {
289 DCHECK_GE(fd_, 0); 282 DCHECK_GE(fd_, 0);
290 DCHECK_GE(size, kMinAshmemRegionSize); 283 DCHECK_GE(size, kMinAshmemRegionSize);
291 DCHECK(base); 284 DCHECK(base);
292 DCHECK(allocator); 285 DCHECK(allocator);
293 } 286 }
294 287
295 // Tries to reuse a previously freed chunk by doing a closest size match. 288 // Tries to reuse a previously freed chunk by doing a closest size match.
296 scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked( 289 scoped_ptr<DiscardableMemoryAllocation> ReuseFreeChunk(
297 size_t client_requested_size, 290 size_t client_requested_size,
298 size_t actual_size) { 291 size_t actual_size) {
299 allocator_->lock_.AssertAcquired(); 292 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator(
300 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
301 free_chunks_.lower_bound(FreeChunk(actual_size))); 293 free_chunks_.lower_bound(FreeChunk(actual_size)));
302 if (reused_chunk.is_null()) 294 if (reused_chunk.is_null())
303 return scoped_ptr<DiscardableMemory>(); 295 return scoped_ptr<DiscardableMemoryAllocation>();
304 296
305 used_to_previous_chunk_map_.insert( 297 used_to_previous_chunk_map_.insert(
306 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); 298 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
307 size_t reused_chunk_size = reused_chunk.size; 299 size_t reused_chunk_size = reused_chunk.size;
308 // |client_requested_size| is used below rather than |actual_size| to 300 // |client_requested_size| is used below rather than |actual_size| to
309 // reflect the number of bytes that would not be usable by the client (i.e. 301 // reflect the number of bytes that would not be usable by the client (i.e.
310 // wasted). Using |actual_size| instead would not allow us to detect 302 // wasted). Using |actual_size| instead would not allow us to detect
311 // fragmentation caused by the client if it made misaligned allocations. 303 // fragmentation caused by the client if it made misaligned allocations.
312 DCHECK_GE(reused_chunk.size, client_requested_size); 304 DCHECK_GE(reused_chunk.size, client_requested_size);
313 const size_t fragmentation_bytes = 305 const size_t fragmentation_bytes =
314 reused_chunk.size - client_requested_size; 306 reused_chunk.size - client_requested_size;
315 307
316 if (fragmentation_bytes > kMaxChunkFragmentationBytes) { 308 if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
317 // Split the free chunk being recycled so that its unused tail doesn't get 309 // Split the free chunk being recycled so that its unused tail doesn't get
318 // reused (i.e. locked) which would prevent it from being evicted under 310 // reused (i.e. locked) which would prevent it from being evicted under
319 // memory pressure. 311 // memory pressure.
320 reused_chunk_size = actual_size; 312 reused_chunk_size = actual_size;
321 void* const new_chunk_start = 313 void* const new_chunk_start =
322 static_cast<char*>(reused_chunk.start) + actual_size; 314 static_cast<char*>(reused_chunk.start) + actual_size;
323 if (reused_chunk.start == highest_allocated_chunk_) { 315 if (reused_chunk.start == highest_allocated_chunk_) {
324 // We also need to update the pointer to the highest allocated chunk in 316 // We also need to update the pointer to the highest allocated chunk in
325 // case we are splitting the highest chunk. 317 // case we are splitting the highest chunk.
326 highest_allocated_chunk_ = new_chunk_start; 318 highest_allocated_chunk_ = new_chunk_start;
327 } 319 }
328 DCHECK_GT(reused_chunk.size, actual_size); 320 DCHECK_GT(reused_chunk.size, actual_size);
329 const size_t new_chunk_size = reused_chunk.size - actual_size; 321 const size_t new_chunk_size = reused_chunk.size - actual_size;
330 // Note that merging is not needed here since there can't be contiguous 322 // Note that merging is not needed here since there can't be contiguous
331 // free chunks at this point. 323 // free chunks at this point.
332 AddFreeChunk_Locked( 324 AddFreeChunk(
333 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); 325 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size));
334 } 326 }
335 327
336 const size_t offset = 328 const size_t offset =
337 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); 329 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_);
338 LockAshmemRegion(fd_, offset, reused_chunk_size, reused_chunk.start); 330 LockAshmemRegion(fd_, offset, reused_chunk_size, reused_chunk.start);
339 scoped_ptr<DiscardableMemory> memory( 331 scoped_ptr<DiscardableMemoryAllocation> memory(
340 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, 332 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset,
341 reused_chunk_size)); 333 reused_chunk_size));
342 return memory.Pass(); 334 return memory.Pass();
343 } 335 }
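A numeric trace of the split path above (hypothetical sizes):

  // client_requested_size = 4096, actual_size = 4096,
  // best-fit reused_chunk.size = 16384:
  //   fragmentation_bytes = 16384 - 4096 = 12288 > kMaxChunkFragmentationBytes
  //   -> reused_chunk_size becomes 4096;
  //      [start + 4096, start + 16384) is re-filed as a 12288-byte FreeChunk
  //      that stays unpinned (so the kernel can still purge it), and only
  //      [start, start + 4096) is pinned and handed back to the client.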
344 336
345 // Makes the chunk identified with the provided arguments free and possibly 337 // Makes the chunk identified with the provided arguments free and possibly
346 // merges this chunk with the previous and next contiguous ones. 338 // merges this chunk with the previous and next contiguous ones.
347 // If the provided chunk is the only one used (and going to be freed) in the 339 // If the provided chunk is the only one used (and going to be freed) in the
348 // region then the internal ashmem region is closed so that the underlying 340 // region then the internal ashmem region is closed so that the underlying
349 // physical pages are immediately released. 341 // physical pages are immediately released.
350 // Note that free chunks are unlocked, and can therefore be reclaimed by the 342 // Note that free chunks are unlocked, and can therefore be reclaimed by the
351 // kernel under memory pressure, but unfortunately they are not immediately 343 // kernel under memory pressure, but unfortunately they are not immediately
352 // released since madvise(MADV_REMOVE) and 344 // released since madvise(MADV_REMOVE) and
353 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might 345 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
354 // change in kernel versions >= 3.5 though. The fact that free chunks are 346 // change in kernel versions >= 3.5 though. The fact that free chunks are
355 // not immediately released is why we try to minimize fragmentation, so as 347 // not immediately released is why we try to minimize fragmentation, so as
356 // not to cause "artificial" memory pressure. 348 // not to cause "artificial" memory pressure.
357 void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) { 349 void MergeAndAddFreeChunk(void* chunk, size_t size) {
358 allocator_->lock_.AssertAcquired();
359 size_t new_free_chunk_size = size; 350 size_t new_free_chunk_size = size;
360 // Merge with the previous chunk. 351 // Merge with the previous chunk.
361 void* first_free_chunk = chunk; 352 void* first_free_chunk = chunk;
362 DCHECK(!used_to_previous_chunk_map_.empty()); 353 DCHECK(!used_to_previous_chunk_map_.empty());
363 const hash_map<void*, void*>::iterator previous_chunk_it = 354 const hash_map<void*, void*>::iterator previous_chunk_it =
364 used_to_previous_chunk_map_.find(chunk); 355 used_to_previous_chunk_map_.find(chunk);
365 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end()); 356 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
366 void* previous_chunk = previous_chunk_it->second; 357 void* previous_chunk = previous_chunk_it->second;
367 used_to_previous_chunk_map_.erase(previous_chunk_it); 358 used_to_previous_chunk_map_.erase(previous_chunk_it);
368 359
369 if (previous_chunk) { 360 if (previous_chunk) {
370 const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk); 361 const FreeChunk free_chunk = RemoveFreeChunk(previous_chunk);
371 if (!free_chunk.is_null()) { 362 if (!free_chunk.is_null()) {
372 new_free_chunk_size += free_chunk.size; 363 new_free_chunk_size += free_chunk.size;
373 first_free_chunk = previous_chunk; 364 first_free_chunk = previous_chunk;
374 if (chunk == highest_allocated_chunk_) 365 if (chunk == highest_allocated_chunk_)
375 highest_allocated_chunk_ = previous_chunk; 366 highest_allocated_chunk_ = previous_chunk;
376 367
377 // There should not be more contiguous previous free chunks. 368 // There should not be more contiguous previous free chunks.
378 previous_chunk = free_chunk.previous_chunk; 369 previous_chunk = free_chunk.previous_chunk;
379 DCHECK(!address_to_free_chunk_map_.count(previous_chunk)); 370 DCHECK(!address_to_free_chunk_map_.count(previous_chunk));
380 } 371 }
381 } 372 }
382 373
383 // Merge with the next chunk if free and present. 374 // Merge with the next chunk if free and present.
384 void* next_chunk = static_cast<char*>(chunk) + size; 375 void* next_chunk = static_cast<char*>(chunk) + size;
385 const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk); 376 const FreeChunk next_free_chunk = RemoveFreeChunk(next_chunk);
386 if (!next_free_chunk.is_null()) { 377 if (!next_free_chunk.is_null()) {
387 new_free_chunk_size += next_free_chunk.size; 378 new_free_chunk_size += next_free_chunk.size;
388 if (next_free_chunk.start == highest_allocated_chunk_) 379 if (next_free_chunk.start == highest_allocated_chunk_)
389 highest_allocated_chunk_ = first_free_chunk; 380 highest_allocated_chunk_ = first_free_chunk;
390 381
391 // Same as above. 382 // Same as above.
392 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) + 383 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) +
393 next_free_chunk.size)); 384 next_free_chunk.size));
394 } 385 }
395 386
396 const bool whole_ashmem_region_is_free = 387 const bool whole_ashmem_region_is_free =
397 used_to_previous_chunk_map_.empty(); 388 used_to_previous_chunk_map_.empty();
398 if (!whole_ashmem_region_is_free) { 389 if (!whole_ashmem_region_is_free) {
399 AddFreeChunk_Locked( 390 AddFreeChunk(
400 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); 391 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
401 return; 392 return;
402 } 393 }
403 394
404 // The whole ashmem region is free thus it can be deleted. 395 // The whole ashmem region is free thus it can be deleted.
405 DCHECK_EQ(base_, first_free_chunk); 396 DCHECK_EQ(base_, first_free_chunk);
406 DCHECK_EQ(base_, highest_allocated_chunk_); 397 DCHECK_EQ(base_, highest_allocated_chunk_);
407 DCHECK(free_chunks_.empty()); 398 DCHECK(free_chunks_.empty());
408 DCHECK(address_to_free_chunk_map_.empty()); 399 DCHECK(address_to_free_chunk_map_.empty());
409 DCHECK(used_to_previous_chunk_map_.empty()); 400 DCHECK(used_to_previous_chunk_map_.empty());
410 highest_allocated_chunk_ = NULL; 401 highest_allocated_chunk_ = NULL;
411 allocator_->DeleteAshmemRegion_Locked(this); // Deletes |this|. 402 allocator_->DeleteAshmemRegion(this); // Deletes |this|.
412 } 403 }
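The merge cases handled above, pictured (illustrative):

  // Before freeing the used chunk U:   [ prev F ][ U ][ next F ] ...
  // After MergeAndAddFreeChunk():      [     one FreeChunk     ] ...
  // The previous-chunk link comes from used_to_previous_chunk_map_, and
  // highest_allocated_chunk_ is re-pointed if it referenced U or |next|.
  // Special case: once no used chunk remains, all the maps are empty and the
  // whole region is deleted, the only way to hand the pages back to the
  // kernel immediately (unpinned chunks are otherwise reclaimed lazily).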
413 404
414 void AddFreeChunk_Locked(const FreeChunk& free_chunk) { 405 void AddFreeChunk(const FreeChunk& free_chunk) {
415 allocator_->lock_.AssertAcquired();
416 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( 406 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
417 free_chunk); 407 free_chunk);
418 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it)); 408 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
419 // Update the next used contiguous chunk, if any, since its previous chunk 409 // Update the next used contiguous chunk, if any, since its previous chunk
420 // may have changed due to free chunks merging/splitting. 410 // may have changed due to free chunks merging/splitting.
421 void* const next_used_contiguous_chunk = 411 void* const next_used_contiguous_chunk =
422 static_cast<char*>(free_chunk.start) + free_chunk.size; 412 static_cast<char*>(free_chunk.start) + free_chunk.size;
423 hash_map<void*, void*>::iterator previous_it = 413 hash_map<void*, void*>::iterator previous_it =
424 used_to_previous_chunk_map_.find(next_used_contiguous_chunk); 414 used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
425 if (previous_it != used_to_previous_chunk_map_.end()) 415 if (previous_it != used_to_previous_chunk_map_.end())
426 previous_it->second = free_chunk.start; 416 previous_it->second = free_chunk.start;
427 } 417 }
428 418
429 // Finds and removes the free chunk, if any, whose start address is 419 // Finds and removes the free chunk, if any, whose start address is
430 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk 420 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
431 // whose content is null if it was not found. 421 // whose content is null if it was not found.
432 FreeChunk RemoveFreeChunk_Locked(void* chunk_start) { 422 FreeChunk RemoveFreeChunk(void* chunk_start) {
433 allocator_->lock_.AssertAcquired();
434 const hash_map< 423 const hash_map<
435 void*, std::multiset<FreeChunk>::iterator>::iterator it = 424 void*, std::multiset<FreeChunk>::iterator>::iterator it =
436 address_to_free_chunk_map_.find(chunk_start); 425 address_to_free_chunk_map_.find(chunk_start);
437 if (it == address_to_free_chunk_map_.end()) 426 if (it == address_to_free_chunk_map_.end())
438 return FreeChunk(); 427 return FreeChunk();
439 return RemoveFreeChunkFromIterator_Locked(it->second); 428 return RemoveFreeChunkFromIterator(it->second);
440 } 429 }
441 430
442 // Same as above but takes an iterator in. 431 // Same as above but takes an iterator in.
443 FreeChunk RemoveFreeChunkFromIterator_Locked( 432 FreeChunk RemoveFreeChunkFromIterator(
444 std::multiset<FreeChunk>::iterator free_chunk_it) { 433 std::multiset<FreeChunk>::iterator free_chunk_it) {
445 allocator_->lock_.AssertAcquired();
446 if (free_chunk_it == free_chunks_.end()) 434 if (free_chunk_it == free_chunks_.end())
447 return FreeChunk(); 435 return FreeChunk();
448 DCHECK(free_chunk_it != free_chunks_.end()); 436 DCHECK(free_chunk_it != free_chunks_.end());
449 const FreeChunk free_chunk(*free_chunk_it); 437 const FreeChunk free_chunk(*free_chunk_it);
450 address_to_free_chunk_map_.erase(free_chunk_it->start); 438 address_to_free_chunk_map_.erase(free_chunk_it->start);
451 free_chunks_.erase(free_chunk_it); 439 free_chunks_.erase(free_chunk_it);
452 return free_chunk; 440 return free_chunk;
453 } 441 }
454 442
455 const int fd_; 443 const int fd_;
456 const size_t size_; 444 const size_t size_;
457 void* const base_; 445 void* const base_;
458 DiscardableMemoryAllocator* const allocator_; 446 DiscardableMemoryAllocationAshmemFactory* const allocator_;
459 // Points to the chunk with the highest address in the region. This pointer 447 // Points to the chunk with the highest address in the region. This pointer
460 // needs to be carefully updated when chunks are merged/split. 448 // needs to be carefully updated when chunks are merged/split.
461 void* highest_allocated_chunk_; 449 void* highest_allocated_chunk_;
462 // Points to the end of |highest_allocated_chunk_|. 450 // Points to the end of |highest_allocated_chunk_|.
463 size_t offset_; 451 size_t offset_;
464 // Allows free chunks recycling (lookup, insertion and removal) in O(log N). 452 // Allows free chunks recycling (lookup, insertion and removal) in O(log N).
465 // Note that FreeChunk values are indexed by their size and also note that 453 // Note that FreeChunk values are indexed by their size and also note that
466 // multiple free chunks can have the same size (which is why multiset<> is 454 // multiple free chunks can have the same size (which is why multiset<> is
467 // used instead of e.g. set<>). 455 // used instead of e.g. set<>).
468 std::multiset<FreeChunk> free_chunks_; 456 std::multiset<FreeChunk> free_chunks_;
469 // Used while merging free contiguous chunks to erase free chunks (from their 457 // Used while merging free contiguous chunks to erase free chunks (from their
470 // start address) in constant time. Note that multiset<>::{insert,erase}() 458 // start address) in constant time. Note that multiset<>::{insert,erase}()
471 // don't invalidate iterators (except the one for the element being removed 459 // don't invalidate iterators (except the one for the element being removed
472 // obviously). 460 // obviously).
473 hash_map< 461 hash_map<
474 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; 462 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
475 // Maps the address of *used* chunks to the address of their previous 463 // Maps the address of *used* chunks to the address of their previous
476 // contiguous chunk. 464 // contiguous chunk.
477 hash_map<void*, void*> used_to_previous_chunk_map_; 465 hash_map<void*, void*> used_to_previous_chunk_map_;
478 466
479 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); 467 DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
480 }; 468 };
481 469
482 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { 470 DiscardableMemoryAllocationAshmemFactory::DiscardableAshmemChunk::
471 ~DiscardableAshmemChunk() {
483 if (locked_) 472 if (locked_)
484 UnlockAshmemRegion(fd_, offset_, size_, address_); 473 UnlockAshmemRegion(fd_, offset_, size_, address_);
485 ashmem_region_->OnChunkDeletion(address_, size_); 474 ashmem_region_->OnChunkDeletion(address_, size_);
486 } 475 }
487 476
488 DiscardableMemoryAllocator::DiscardableMemoryAllocator( 477 DiscardableMemoryAllocationAshmemFactory::
478 DiscardableMemoryAllocationAshmemFactory(
489 const std::string& name, 479 const std::string& name,
490 size_t ashmem_region_size) 480 size_t ashmem_region_size)
491 : name_(name), 481 : name_(name),
492 ashmem_region_size_( 482 ashmem_region_size_(
493 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), 483 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))),
494 last_ashmem_region_size_(0) { 484 last_ashmem_region_size_(0) {
495 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); 485 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize);
496 } 486 }
497 487
498 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { 488 DiscardableMemoryAllocationAshmemFactory::
489 ~DiscardableMemoryAllocationAshmemFactory() {
499 DCHECK(thread_checker_.CalledOnValidThread()); 490 DCHECK(thread_checker_.CalledOnValidThread());
500 DCHECK(ashmem_regions_.empty()); 491 DCHECK(ashmem_regions_.empty());
501 } 492 }
502 493
503 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( 494 scoped_ptr<DiscardableMemoryAllocation>
495 DiscardableMemoryAllocationAshmemFactory::CreateLockedAllocation(
504 size_t size) { 496 size_t size) {
505 const size_t aligned_size = AlignToNextPage(size); 497 const size_t aligned_size = AlignToNextPage(size);
506 if (!aligned_size) 498 if (!aligned_size)
507 return scoped_ptr<DiscardableMemory>(); 499 return scoped_ptr<DiscardableMemoryAllocation>();
508 // TODO(pliard): make this function less naive by e.g. moving the free chunks 500 // TODO(pliard): make this function less naive by e.g. moving the free chunks
509 // multiset to the allocator itself in order to further decrease 501 // multiset to the allocator itself in order to further decrease
510 // fragmentation and speed up allocation. Note that there should not be more 502 // fragmentation and speed up allocation. Note that there should not be more
511 // than a couple (at most 5) of AshmemRegion instances in practice though. 503 // than a couple (at most 5) of AshmemRegion instances in practice though.
512 AutoLock auto_lock(lock_);
513 DCHECK_LE(ashmem_regions_.size(), 5U); 504 DCHECK_LE(ashmem_regions_.size(), 5U);
514 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); 505 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
515 it != ashmem_regions_.end(); ++it) { 506 it != ashmem_regions_.end(); ++it) {
516 scoped_ptr<DiscardableMemory> memory( 507 scoped_ptr<DiscardableMemoryAllocation> memory(
517 (*it)->Allocate_Locked(size, aligned_size)); 508 (*it)->Allocate(size, aligned_size));
518 if (memory) 509 if (memory)
519 return memory.Pass(); 510 return memory.Pass();
520 } 511 }
521 // The creation of the (large) ashmem region might fail if the address space 512 // The creation of the (large) ashmem region might fail if the address space
522 // is too fragmented. In case creation fails the allocator retries by 513 // is too fragmented. In case creation fails the allocator retries by
523 // repetitively dividing the size by 2. 514 // repetitively dividing the size by 2.
524 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); 515 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size);
525 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); 516 for (size_t region_size = std::max(ashmem_region_size_, aligned_size);
526 region_size >= min_region_size; 517 region_size >= min_region_size;
527 region_size = AlignToNextPage(region_size / 2)) { 518 region_size = AlignToNextPage(region_size / 2)) {
528 scoped_ptr<AshmemRegion> new_region( 519 scoped_ptr<AshmemRegion> new_region(
529 AshmemRegion::Create(region_size, name_.c_str(), this)); 520 AshmemRegion::Create(region_size, name_.c_str(), this));
530 if (!new_region) 521 if (!new_region)
531 continue; 522 continue;
532 last_ashmem_region_size_ = region_size; 523 last_ashmem_region_size_ = region_size;
533 ashmem_regions_.push_back(new_region.release()); 524 ashmem_regions_.push_back(new_region.release());
534 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); 525 return ashmem_regions_.back()->Allocate(size, aligned_size);
535 } 526 }
536 // TODO(pliard): consider adding a histogram to see how often this happens. 527 // TODO(pliard): consider adding a histogram to see how often this happens.
537 return scoped_ptr<DiscardableMemory>(); 528 return scoped_ptr<DiscardableMemoryAllocation>();
538 } 529 }
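The fallback halving schedule, traced (self-contained sketch; the constants below are assumptions, not Chromium's values):

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t kMin = 8 * 1024 * 1024;          // assumed region-size floor
    const size_t kConfigured = 32 * 1024 * 1024;  // assumed configured size
    const size_t aligned_size = 20 * 1024;        // the allocation being served
    const size_t min_region_size = std::max(kMin, aligned_size);
    for (size_t region_size = std::max(kConfigured, aligned_size);
         region_size >= min_region_size; region_size /= 2) {
      // The real loop calls AshmemRegion::Create() here, stops on success and
      // re-aligns region_size / 2 to a page boundary between iterations.
      std::printf("trying %zu bytes\n", region_size);  // 32 MB, 16 MB, 8 MB
    }
    return 0;
  }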
539 530
540 size_t DiscardableMemoryAllocator::last_ashmem_region_size() const { 531 size_t
541 AutoLock auto_lock(lock_); 532 DiscardableMemoryAllocationAshmemFactory::last_ashmem_region_size() const {
542 return last_ashmem_region_size_; 533 return last_ashmem_region_size_;
543 } 534 }
544 535
545 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( 536 void DiscardableMemoryAllocationAshmemFactory::DeleteAshmemRegion(
546 AshmemRegion* region) { 537 AshmemRegion* region) {
547 lock_.AssertAcquired();
548 // Note that there should not be more than a couple of ashmem region instances 538 // Note that there should not be more than a couple of ashmem region instances
549 // in |ashmem_regions_|. 539 // in |ashmem_regions_|.
550 DCHECK_LE(ashmem_regions_.size(), 5U); 540 DCHECK_LE(ashmem_regions_.size(), 5U);
551 const ScopedVector<AshmemRegion>::iterator it = std::find( 541 const ScopedVector<AshmemRegion>::iterator it = std::find(
552 ashmem_regions_.begin(), ashmem_regions_.end(), region); 542 ashmem_regions_.begin(), ashmem_regions_.end(), region);
553 DCHECK_NE(ashmem_regions_.end(), it); 543 DCHECK_NE(ashmem_regions_.end(), it);
554 std::swap(*it, ashmem_regions_.back()); 544 std::swap(*it, ashmem_regions_.back());
555 ashmem_regions_.pop_back(); 545 ashmem_regions_.pop_back();
556 } 546 }
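DeleteAshmemRegion() removes the region with the swap-and-pop idiom: O(1) erasure from a vector when element order doesn't matter. In isolation (self-contained; plain std::vector standing in for ScopedVector):

  #include <algorithm>
  #include <vector>

  int main() {
    std::vector<int> regions;
    regions.push_back(10);
    regions.push_back(20);
    regions.push_back(30);
    std::vector<int>::iterator it =
        std::find(regions.begin(), regions.end(), 20);
    std::swap(*it, regions.back());  // move the victim into the last slot...
    regions.pop_back();              // ...then drop it; nothing is shifted
    return 0;
  }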
557 547
558 } // namespace internal 548 } // namespace internal
559 } // namespace base 549 } // namespace base