OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory_allocator_android.h" | 5 #include "base/memory/discardable_memory_allocation_ashmem_factory.h" |
6 | 6 |
7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
8 #include <unistd.h> | 8 #include <unistd.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
11 #include <cmath> | 11 #include <cmath> |
12 #include <limits> | 12 #include <limits> |
13 #include <set> | 13 #include <set> |
14 #include <utility> | 14 #include <utility> |
15 | 15 |
16 #include "base/basictypes.h" | 16 #include "base/basictypes.h" |
17 #include "base/containers/hash_tables.h" | 17 #include "base/containers/hash_tables.h" |
18 #include "base/file_util.h" | 18 #include "base/file_util.h" |
19 #include "base/files/scoped_file.h" | 19 #include "base/files/scoped_file.h" |
20 #include "base/logging.h" | 20 #include "base/logging.h" |
21 #include "base/memory/discardable_memory.h" | |
22 #include "base/memory/scoped_vector.h" | 21 #include "base/memory/scoped_vector.h" |
23 #include "base/synchronization/lock.h" | |
24 #include "base/threading/thread_checker.h" | |
25 #include "third_party/ashmem/ashmem.h" | 22 #include "third_party/ashmem/ashmem.h" |
26 | 23 |
27 // The allocator consists of three parts (classes): | 24 // This file consists of the following parts: |
28 // - DiscardableMemoryAllocator: entry point of all allocations (through its | 25 // - DiscardableMemoryAllocationAshmemFactory: entry point of all allocations |
29 // Allocate() method) that are dispatched to the AshmemRegion instances (which | 26 // (through its CreateLockedAllocation() method) that are dispatched to the |
30 // it owns). | 27 // AshmemRegion instances (which it owns). |
31 // - AshmemRegion: manages allocations and destructions inside a single large | 28 // - AshmemRegion: manages allocations and destructions inside a single large |
32 // (e.g. 32 MBytes) ashmem region. | 29 // (e.g. 32 MBytes) ashmem region. |
33 // - DiscardableAshmemChunk: class implementing the DiscardableMemory interface | 30 // - DiscardableAshmemChunk: class implementing the DiscardableMemoryAllocation |
34 // whose instances are returned to the client. DiscardableAshmemChunk lets the | 31 // interface whose instances are returned to the client. DiscardableAshmemChunk |
35 // client seamlessly operate on a subrange of the ashmem region managed by | 32 // lets the client seamlessly operate on a subrange of the ashmem region managed |
36 // AshmemRegion. | 33 // by AshmemRegion. |
37 | 34 |
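To make the new entry point concrete, here is a minimal usage sketch of the API described above. It is illustrative and not part of this change: the header path, the fill pattern, the caller function, and the namespace placement of DiscardableMemoryAllocation are assumptions; the CreateLockedAllocation() / Lock() / Unlock() / Memory() calls mirror the interface implemented by DiscardableAshmemChunk below.

#include <string.h>

#include "base/memory/discardable_memory_allocation_ashmem_factory.h"
#include "base/memory/scoped_ptr.h"

void UseDiscardableBuffer() {
  // One factory, carving client allocations out of large (e.g. 32 MByte)
  // ashmem regions that it creates on demand.
  base::internal::DiscardableMemoryAllocationAshmemFactory factory(
      "example-client", 32 * 1024 * 1024);

  // Allocations are returned locked, i.e. pinned and immediately usable.
  scoped_ptr<base::DiscardableMemoryAllocation> allocation(
      factory.CreateLockedAllocation(64 * 1024));
  if (!allocation)
    return;  // Allocation failed (e.g. address space too fragmented).
  memset(allocation->Memory(), 0xAA, 64 * 1024);

  // Unlock while the data is not in use so that the kernel may purge it
  // under memory pressure.
  allocation->Unlock();

  // Lock() returns false if the contents were purged while unlocked, in
  // which case they must be regenerated before being read again.
  if (!allocation->Lock())
    memset(allocation->Memory(), 0xAA, 64 * 1024);  // Purged: regenerate.
  allocation->Unlock();
}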
38 namespace base { | 35 namespace base { |
39 namespace { | 36 namespace { |
40 | 37 |
41 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed | 38 // Only tolerate fragmentation in used chunks *caused by the client* (as opposed |
42 // to the allocator when a free chunk is reused). The client can cause such | 39 // to the allocator when a free chunk is reused). The client can cause such |
43 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to | 40 // fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to |
44 // 8192 by the allocator which would cause 4095 bytes of fragmentation (which is | 41 // 8192 by the allocator which would cause 4095 bytes of fragmentation (which is |
45 // currently the maximum allowed). If the client requests 4096 bytes and a free | 42 // currently the maximum allowed). If the client requests 4096 bytes and a free |
46 // chunk of 8192 bytes is available, then the free chunk gets split into two | 43 // chunk of 8192 bytes is available, then the free chunk gets split into two |
(...skipping 12 matching lines...) |
59 if (size > std::numeric_limits<size_t>::max() - kPageSize + 1) | 56 if (size > std::numeric_limits<size_t>::max() - kPageSize + 1) |
60 return 0; | 57 return 0; |
61 const size_t mask = ~(kPageSize - 1); | 58 const size_t mask = ~(kPageSize - 1); |
62 return (size + kPageSize - 1) & mask; | 59 return (size + kPageSize - 1) & mask; |
63 } | 60 } |
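For reference, a few worked values of the page rounding above, assuming kPageSize is 4096 as the 4097 -> 8192 example in the fragmentation comment implies (illustrative, not part of the change):

// AlignToNextPage(1)    == 4096  // (1 + 4095) & ~4095.
// AlignToNextPage(4096) == 4096  // Already page-aligned, returned unchanged.
// AlignToNextPage(4097) == 8192  // Rounded up; up to 4095 bytes are wasted.
// AlignToNextPage(std::numeric_limits<size_t>::max()) == 0  // Overflow guard above.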
64 | 61 |
65 bool CreateAshmemRegion(const char* name, | 62 bool CreateAshmemRegion(const char* name, |
66 size_t size, | 63 size_t size, |
67 int* out_fd, | 64 int* out_fd, |
68 void** out_address) { | 65 void** out_address) { |
69 base::ScopedFD fd(ashmem_create_region(name, size)); | 66 ScopedFD fd(ashmem_create_region(name, size)); |
70 if (!fd.is_valid()) { | 67 if (!fd.is_valid()) { |
71 DLOG(ERROR) << "ashmem_create_region() failed"; | 68 DLOG(ERROR) << "ashmem_create_region() failed"; |
72 return false; | 69 return false; |
73 } | 70 } |
74 | 71 |
75 const int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE); | 72 const int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE); |
76 if (err < 0) { | 73 if (err < 0) { |
77 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; | 74 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; |
78 return false; | 75 return false; |
79 } | 76 } |
(...skipping 15 matching lines...) |
95 | 92 |
96 bool CloseAshmemRegion(int fd, size_t size, void* address) { | 93 bool CloseAshmemRegion(int fd, size_t size, void* address) { |
97 if (munmap(address, size) == -1) { | 94 if (munmap(address, size) == -1) { |
98 DPLOG(ERROR) << "Failed to unmap memory."; | 95 DPLOG(ERROR) << "Failed to unmap memory."; |
99 close(fd); | 96 close(fd); |
100 return false; | 97 return false; |
101 } | 98 } |
102 return close(fd) == 0; | 99 return close(fd) == 0; |
103 } | 100 } |
104 | 101 |
105 DiscardableMemoryLockStatus LockAshmemRegion(int fd, | 102 bool LockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
106 size_t off, | |
107 size_t size, | |
108 const void* address) { | |
109 const int result = ashmem_pin_region(fd, off, size); | 103 const int result = ashmem_pin_region(fd, off, size); |
110 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); | 104 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); |
111 return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED | 105 return result != ASHMEM_WAS_PURGED; |
112 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; | |
113 } | 106 } |
114 | 107 |
115 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { | 108 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
116 const int failed = ashmem_unpin_region(fd, off, size); | 109 const int failed = ashmem_unpin_region(fd, off, size); |
117 if (failed) | 110 if (failed) |
118 DLOG(ERROR) << "Failed to unpin memory."; | 111 DLOG(ERROR) << "Failed to unpin memory."; |
119 // This allows us to catch accesses to unlocked memory. | 112 // This allows us to catch accesses to unlocked memory. |
120 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); | 113 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
121 return !failed; | 114 return !failed; |
122 } | 115 } |
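The two helpers above encode the pin/unpin contract used throughout this file. A short sketch of how a caller detects a purge (illustrative only; fd, offset, size and address stand for an existing chunk's values):

// Pages become purgeable and are mprotect'ed PROT_NONE so stray accesses crash.
UnlockAshmemRegion(fd, offset, size, address);
// ... later, the kernel may have reclaimed the unpinned pages ...
if (!LockAshmemRegion(fd, offset, size, address)) {
  // ASHMEM_WAS_PURGED: the previous contents are gone and must be
  // regenerated before the memory is read again.
}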
123 | 116 |
124 } // namespace | 117 } // namespace |
125 | 118 |
126 namespace internal { | 119 namespace internal { |
127 | 120 |
128 class DiscardableMemoryAllocator::DiscardableAshmemChunk | 121 class DiscardableMemoryAllocationAshmemFactory::DiscardableAshmemChunk |
129 : public DiscardableMemory { | 122 : public DiscardableMemoryAllocation { |
130 public: | 123 public: |
131 // Note that |ashmem_region| must outlive |this|. | 124 // Note that |ashmem_region| must outlive |this|. |
132 DiscardableAshmemChunk(AshmemRegion* ashmem_region, | 125 DiscardableAshmemChunk(AshmemRegion* ashmem_region, |
133 int fd, | 126 int fd, |
134 void* address, | 127 void* address, |
135 size_t offset, | 128 size_t offset, |
136 size_t size) | 129 size_t size) |
137 : ashmem_region_(ashmem_region), | 130 : ashmem_region_(ashmem_region), |
138 fd_(fd), | 131 fd_(fd), |
139 address_(address), | 132 address_(address), |
140 offset_(offset), | 133 offset_(offset), |
141 size_(size), | 134 size_(size), |
142 locked_(true) { | 135 locked_(true) { |
143 } | 136 } |
144 | 137 |
145 // Implemented below AshmemRegion since this requires the full definition of | 138 // Implemented below AshmemRegion since this requires the full definition of |
146 // AshmemRegion. | 139 // AshmemRegion. |
147 virtual ~DiscardableAshmemChunk(); | 140 virtual ~DiscardableAshmemChunk(); |
148 | 141 |
149 // DiscardableMemory: | 142 // DiscardableMemoryAllocation: |
150 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { | 143 virtual bool Lock() OVERRIDE { |
151 DCHECK(!locked_); | 144 DCHECK(!locked_); |
152 locked_ = true; | 145 locked_ = true; |
153 return LockAshmemRegion(fd_, offset_, size_, address_); | 146 return LockAshmemRegion(fd_, offset_, size_, address_); |
154 } | 147 } |
155 | 148 |
156 virtual void Unlock() OVERRIDE { | 149 virtual void Unlock() OVERRIDE { |
157 DCHECK(locked_); | 150 DCHECK(locked_); |
158 locked_ = false; | 151 locked_ = false; |
159 UnlockAshmemRegion(fd_, offset_, size_, address_); | 152 UnlockAshmemRegion(fd_, offset_, size_, address_); |
160 } | 153 } |
161 | 154 |
162 virtual void* Memory() const OVERRIDE { | 155 virtual void* Memory() OVERRIDE { |
163 return address_; | 156 return address_; |
164 } | 157 } |
165 | 158 |
166 private: | 159 private: |
167 AshmemRegion* const ashmem_region_; | 160 AshmemRegion* const ashmem_region_; |
168 const int fd_; | 161 const int fd_; |
169 void* const address_; | 162 void* const address_; |
170 const size_t offset_; | 163 const size_t offset_; |
171 const size_t size_; | 164 const size_t size_; |
172 bool locked_; | 165 bool locked_; |
173 | 166 |
174 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); | 167 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); |
175 }; | 168 }; |
176 | 169 |
177 class DiscardableMemoryAllocator::AshmemRegion { | 170 class DiscardableMemoryAllocationAshmemFactory::AshmemRegion { |
178 public: | 171 public: |
179 // Note that |allocator| must outlive |this|. | 172 // Note that |allocator| must outlive |this|. |
180 static scoped_ptr<AshmemRegion> Create( | 173 static scoped_ptr<AshmemRegion> Create( |
181 size_t size, | 174 size_t size, |
182 const std::string& name, | 175 const std::string& name, |
183 DiscardableMemoryAllocator* allocator) { | 176 DiscardableMemoryAllocationAshmemFactory* allocator) { |
184 DCHECK_EQ(size, AlignToNextPage(size)); | 177 DCHECK_EQ(size, AlignToNextPage(size)); |
185 int fd; | 178 int fd; |
186 void* base; | 179 void* base; |
187 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) | 180 if (!CreateAshmemRegion(name.c_str(), size, &fd, &base)) |
188 return scoped_ptr<AshmemRegion>(); | 181 return scoped_ptr<AshmemRegion>(); |
189 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); | 182 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); |
190 } | 183 } |
191 | 184 |
192 ~AshmemRegion() { | 185 ~AshmemRegion() { |
193 const bool result = CloseAshmemRegion(fd_, size_, base_); | 186 const bool result = CloseAshmemRegion(fd_, size_, base_); |
194 DCHECK(result); | 187 DCHECK(result); |
195 DCHECK(!highest_allocated_chunk_); | 188 DCHECK(!highest_allocated_chunk_); |
196 } | 189 } |
197 | 190 |
198 // Returns a new instance of DiscardableMemory whose size is greater than or | 191 // Returns a new instance of DiscardableMemoryAllocation whose size is greater |
199 // equal to |actual_size| (which is expected to be greater than or equal to | 192 // than or equal to |actual_size| (which is expected to be greater than or |
200 // |client_requested_size|). | 193 // equal to |client_requested_size|). |
201 // Allocation works as follows: | 194 // Allocation works as follows: |
202 // 1) Reuse a previously freed chunk and return it if it succeeded. See | 195 // 1) Reuse a previously freed chunk and return it if it succeeded. See |
203 // ReuseFreeChunk_Locked() below for more information. | 196 // ReuseFreeChunk() below for more information. |
204 // 2) If no free chunk could be reused and the region is not big enough for | 197 // 2) If no free chunk could be reused and the region is not big enough for |
205 // the requested size then NULL is returned. | 198 // the requested size then NULL is returned. |
206 // 3) If there is enough room in the ashmem region then a new chunk is | 199 // 3) If there is enough room in the ashmem region then a new chunk is |
207 // returned. This new chunk starts at |offset_| which is the end of the | 200 // returned. This new chunk starts at |offset_| which is the end of the |
208 // previously highest chunk in the region. | 201 // previously highest chunk in the region. |
209 scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size, | 202 scoped_ptr<DiscardableMemoryAllocation> Allocate(size_t client_requested_size, |
210 size_t actual_size) { | 203 size_t actual_size) { |
211 DCHECK_LE(client_requested_size, actual_size); | 204 DCHECK_LE(client_requested_size, actual_size); |
212 allocator_->lock_.AssertAcquired(); | |
213 | 205 |
214 // Check that the |highest_allocated_chunk_| field doesn't contain a stale | 206 // Check that the |highest_allocated_chunk_| field doesn't contain a stale |
215 // pointer. It should point to either a free chunk or a used chunk. | 207 // pointer. It should point to either a free chunk or a used chunk. |
216 DCHECK(!highest_allocated_chunk_ || | 208 DCHECK(!highest_allocated_chunk_ || |
217 address_to_free_chunk_map_.find(highest_allocated_chunk_) != | 209 address_to_free_chunk_map_.find(highest_allocated_chunk_) != |
218 address_to_free_chunk_map_.end() || | 210 address_to_free_chunk_map_.end() || |
219 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != | 211 used_to_previous_chunk_map_.find(highest_allocated_chunk_) != |
220 used_to_previous_chunk_map_.end()); | 212 used_to_previous_chunk_map_.end()); |
221 | 213 |
222 scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked( | 214 scoped_ptr<DiscardableMemoryAllocation> memory = ReuseFreeChunk( |
223 client_requested_size, actual_size); | 215 client_requested_size, actual_size); |
224 if (memory) | 216 if (memory) |
225 return memory.Pass(); | 217 return memory.Pass(); |
226 | 218 |
227 if (size_ - offset_ < actual_size) { | 219 if (size_ - offset_ < actual_size) { |
228 // This region does not have enough space left to hold the requested size. | 220 // This region does not have enough space left to hold the requested size. |
229 return scoped_ptr<DiscardableMemory>(); | 221 return scoped_ptr<DiscardableMemoryAllocation>(); |
230 } | 222 } |
231 | 223 |
232 void* const address = static_cast<char*>(base_) + offset_; | 224 void* const address = static_cast<char*>(base_) + offset_; |
233 memory.reset( | 225 memory.reset( |
234 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); | 226 new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); |
235 | 227 |
236 used_to_previous_chunk_map_.insert( | 228 used_to_previous_chunk_map_.insert( |
237 std::make_pair(address, highest_allocated_chunk_)); | 229 std::make_pair(address, highest_allocated_chunk_)); |
238 highest_allocated_chunk_ = address; | 230 highest_allocated_chunk_ = address; |
239 offset_ += actual_size; | 231 offset_ += actual_size; |
240 DCHECK_LE(offset_, size_); | 232 DCHECK_LE(offset_, size_); |
241 return memory.Pass(); | 233 return memory.Pass(); |
242 } | 234 } |
243 | 235 |
244 void OnChunkDeletion(void* chunk, size_t size) { | 236 void OnChunkDeletion(void* chunk, size_t size) { |
245 AutoLock auto_lock(allocator_->lock_); | 237 MergeAndAddFreeChunk(chunk, size); |
246 MergeAndAddFreeChunk_Locked(chunk, size); | |
247 // Note that |this| might be deleted beyond this point. | 238 // Note that |this| might be deleted beyond this point. |
248 } | 239 } |
249 | 240 |
250 private: | 241 private: |
251 struct FreeChunk { | 242 struct FreeChunk { |
252 FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {} | 243 FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {} |
253 | 244 |
254 explicit FreeChunk(size_t size) | 245 explicit FreeChunk(size_t size) |
255 : previous_chunk(NULL), | 246 : previous_chunk(NULL), |
256 start(NULL), | 247 start(NULL), |
(...skipping 15 matching lines...) |
272 | 263 |
273 bool operator<(const FreeChunk& other) const { | 264 bool operator<(const FreeChunk& other) const { |
274 return size < other.size; | 265 return size < other.size; |
275 } | 266 } |
276 }; | 267 }; |
277 | 268 |
278 // Note that |allocator| must outlive |this|. | 269 // Note that |allocator| must outlive |this|. |
279 AshmemRegion(int fd, | 270 AshmemRegion(int fd, |
280 size_t size, | 271 size_t size, |
281 void* base, | 272 void* base, |
282 DiscardableMemoryAllocator* allocator) | 273 DiscardableMemoryAllocationAshmemFactory* allocator) |
283 : fd_(fd), | 274 : fd_(fd), |
284 size_(size), | 275 size_(size), |
285 base_(base), | 276 base_(base), |
286 allocator_(allocator), | 277 allocator_(allocator), |
287 highest_allocated_chunk_(NULL), | 278 highest_allocated_chunk_(NULL), |
288 offset_(0) { | 279 offset_(0) { |
289 DCHECK_GE(fd_, 0); | 280 DCHECK_GE(fd_, 0); |
290 DCHECK_GE(size, kMinAshmemRegionSize); | 281 DCHECK_GE(size, kMinAshmemRegionSize); |
291 DCHECK(base); | 282 DCHECK(base); |
292 DCHECK(allocator); | 283 DCHECK(allocator); |
293 } | 284 } |
294 | 285 |
295 // Tries to reuse a previously freed chunk by doing a closest size match. | 286 // Tries to reuse a previously freed chunk by doing a closest size match. |
296 scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked( | 287 scoped_ptr<DiscardableMemoryAllocation> ReuseFreeChunk( |
297 size_t client_requested_size, | 288 size_t client_requested_size, |
298 size_t actual_size) { | 289 size_t actual_size) { |
299 allocator_->lock_.AssertAcquired(); | 290 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator( |
300 const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked( | |
301 free_chunks_.lower_bound(FreeChunk(actual_size))); | 291 free_chunks_.lower_bound(FreeChunk(actual_size))); |
302 if (reused_chunk.is_null()) | 292 if (reused_chunk.is_null()) |
303 return scoped_ptr<DiscardableMemory>(); | 293 return scoped_ptr<DiscardableMemoryAllocation>(); |
304 | 294 |
305 used_to_previous_chunk_map_.insert( | 295 used_to_previous_chunk_map_.insert( |
306 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); | 296 std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); |
307 size_t reused_chunk_size = reused_chunk.size; | 297 size_t reused_chunk_size = reused_chunk.size; |
308 // |client_requested_size| is used below rather than |actual_size| to | 298 // |client_requested_size| is used below rather than |actual_size| to |
309 // reflect the number of bytes that would not be usable by the client (i.e. | 299 // reflect the number of bytes that would not be usable by the client (i.e. |
310 // wasted). Using |actual_size| instead would not allow us to detect | 300 // wasted). Using |actual_size| instead would not allow us to detect |
311 // fragmentation caused by the client if it made misaligned allocations. | 301 // fragmentation caused by the client if it made misaligned allocations. |
312 DCHECK_GE(reused_chunk.size, client_requested_size); | 302 DCHECK_GE(reused_chunk.size, client_requested_size); |
313 const size_t fragmentation_bytes = | 303 const size_t fragmentation_bytes = |
314 reused_chunk.size - client_requested_size; | 304 reused_chunk.size - client_requested_size; |
315 | 305 |
316 if (fragmentation_bytes > kMaxChunkFragmentationBytes) { | 306 if (fragmentation_bytes > kMaxChunkFragmentationBytes) { |
317 // Split the free chunk being recycled so that its unused tail doesn't get | 307 // Split the free chunk being recycled so that its unused tail doesn't get |
318 // reused (i.e. locked) which would prevent it from being evicted under | 308 // reused (i.e. locked) which would prevent it from being evicted under |
319 // memory pressure. | 309 // memory pressure. |
320 reused_chunk_size = actual_size; | 310 reused_chunk_size = actual_size; |
321 void* const new_chunk_start = | 311 void* const new_chunk_start = |
322 static_cast<char*>(reused_chunk.start) + actual_size; | 312 static_cast<char*>(reused_chunk.start) + actual_size; |
323 if (reused_chunk.start == highest_allocated_chunk_) { | 313 if (reused_chunk.start == highest_allocated_chunk_) { |
324 // We also need to update the pointer to the highest allocated chunk in | 314 // We also need to update the pointer to the highest allocated chunk in |
325 // case we are splitting the highest chunk. | 315 // case we are splitting the highest chunk. |
326 highest_allocated_chunk_ = new_chunk_start; | 316 highest_allocated_chunk_ = new_chunk_start; |
327 } | 317 } |
328 DCHECK_GT(reused_chunk.size, actual_size); | 318 DCHECK_GT(reused_chunk.size, actual_size); |
329 const size_t new_chunk_size = reused_chunk.size - actual_size; | 319 const size_t new_chunk_size = reused_chunk.size - actual_size; |
330 // Note that merging is not needed here since there can't be contiguous | 320 // Note that merging is not needed here since there can't be contiguous |
331 // free chunks at this point. | 321 // free chunks at this point. |
332 AddFreeChunk_Locked( | 322 AddFreeChunk( |
333 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); | 323 FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); |
334 } | 324 } |
335 | 325 |
336 const size_t offset = | 326 const size_t offset = |
337 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); | 327 static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); |
338 LockAshmemRegion(fd_, offset, reused_chunk_size, reused_chunk.start); | 328 LockAshmemRegion(fd_, offset, reused_chunk_size, reused_chunk.start); |
339 scoped_ptr<DiscardableMemory> memory( | 329 scoped_ptr<DiscardableMemoryAllocation> memory( |
340 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, | 330 new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, |
341 reused_chunk_size)); | 331 reused_chunk_size)); |
342 return memory.Pass(); | 332 return memory.Pass(); |
343 } | 333 } |
344 | 334 |
345 // Makes the chunk identified with the provided arguments free and possibly | 335 // Makes the chunk identified with the provided arguments free and possibly |
346 // merges this chunk with the previous and next contiguous ones. | 336 // merges this chunk with the previous and next contiguous ones. |
347 // If the provided chunk is the only one used (and going to be freed) in the | 337 // If the provided chunk is the only one used (and going to be freed) in the |
348 // region then the internal ashmem region is closed so that the underlying | 338 // region then the internal ashmem region is closed so that the underlying |
349 // physical pages are immediately released. | 339 // physical pages are immediately released. |
350 // Note that free chunks are unlocked and can therefore be reclaimed by the | 340 // Note that free chunks are unlocked and can therefore be reclaimed by the |
351 // kernel if needed (under memory pressure), but unfortunately they are not | 341 // kernel if needed (under memory pressure), but unfortunately they are not |
352 // immediately released since madvise(MADV_REMOVE) and | 342 // immediately released since madvise(MADV_REMOVE) and |
353 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might | 343 // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might |
354 // change in kernel versions >= 3.5 though. The fact that free chunks are | 344 // change in kernel versions >= 3.5 though. The fact that free chunks are |
355 // not immediately released is the reason why we are trying to minimize | 345 // not immediately released is the reason why we are trying to minimize |
356 // fragmentation in order not to cause "artificial" memory pressure. | 346 // fragmentation in order not to cause "artificial" memory pressure. |
357 void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) { | 347 void MergeAndAddFreeChunk(void* chunk, size_t size) { |
358 allocator_->lock_.AssertAcquired(); | |
359 size_t new_free_chunk_size = size; | 348 size_t new_free_chunk_size = size; |
360 // Merge with the previous chunk. | 349 // Merge with the previous chunk. |
361 void* first_free_chunk = chunk; | 350 void* first_free_chunk = chunk; |
362 DCHECK(!used_to_previous_chunk_map_.empty()); | 351 DCHECK(!used_to_previous_chunk_map_.empty()); |
363 const hash_map<void*, void*>::iterator previous_chunk_it = | 352 const hash_map<void*, void*>::iterator previous_chunk_it = |
364 used_to_previous_chunk_map_.find(chunk); | 353 used_to_previous_chunk_map_.find(chunk); |
365 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end()); | 354 DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end()); |
366 void* previous_chunk = previous_chunk_it->second; | 355 void* previous_chunk = previous_chunk_it->second; |
367 used_to_previous_chunk_map_.erase(previous_chunk_it); | 356 used_to_previous_chunk_map_.erase(previous_chunk_it); |
368 | 357 |
369 if (previous_chunk) { | 358 if (previous_chunk) { |
370 const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk); | 359 const FreeChunk free_chunk = RemoveFreeChunk(previous_chunk); |
371 if (!free_chunk.is_null()) { | 360 if (!free_chunk.is_null()) { |
372 new_free_chunk_size += free_chunk.size; | 361 new_free_chunk_size += free_chunk.size; |
373 first_free_chunk = previous_chunk; | 362 first_free_chunk = previous_chunk; |
374 if (chunk == highest_allocated_chunk_) | 363 if (chunk == highest_allocated_chunk_) |
375 highest_allocated_chunk_ = previous_chunk; | 364 highest_allocated_chunk_ = previous_chunk; |
376 | 365 |
377 // There should not be more contiguous previous free chunks. | 366 // There should not be more contiguous previous free chunks. |
378 previous_chunk = free_chunk.previous_chunk; | 367 previous_chunk = free_chunk.previous_chunk; |
379 DCHECK(!address_to_free_chunk_map_.count(previous_chunk)); | 368 DCHECK(!address_to_free_chunk_map_.count(previous_chunk)); |
380 } | 369 } |
381 } | 370 } |
382 | 371 |
383 // Merge with the next chunk if free and present. | 372 // Merge with the next chunk if free and present. |
384 void* next_chunk = static_cast<char*>(chunk) + size; | 373 void* next_chunk = static_cast<char*>(chunk) + size; |
385 const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk); | 374 const FreeChunk next_free_chunk = RemoveFreeChunk(next_chunk); |
386 if (!next_free_chunk.is_null()) { | 375 if (!next_free_chunk.is_null()) { |
387 new_free_chunk_size += next_free_chunk.size; | 376 new_free_chunk_size += next_free_chunk.size; |
388 if (next_free_chunk.start == highest_allocated_chunk_) | 377 if (next_free_chunk.start == highest_allocated_chunk_) |
389 highest_allocated_chunk_ = first_free_chunk; | 378 highest_allocated_chunk_ = first_free_chunk; |
390 | 379 |
391 // Same as above. | 380 // Same as above. |
392 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) + | 381 DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) + |
393 next_free_chunk.size)); | 382 next_free_chunk.size)); |
394 } | 383 } |
395 | 384 |
396 const bool whole_ashmem_region_is_free = | 385 const bool whole_ashmem_region_is_free = |
397 used_to_previous_chunk_map_.empty(); | 386 used_to_previous_chunk_map_.empty(); |
398 if (!whole_ashmem_region_is_free) { | 387 if (!whole_ashmem_region_is_free) { |
399 AddFreeChunk_Locked( | 388 AddFreeChunk( |
400 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); | 389 FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); |
401 return; | 390 return; |
402 } | 391 } |
403 | 392 |
404 // The whole ashmem region is free thus it can be deleted. | 393 // The whole ashmem region is free thus it can be deleted. |
405 DCHECK_EQ(base_, first_free_chunk); | 394 DCHECK_EQ(base_, first_free_chunk); |
406 DCHECK_EQ(base_, highest_allocated_chunk_); | 395 DCHECK_EQ(base_, highest_allocated_chunk_); |
407 DCHECK(free_chunks_.empty()); | 396 DCHECK(free_chunks_.empty()); |
408 DCHECK(address_to_free_chunk_map_.empty()); | 397 DCHECK(address_to_free_chunk_map_.empty()); |
409 DCHECK(used_to_previous_chunk_map_.empty()); | 398 DCHECK(used_to_previous_chunk_map_.empty()); |
410 highest_allocated_chunk_ = NULL; | 399 highest_allocated_chunk_ = NULL; |
411 allocator_->DeleteAshmemRegion_Locked(this); // Deletes |this|. | 400 allocator_->DeleteAshmemRegion(this); // Deletes |this|. |
412 } | 401 } |
413 | 402 |
414 void AddFreeChunk_Locked(const FreeChunk& free_chunk) { | 403 void AddFreeChunk(const FreeChunk& free_chunk) { |
415 allocator_->lock_.AssertAcquired(); | |
416 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( | 404 const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( |
417 free_chunk); | 405 free_chunk); |
418 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it)); | 406 address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it)); |
419 // Update the next used contiguous chunk, if any, since its previous chunk | 407 // Update the next used contiguous chunk, if any, since its previous chunk |
420 // may have changed due to free chunks merging/splitting. | 408 // may have changed due to free chunks merging/splitting. |
421 void* const next_used_contiguous_chunk = | 409 void* const next_used_contiguous_chunk = |
422 static_cast<char*>(free_chunk.start) + free_chunk.size; | 410 static_cast<char*>(free_chunk.start) + free_chunk.size; |
423 hash_map<void*, void*>::iterator previous_it = | 411 hash_map<void*, void*>::iterator previous_it = |
424 used_to_previous_chunk_map_.find(next_used_contiguous_chunk); | 412 used_to_previous_chunk_map_.find(next_used_contiguous_chunk); |
425 if (previous_it != used_to_previous_chunk_map_.end()) | 413 if (previous_it != used_to_previous_chunk_map_.end()) |
426 previous_it->second = free_chunk.start; | 414 previous_it->second = free_chunk.start; |
427 } | 415 } |
428 | 416 |
429 // Finds and removes the free chunk, if any, whose start address is | 417 // Finds and removes the free chunk, if any, whose start address is |
430 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk | 418 // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk |
431 // whose content is null if it was not found. | 419 // whose content is null if it was not found. |
432 FreeChunk RemoveFreeChunk_Locked(void* chunk_start) { | 420 FreeChunk RemoveFreeChunk(void* chunk_start) { |
433 allocator_->lock_.AssertAcquired(); | |
434 const hash_map< | 421 const hash_map< |
435 void*, std::multiset<FreeChunk>::iterator>::iterator it = | 422 void*, std::multiset<FreeChunk>::iterator>::iterator it = |
436 address_to_free_chunk_map_.find(chunk_start); | 423 address_to_free_chunk_map_.find(chunk_start); |
437 if (it == address_to_free_chunk_map_.end()) | 424 if (it == address_to_free_chunk_map_.end()) |
438 return FreeChunk(); | 425 return FreeChunk(); |
439 return RemoveFreeChunkFromIterator_Locked(it->second); | 426 return RemoveFreeChunkFromIterator(it->second); |
440 } | 427 } |
441 | 428 |
442 // Same as above but takes an iterator in. | 429 // Same as above but takes an iterator in. |
443 FreeChunk RemoveFreeChunkFromIterator_Locked( | 430 FreeChunk RemoveFreeChunkFromIterator( |
444 std::multiset<FreeChunk>::iterator free_chunk_it) { | 431 std::multiset<FreeChunk>::iterator free_chunk_it) { |
445 allocator_->lock_.AssertAcquired(); | |
446 if (free_chunk_it == free_chunks_.end()) | 432 if (free_chunk_it == free_chunks_.end()) |
447 return FreeChunk(); | 433 return FreeChunk(); |
448 DCHECK(free_chunk_it != free_chunks_.end()); | 434 DCHECK(free_chunk_it != free_chunks_.end()); |
449 const FreeChunk free_chunk(*free_chunk_it); | 435 const FreeChunk free_chunk(*free_chunk_it); |
450 address_to_free_chunk_map_.erase(free_chunk_it->start); | 436 address_to_free_chunk_map_.erase(free_chunk_it->start); |
451 free_chunks_.erase(free_chunk_it); | 437 free_chunks_.erase(free_chunk_it); |
452 return free_chunk; | 438 return free_chunk; |
453 } | 439 } |
454 | 440 |
455 const int fd_; | 441 const int fd_; |
456 const size_t size_; | 442 const size_t size_; |
457 void* const base_; | 443 void* const base_; |
458 DiscardableMemoryAllocator* const allocator_; | 444 DiscardableMemoryAllocationAshmemFactory* const allocator_; |
459 // Points to the chunk with the highest address in the region. This pointer | 445 // Points to the chunk with the highest address in the region. This pointer |
460 // needs to be carefully updated when chunks are merged/split. | 446 // needs to be carefully updated when chunks are merged/split. |
461 void* highest_allocated_chunk_; | 447 void* highest_allocated_chunk_; |
462 // Points to the end of |highest_allocated_chunk_|. | 448 // Points to the end of |highest_allocated_chunk_|. |
463 size_t offset_; | 449 size_t offset_; |
464 // Allows free chunks recycling (lookup, insertion and removal) in O(log N). | 450 // Allows free chunks recycling (lookup, insertion and removal) in O(log N). |
465 // Note that FreeChunk values are indexed by their size and also note that | 451 // Note that FreeChunk values are indexed by their size and also note that |
466 // multiple free chunks can have the same size (which is why multiset<> is | 452 // multiple free chunks can have the same size (which is why multiset<> is |
467 // used instead of e.g. set<>). | 453 // used instead of e.g. set<>). |
468 std::multiset<FreeChunk> free_chunks_; | 454 std::multiset<FreeChunk> free_chunks_; |
469 // Used while merging free contiguous chunks to erase free chunks (from their | 455 // Used while merging free contiguous chunks to erase free chunks (from their |
470 // start address) in constant time. Note that multiset<>::{insert,erase}() | 456 // start address) in constant time. Note that multiset<>::{insert,erase}() |
471 // don't invalidate iterators (except the one for the element being removed | 457 // don't invalidate iterators (except the one for the element being removed |
472 // obviously). | 458 // obviously). |
473 hash_map< | 459 hash_map< |
474 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; | 460 void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; |
475 // Maps the address of *used* chunks to the address of their previous | 461 // Maps the address of *used* chunks to the address of their previous |
476 // contiguous chunk. | 462 // contiguous chunk. |
477 hash_map<void*, void*> used_to_previous_chunk_map_; | 463 hash_map<void*, void*> used_to_previous_chunk_map_; |
478 | 464 |
479 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); | 465 DISALLOW_COPY_AND_ASSIGN(AshmemRegion); |
480 }; | 466 }; |
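A worked example of the reuse/split decision made in ReuseFreeChunk() above, assuming kMaxChunkFragmentationBytes is 4095 as the fragmentation comment at the top of the file implies (illustrative only):

// Request of 4096 bytes; best free chunk found by lower_bound() is 8192 bytes:
//   fragmentation_bytes = 8192 - 4096 = 4096 > 4095, so the free chunk is
//   split: the first 4096 bytes are pinned and handed out, and the trailing
//   4096 bytes are re-added to |free_chunks_| (still unpinned/purgeable).
// Request of 4097 bytes (actual_size 8192) against the same 8192-byte chunk:
//   fragmentation_bytes = 8192 - 4097 = 4095 <= 4095, so the whole chunk is
//   reused without splitting.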
481 | 467 |
482 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { | 468 DiscardableMemoryAllocationAshmemFactory::DiscardableAshmemChunk:: |
| 469 ~DiscardableAshmemChunk() { |
483 if (locked_) | 470 if (locked_) |
484 UnlockAshmemRegion(fd_, offset_, size_, address_); | 471 UnlockAshmemRegion(fd_, offset_, size_, address_); |
485 ashmem_region_->OnChunkDeletion(address_, size_); | 472 ashmem_region_->OnChunkDeletion(address_, size_); |
486 } | 473 } |
487 | 474 |
488 DiscardableMemoryAllocator::DiscardableMemoryAllocator( | 475 DiscardableMemoryAllocationAshmemFactory:: |
| 476 DiscardableMemoryAllocationAshmemFactory( |
489 const std::string& name, | 477 const std::string& name, |
490 size_t ashmem_region_size) | 478 size_t ashmem_region_size) |
491 : name_(name), | 479 : name_(name), |
492 ashmem_region_size_( | 480 ashmem_region_size_( |
493 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), | 481 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), |
494 last_ashmem_region_size_(0) { | 482 last_ashmem_region_size_(0) { |
495 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); | 483 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); |
496 } | 484 } |
497 | 485 |
498 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { | 486 DiscardableMemoryAllocationAshmemFactory:: |
499 DCHECK(thread_checker_.CalledOnValidThread()); | 487 ~DiscardableMemoryAllocationAshmemFactory() { |
500 DCHECK(ashmem_regions_.empty()); | 488 DCHECK(ashmem_regions_.empty()); |
501 } | 489 } |
502 | 490 |
503 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( | 491 scoped_ptr<DiscardableMemoryAllocation> |
| 492 DiscardableMemoryAllocationAshmemFactory::CreateLockedAllocation( |
504 size_t size) { | 493 size_t size) { |
505 const size_t aligned_size = AlignToNextPage(size); | 494 const size_t aligned_size = AlignToNextPage(size); |
506 if (!aligned_size) | 495 if (!aligned_size) |
507 return scoped_ptr<DiscardableMemory>(); | 496 return scoped_ptr<DiscardableMemoryAllocation>(); |
508 // TODO(pliard): make this function less naive by e.g. moving the free chunks | 497 // TODO(pliard): make this function less naive by e.g. moving the free chunks |
509 // multiset to the allocator itself in order to further decrease fragmentation | 498 // multiset to the allocator itself in order to further decrease fragmentation |
510 // and speed up allocation. Note that there should not be more than a | 499 // and speed up allocation. Note that there should not be more than a |
511 // couple (=5) of AshmemRegion instances in practice though. | 500 // couple (=5) of AshmemRegion instances in practice though. |
512 AutoLock auto_lock(lock_); | |
513 DCHECK_LE(ashmem_regions_.size(), 5U); | 501 DCHECK_LE(ashmem_regions_.size(), 5U); |
514 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); | 502 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); |
515 it != ashmem_regions_.end(); ++it) { | 503 it != ashmem_regions_.end(); ++it) { |
516 scoped_ptr<DiscardableMemory> memory( | 504 scoped_ptr<DiscardableMemoryAllocation> memory( |
517 (*it)->Allocate_Locked(size, aligned_size)); | 505 (*it)->Allocate(size, aligned_size)); |
518 if (memory) | 506 if (memory) |
519 return memory.Pass(); | 507 return memory.Pass(); |
520 } | 508 } |
521 // The creation of the (large) ashmem region might fail if the address space | 509 // The creation of the (large) ashmem region might fail if the address space |
522 // is too fragmented. If creation fails, the allocator retries by | 510 // is too fragmented. If creation fails, the allocator retries by |
523 // repeatedly dividing the size by 2. | 511 // repeatedly dividing the size by 2. |
524 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); | 512 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); |
525 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); | 513 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); |
526 region_size >= min_region_size; | 514 region_size >= min_region_size; |
527 region_size = AlignToNextPage(region_size / 2)) { | 515 region_size = AlignToNextPage(region_size / 2)) { |
528 scoped_ptr<AshmemRegion> new_region( | 516 scoped_ptr<AshmemRegion> new_region( |
529 AshmemRegion::Create(region_size, name_.c_str(), this)); | 517 AshmemRegion::Create(region_size, name_.c_str(), this)); |
530 if (!new_region) | 518 if (!new_region) |
531 continue; | 519 continue; |
532 last_ashmem_region_size_ = region_size; | 520 last_ashmem_region_size_ = region_size; |
533 ashmem_regions_.push_back(new_region.release()); | 521 ashmem_regions_.push_back(new_region.release()); |
534 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); | 522 return ashmem_regions_.back()->Allocate(size, aligned_size); |
535 } | 523 } |
536 // TODO(pliard): consider adding a histogram to see how often this happens. | 524 // TODO(pliard): consider adding a histogram to see how often this happens. |
537 return scoped_ptr<DiscardableMemory>(); | 525 return scoped_ptr<DiscardableMemoryAllocation>(); |
538 } | 526 } |
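A brief illustration of the retry loop above (illustrative; assumes ashmem_region_size_ is the default and larger than the request):

// If creating a 32 MByte region fails because the address space is too
// fragmented, the factory retries with 16 MBytes, then 8 MBytes, and so on,
// each candidate page-aligned via AlignToNextPage(region_size / 2). It stops
// once the candidate would fall below max(kMinAshmemRegionSize, aligned_size),
// and only then returns NULL to the caller.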
539 | 527 |
540 size_t DiscardableMemoryAllocator::last_ashmem_region_size() const { | 528 size_t |
541 AutoLock auto_lock(lock_); | 529 DiscardableMemoryAllocationAshmemFactory::last_ashmem_region_size() const { |
542 return last_ashmem_region_size_; | 530 return last_ashmem_region_size_; |
543 } | 531 } |
544 | 532 |
545 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( | 533 void DiscardableMemoryAllocationAshmemFactory::DeleteAshmemRegion( |
546 AshmemRegion* region) { | 534 AshmemRegion* region) { |
547 lock_.AssertAcquired(); | |
548 // Note that there should not be more than a couple of ashmem region instances | 535 // Note that there should not be more than a couple of ashmem region instances |
549 // in |ashmem_regions_|. | 536 // in |ashmem_regions_|. |
550 DCHECK_LE(ashmem_regions_.size(), 5U); | 537 DCHECK_LE(ashmem_regions_.size(), 5U); |
551 const ScopedVector<AshmemRegion>::iterator it = std::find( | 538 const ScopedVector<AshmemRegion>::iterator it = std::find( |
552 ashmem_regions_.begin(), ashmem_regions_.end(), region); | 539 ashmem_regions_.begin(), ashmem_regions_.end(), region); |
553 DCHECK_NE(ashmem_regions_.end(), it); | 540 DCHECK_NE(ashmem_regions_.end(), it); |
554 std::swap(*it, ashmem_regions_.back()); | 541 std::swap(*it, ashmem_regions_.back()); |
555 ashmem_regions_.pop_back(); | 542 ashmem_regions_.pop_back(); |
556 } | 543 } |
557 | 544 |
558 } // namespace internal | 545 } // namespace internal |
559 } // namespace base | 546 } // namespace base |