OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_shared_memory.h" | 5 #include "base/memory/discardable_shared_memory.h" |
6 | 6 |
7 #if defined(OS_POSIX) && !defined(OS_NACL) | 7 #if defined(OS_POSIX) && !defined(OS_NACL) |
8 // For madvise(), which is available on all POSIX-compatible systems. | 8 // For madvise(), which is available on all POSIX-compatible systems. |
9 #include <sys/mman.h> | 9 #include <sys/mman.h> |
10 #endif | 10 #endif |
11 | 11 |
12 #include <algorithm> | 12 #include <algorithm> |
13 | 13 |
14 #include "base/atomicops.h" | 14 #include "base/atomicops.h" |
15 #include "base/bits.h" | 15 #include "base/bits.h" |
16 #include "base/logging.h" | 16 #include "base/logging.h" |
17 #include "base/numerics/safe_math.h" | 17 #include "base/numerics/safe_math.h" |
18 #include "base/process/process_metrics.h" | 18 #include "base/process/process_metrics.h" |
19 | 19 |
20 #if defined(OS_ANDROID) | 20 #if defined(OS_ANDROID) |
21 #include "third_party/ashmem/ashmem.h" | 21 #include "third_party/ashmem/ashmem.h" |
22 #endif | 22 #endif |
23 | 23 |
24 #if defined(OS_WIN) | |
25 #include "base/win/windows_version.h" | |
26 #endif | |
27 | |
28 namespace base { | 24 namespace base { |
29 namespace { | 25 namespace { |
30 | 26 |
31 // Use a machine-sized pointer as atomic type. It will use the Atomic32 or | 27 // Use a machine-sized pointer as atomic type. It will use the Atomic32 or |
32 // Atomic64 routines, depending on the architecture. | 28 // Atomic64 routines, depending on the architecture. |
33 typedef intptr_t AtomicType; | 29 typedef intptr_t AtomicType; |
34 typedef uintptr_t UAtomicType; | 30 typedef uintptr_t UAtomicType; |
35 | 31 |
36 // Template specialization for timestamp serialization/deserialization. This | 32 // Template specialization for timestamp serialization/deserialization. This |
37 // is used to serialize timestamps using Unix time on systems where AtomicType | 33 // is used to serialize timestamps using Unix time on systems where AtomicType |
(...skipping 173 matching lines...)
211 locked_page_count_ += end - start; | 207 locked_page_count_ += end - start; |
212 #if DCHECK_IS_ON() | 208 #if DCHECK_IS_ON() |
213 // Detect incorrect usage by keeping track of exactly what pages are locked. | 209 // Detect incorrect usage by keeping track of exactly what pages are locked. |
214 for (auto page = start; page < end; ++page) { | 210 for (auto page = start; page < end; ++page) { |
215 auto result = locked_pages_.insert(page); | 211 auto result = locked_pages_.insert(page); |
216 DCHECK(result.second); | 212 DCHECK(result.second); |
217 } | 213 } |
218 DCHECK_EQ(locked_pages_.size(), locked_page_count_); | 214 DCHECK_EQ(locked_pages_.size(), locked_page_count_); |
219 #endif | 215 #endif |
220 | 216 |
221 // Pin pages if supported. | |
222 #if defined(OS_ANDROID) | 217 #if defined(OS_ANDROID) |
223 SharedMemoryHandle handle = shared_memory_.handle(); | 218 SharedMemoryHandle handle = shared_memory_.handle(); |
224 if (SharedMemory::IsHandleValid(handle)) { | 219 if (SharedMemory::IsHandleValid(handle)) { |
225 if (ashmem_pin_region( | 220 if (ashmem_pin_region( |
226 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { | 221 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { |
227 return PURGED; | 222 return PURGED; |
228 } | 223 } |
229 } | 224 } |
230 #elif defined(OS_WIN) | |
231 if (base::win::GetVersion() >= base::win::VERSION_WIN8) { | |
232 if (!VirtualAlloc(reinterpret_cast<char*>(shared_memory_.memory()) + | |
233 AlignToPageSize(sizeof(SharedState)) + offset, | |
234 length, MEM_RESET_UNDO, PAGE_READWRITE)) { | |
235 return PURGED; | |
236 } | |
237 } | |
238 #endif | 225 #endif |
239 | 226 |
240 return SUCCESS; | 227 return SUCCESS; |
241 } | 228 } |
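
For context, a minimal usage sketch of the Lock/Unlock protocol implemented above, assuming the DiscardableSharedMemory public API at this revision (CreateAndMap(), memory(), Lock(), Unlock(), and the LockResult values); the function name is hypothetical and this is not part of the CL:

    #include <cstring>

    #include "base/memory/discardable_shared_memory.h"

    // Hypothetical demo, not part of this CL.
    void LockUnlockSketch() {
      base::DiscardableSharedMemory memory;
      if (!memory.CreateAndMap(4096))  // Mapped memory starts out locked.
        return;
      std::memset(memory.memory(), 0xAA, 4096);
      // Zero for length means "everything onward", as in Unlock() above.
      memory.Unlock(0, 0);
      // While unlocked, the contents may be purged; Lock() reports this.
      if (memory.Lock(0, 0) == base::DiscardableSharedMemory::PURGED) {
        // Contents were discarded; regenerate the data before use.
      }
      memory.Unlock(0, 0);
    }
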
242 | 229 |
243 void DiscardableSharedMemory::Unlock(size_t offset, size_t length) { | 230 void DiscardableSharedMemory::Unlock(size_t offset, size_t length) { |
244 DCHECK_EQ(AlignToPageSize(offset), offset); | 231 DCHECK_EQ(AlignToPageSize(offset), offset); |
245 DCHECK_EQ(AlignToPageSize(length), length); | 232 DCHECK_EQ(AlignToPageSize(length), length); |
246 | 233 |
247 // Calls to this function must be synchronized properly. | 234 // Calls to this function must be synchronized properly. |
248 DFAKE_SCOPED_LOCK(thread_collision_warner_); | 235 DFAKE_SCOPED_LOCK(thread_collision_warner_); |
249 | 236 |
250 // Zero for length means "everything onward". | 237 // Zero for length means "everything onward". |
251 if (!length) | 238 if (!length) |
252 length = AlignToPageSize(mapped_size_) - offset; | 239 length = AlignToPageSize(mapped_size_) - offset; |
253 | 240 |
254 DCHECK(shared_memory_.memory()); | 241 DCHECK(shared_memory_.memory()); |
255 | 242 |
256 // Unpin pages if supported. | |
257 #if defined(OS_ANDROID) | 243 #if defined(OS_ANDROID) |
258 SharedMemoryHandle handle = shared_memory_.handle(); | 244 SharedMemoryHandle handle = shared_memory_.handle(); |
259 if (SharedMemory::IsHandleValid(handle)) { | 245 if (SharedMemory::IsHandleValid(handle)) { |
260 if (ashmem_unpin_region( | 246 if (ashmem_unpin_region( |
261 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { | 247 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { |
262 DPLOG(ERROR) << "ashmem_unpin_region() failed"; | 248 DPLOG(ERROR) << "ashmem_unpin_region() failed"; |
263 } | 249 } |
264 } | 250 } |
265 #elif defined(OS_WIN) | |
266 if (base::win::GetVersion() >= base::win::VERSION_WIN8) { | |
267 // Note: MEM_RESET is not technically gated on Win8. However, this Unlock | |
268 // function needs to match the Lock behaviour (MEM_RESET_UNDO) to properly | |
269 // implement memory pinning. It needs to bias towards preserving the | |
270 // contents of memory between an Unlock and next Lock. | |
271 if (!VirtualAlloc(reinterpret_cast<char*>(shared_memory_.memory()) + | |
272 AlignToPageSize(sizeof(SharedState)) + offset, | |
273 length, MEM_RESET, PAGE_READWRITE)) { | |
274 DPLOG(ERROR) << "VirtualAlloc() MEM_RESET failed in Unlock()"; | |
275 } | |
276 } | |
277 #endif | 251 #endif |
278 | 252 |
279 size_t start = offset / base::GetPageSize(); | 253 size_t start = offset / base::GetPageSize(); |
280 size_t end = start + length / base::GetPageSize(); | 254 size_t end = start + length / base::GetPageSize(); |
281 DCHECK_LT(start, end); | 255 DCHECK_LT(start, end); |
282 DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize()); | 256 DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize()); |
283 | 257 |
284 // Remove pages from |locked_page_count_|. | 258 // Remove pages from |locked_page_count_|. |
285 // Note: Unlocking a page that is not locked is an error. | 259 // Note: Unlocking a page that is not locked is an error. |
286 DCHECK_GE(locked_page_count_, end - start); | 260 DCHECK_GE(locked_page_count_, end - start); |
(...skipping 55 matching lines...)
342 // was incorrect or memory was locked. In the second case, the caller should | 316 // was incorrect or memory was locked. In the second case, the caller should |
343 // most likely wait for some amount of time before attempting to purge | 317 // most likely wait for some amount of time before attempting to purge |
344 // the memory again. | 318 // the memory again. |
345 if (result.value.u != old_state.value.u) { | 319 if (result.value.u != old_state.value.u) { |
346 last_known_usage_ = result.GetLockState() == SharedState::LOCKED | 320 last_known_usage_ = result.GetLockState() == SharedState::LOCKED |
347 ? current_time | 321 ? current_time |
348 : result.GetTimestamp(); | 322 : result.GetTimestamp(); |
349 return false; | 323 return false; |
350 } | 324 } |
351 | 325 |
352 // The next section releases as many resources as possible from within |
353 // the purging process, until the client process notices the purge and |
354 // releases its own references. |
355 // Note: this memory will not be accessed again. The segment will be |
356 // freed asynchronously at a later time, so just do the best we can |
357 // immediately. |
358 #if defined(OS_POSIX) && !defined(OS_NACL) | 326 #if defined(OS_POSIX) && !defined(OS_NACL) |
359 // Linux and Android provide MADV_REMOVE, which is preferred as it has | 327 // Linux and Android provide MADV_REMOVE, which is preferred as it has |
360 // behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs) | 328 // behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs) |
361 // provide MADV_FREE, which has the same result but purges memory lazily. | 329 // provide MADV_FREE, which has the same result but purges memory lazily. |
362 #if defined(OS_LINUX) || defined(OS_ANDROID) | 330 #if defined(OS_LINUX) || defined(OS_ANDROID) |
363 #define MADV_PURGE_ARGUMENT MADV_REMOVE | 331 #define MADV_PURGE_ARGUMENT MADV_REMOVE |
364 #else | 332 #else |
365 #define MADV_PURGE_ARGUMENT MADV_FREE | 333 #define MADV_PURGE_ARGUMENT MADV_FREE |
366 #endif | 334 #endif |
367 // Advise the kernel to remove resources associated with purged pages. | 335 // Advise the kernel to remove resources associated with purged pages. |
368 // Subsequent accesses of memory pages will succeed, but might result in | 336 // Subsequent accesses of memory pages will succeed, but might result in |
369 // zero-fill-on-demand pages. | 337 // zero-fill-on-demand pages. |
370 if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) + | 338 if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) + |
371 AlignToPageSize(sizeof(SharedState)), | 339 AlignToPageSize(sizeof(SharedState)), |
372 AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) { | 340 AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) { |
373 DPLOG(ERROR) << "madvise() failed"; | 341 DPLOG(ERROR) << "madvise() failed"; |
374 } | 342 } |
375 #elif defined(OS_WIN) | |
376 // MEM_DECOMMIT the purged pages to release the physical storage, | |
377 // either in memory or in the paging file on disk. Pages remain RESERVED. | |
378 if (!VirtualFree(reinterpret_cast<char*>(shared_memory_.memory()) + | |
379 AlignToPageSize(sizeof(SharedState)), | |
380 AlignToPageSize(mapped_size_), MEM_DECOMMIT)) { | |
381 DPLOG(ERROR) << "VirtualFree() MEM_DECOMMIT failed in Purge()"; | |
382 } | |
383 #endif | 343 #endif |
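
The zero-fill-on-demand behavior described above can be observed in isolation. A self-contained Linux sketch (a hypothetical demo, not part of this CL): after MADV_REMOVE on a shmem-backed mapping, a previously dirtied page reads back as zeros.

    // Hypothetical demo, not part of this CL. Build with: g++ demo.cc -lrt
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #include <cassert>
    #include <cstring>

    int main() {
      const size_t kSize = 4096;
      int fd = shm_open("/madv_remove_demo", O_CREAT | O_RDWR, 0600);
      ftruncate(fd, kSize);
      char* mem = static_cast<char*>(
          mmap(nullptr, kSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
      std::memset(mem, 0xAB, kSize);     // Dirty the page.
      madvise(mem, kSize, MADV_REMOVE);  // Release the backing storage.
      assert(mem[0] == 0);               // Access succeeds: zero-fill-on-demand.
      munmap(mem, kSize);
      shm_unlink("/madv_remove_demo");
      close(fd);
      return 0;
    }
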
384 | 344 |
385 last_known_usage_ = Time(); | 345 last_known_usage_ = Time(); |
386 return true; | 346 return true; |
387 } | 347 } |
388 | 348 |
389 bool DiscardableSharedMemory::IsMemoryResident() const { | 349 bool DiscardableSharedMemory::IsMemoryResident() const { |
390 DCHECK(shared_memory_.memory()); | 350 DCHECK(shared_memory_.memory()); |
391 | 351 |
392 SharedState result(subtle::NoBarrier_Load( | 352 SharedState result(subtle::NoBarrier_Load( |
(...skipping 14 matching lines...)
407 | 367 |
408 void DiscardableSharedMemory::Close() { | 368 void DiscardableSharedMemory::Close() { |
409 shared_memory_.Close(); | 369 shared_memory_.Close(); |
410 } | 370 } |
411 | 371 |
412 Time DiscardableSharedMemory::Now() const { | 372 Time DiscardableSharedMemory::Now() const { |
413 return Time::Now(); | 373 return Time::Now(); |
414 } | 374 } |
415 | 375 |
416 } // namespace base | 376 } // namespace base |