Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/memory/discardable_shared_memory.h" | 5 #include "base/memory/discardable_shared_memory.h" |
| 6 | 6 |
| 7 #if defined(OS_POSIX) | 7 #if defined(OS_POSIX) && !defined(OS_NACL) |
| 8 #include <unistd.h> | 8 #include <sys/mman.h> |
| 9 #endif | 9 #endif |
| 10 | 10 |
| 11 #include <algorithm> | 11 #include <algorithm> |
| 12 | 12 |
| 13 #include "base/atomicops.h" | 13 #include "base/atomicops.h" |
| 14 #include "base/bits.h" | 14 #include "base/bits.h" |
| 15 #include "base/logging.h" | 15 #include "base/logging.h" |
| 16 #include "base/numerics/safe_math.h" | 16 #include "base/numerics/safe_math.h" |
| 17 #include "base/process/process_metrics.h" | 17 #include "base/process/process_metrics.h" |
| 18 | 18 |
| (...skipping 275 matching lines...) | |
| 294 } | 294 } |
| 295 | 295 |
| 296 void* DiscardableSharedMemory::memory() const { | 296 void* DiscardableSharedMemory::memory() const { |
| 297 return reinterpret_cast<uint8*>(shared_memory_.memory()) + | 297 return reinterpret_cast<uint8*>(shared_memory_.memory()) + |
| 298 AlignToPageSize(sizeof(SharedState)); | 298 AlignToPageSize(sizeof(SharedState)); |
| 299 } | 299 } |
| 300 | 300 |
| 301 bool DiscardableSharedMemory::Purge(Time current_time) { | 301 bool DiscardableSharedMemory::Purge(Time current_time) { |
| 302 // Calls to this function must be synchronized properly. | 302 // Calls to this function must be synchronized properly. |
| 303 DFAKE_SCOPED_LOCK(thread_collision_warner_); | 303 DFAKE_SCOPED_LOCK(thread_collision_warner_); |
| 304 | 304 DCHECK(shared_memory_.memory()); |
| 305 // Early out if not mapped. This can happen if the segment was previously | |
| 306 // unmapped using a call to Close(). | |
| 307 if (!shared_memory_.memory()) | |
| 308 return true; | |
| 309 | 305 |
| 310 SharedState old_state(SharedState::UNLOCKED, last_known_usage_); | 306 SharedState old_state(SharedState::UNLOCKED, last_known_usage_); |
| 311 SharedState new_state(SharedState::UNLOCKED, Time()); | 307 SharedState new_state(SharedState::UNLOCKED, Time()); |
| 312 SharedState result(subtle::Acquire_CompareAndSwap( | 308 SharedState result(subtle::Acquire_CompareAndSwap( |
| 313 &SharedStateFromSharedMemory(shared_memory_)->value.i, | 309 &SharedStateFromSharedMemory(shared_memory_)->value.i, |
| 314 old_state.value.i, | 310 old_state.value.i, |
| 315 new_state.value.i)); | 311 new_state.value.i)); |
| 316 | 312 |
| 317 // Update |last_known_usage_| to |current_time| if the memory is locked. This | 313 // Update |last_known_usage_| to |current_time| if the memory is locked. This |
| 318 // allows the caller to determine if purging failed because last known usage | 314 // allows the caller to determine if purging failed because last known usage |
| 319 // was incorrect or memory was locked. In the second case, the caller should | 315 // was incorrect or memory was locked. In the second case, the caller should |
| 320 // most likely wait for some amount of time before attempting to purge the | 316 // most likely wait for some amount of time before attempting to purge the |
| 321 // memory again. | 317 // memory again. |
| 322 if (result.value.u != old_state.value.u) { | 318 if (result.value.u != old_state.value.u) { |
| 323 last_known_usage_ = result.GetLockState() == SharedState::LOCKED | 319 last_known_usage_ = result.GetLockState() == SharedState::LOCKED |
| 324 ? current_time | 320 ? current_time |
| 325 : result.GetTimestamp(); | 321 : result.GetTimestamp(); |
| 326 return false; | 322 return false; |
| 327 } | 323 } |
| 328 | 324 |
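
The comment above describes the purge protocol: the purge only succeeds if a single compare-and-swap can move the shared state from (UNLOCKED, last_known_usage_) to (UNLOCKED, null time); otherwise the observed value tells the caller whether the memory was locked or simply used more recently. A minimal sketch of that pattern, using std::atomic and a hypothetical packed state word in place of the real SharedState layout (which lives in the lines elided above):

```cpp
#include <atomic>
#include <cstdint>

enum LockState : uint64_t { UNLOCKED = 0, LOCKED = 1 };

// Hypothetical encoding, for illustration only: lock bit in the low bit, a
// coarse usage timestamp in the remaining bits.
constexpr uint64_t Pack(LockState lock, uint64_t usage_time) {
  return (usage_time << 1) | static_cast<uint64_t>(lock);
}

// Attempt to purge: succeeds only if the state is still exactly
// (UNLOCKED, last_known_usage). On failure, |expected| holds the value that
// was actually observed, which a caller can inspect the same way Purge()
// inspects |result| above.
bool TryPurge(std::atomic<uint64_t>& shared_state, uint64_t last_known_usage) {
  uint64_t expected = Pack(UNLOCKED, last_known_usage);
  const uint64_t desired = Pack(UNLOCKED, 0);  // Unlocked, null timestamp.
  return shared_state.compare_exchange_strong(expected, desired,
                                              std::memory_order_acquire);
}
```
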
| 325 #if defined(OS_POSIX) && !defined(OS_NACL) | |
| 326 // Advise the kernel to free resources associated with purged pages. | |
| 327 // Subsequent accesses of memory pages will succeed, but will result in | |
| 328 // zero-fill-on-demand pages. | |
| 329 if (HANDLE_EINTR(madvise(reinterpret_cast<char*>(shared_memory_.memory()) + | |

Primiano Tucci (use gerrit), 2015/10/16 09:31:43:
madvise shouldn't be interruptible (manpage doesn'…

reveman, 2015/10/16 14:24:42:
Good catch. I removed HANDLE_EINTR.
| 330 AlignToPageSize(sizeof(SharedState)), | |
| 331 AlignToPageSize(mapped_size_), MADV_DONTNEED))) { | |

Primiano Tucci (use gerrit), 2015/10/16 09:31:43:
Just doublechecking: isn't mapped_size_ including…

reveman, 2015/10/16 14:24:42:
|mapped_size_| is the size exposed to the user of…
| 332 DPLOG(ERROR) << "madvise(MADV_DONTNEED) failed"; | |
| 333 } | |
| 334 #endif | |
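
The comment added above the madvise() call relies on MADV_DONTNEED semantics: the advice lets the kernel drop the physical pages immediately, while the mapping itself stays valid and later accesses fault in fresh pages. A small self-contained illustration of that behavior, using a plain anonymous private mapping rather than base::SharedMemory for brevity (for file-backed shared mappings the pages are instead repopulated from the backing object):

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstring>

int main() {
  const size_t kSize = 4 * static_cast<size_t>(getpagesize());

  // Anonymous private mapping standing in for the discardable region.
  void* raw = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(raw != MAP_FAILED);
  char* mem = static_cast<char*>(raw);

  memset(mem, 0xAB, kSize);  // Dirty the pages so they hold real data.

  // Advise the kernel that the contents are no longer needed; the physical
  // pages can be reclaimed right away, but the mapping remains usable.
  madvise(mem, kSize, MADV_DONTNEED);

  // Accessing the range still succeeds, but an anonymous private mapping now
  // reads back as zero-fill-on-demand pages.
  assert(mem[0] == 0);

  munmap(mem, kSize);
  return 0;
}
```
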
| 335 | |
| 329 last_known_usage_ = Time(); | 336 last_known_usage_ = Time(); |
| 330 return true; | 337 return true; |
| 331 } | 338 } |
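
The review exchange attached to the madvise() call above concerns the retry-on-EINTR idiom: re-issuing a syscall that failed only because a signal arrived before it could complete. The madvise man page does not list EINTR as a possible error, so the wrapper adds nothing there, and the reply notes it was removed from the final patch. For reference, a generic sketch of the idiom (not Chromium's actual HANDLE_EINTR macro):

```cpp
#include <cerrno>

// Retry a syscall-like operation while it fails with EINTR. Only worth using
// for calls whose documentation actually lists EINTR (read, write, open, ...);
// madvise is not one of them.
template <typename Op>
auto RetryOnEintr(Op op) -> decltype(op()) {
  decltype(op()) result;
  do {
    result = op();
  } while (result == -1 && errno == EINTR);
  return result;
}

// Usage sketch (assuming fd, buf and len exist):
//   ssize_t n = RetryOnEintr([&] { return read(fd, buf, len); });
```
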
| 332 | 339 |
| 333 bool DiscardableSharedMemory::IsMemoryResident() const { | 340 bool DiscardableSharedMemory::IsMemoryResident() const { |
| 334 DCHECK(shared_memory_.memory()); | 341 DCHECK(shared_memory_.memory()); |
| 335 | 342 |
| 336 SharedState result(subtle::NoBarrier_Load( | 343 SharedState result(subtle::NoBarrier_Load( |
| 337 &SharedStateFromSharedMemory(shared_memory_)->value.i)); | 344 &SharedStateFromSharedMemory(shared_memory_)->value.i)); |
| 338 | 345 |
| 339 return result.GetLockState() == SharedState::LOCKED || | 346 return result.GetLockState() == SharedState::LOCKED || |
| 340 !result.GetTimestamp().is_null(); | 347 !result.GetTimestamp().is_null(); |
| 341 } | 348 } |
| 342 | 349 |
| 343 bool DiscardableSharedMemory::IsMemoryLocked() const { | 350 bool DiscardableSharedMemory::IsMemoryLocked() const { |
| 344 DCHECK(shared_memory_.memory()); | 351 DCHECK(shared_memory_.memory()); |
| 345 | 352 |
| 346 SharedState result(subtle::NoBarrier_Load( | 353 SharedState result(subtle::NoBarrier_Load( |
| 347 &SharedStateFromSharedMemory(shared_memory_)->value.i)); | 354 &SharedStateFromSharedMemory(shared_memory_)->value.i)); |
| 348 | 355 |
| 349 return result.GetLockState() == SharedState::LOCKED; | 356 return result.GetLockState() == SharedState::LOCKED; |
| 350 } | 357 } |
| 351 | 358 |
| 352 void DiscardableSharedMemory::Close() { | 359 void DiscardableSharedMemory::Close() { |
| 353 shared_memory_.Close(); | 360 shared_memory_.Close(); |
| 354 } | 361 } |
| 355 | 362 |
| 356 #if defined(DISCARDABLE_SHARED_MEMORY_SHRINKING) | |
| 357 void DiscardableSharedMemory::Shrink() { | |
| 358 #if defined(OS_POSIX) | |
| 359 SharedMemoryHandle handle = shared_memory_.handle(); | |
| 360 if (!SharedMemory::IsHandleValid(handle)) | |
| 361 return; | |
| 362 | |
| 363 // Truncate shared memory to size of SharedState. | |
| 364 if (HANDLE_EINTR(ftruncate(SharedMemory::GetFdFromSharedMemoryHandle(handle), | |
| 365 AlignToPageSize(sizeof(SharedState)))) != 0) { | |
| 366 DPLOG(ERROR) << "ftruncate() failed"; | |
| 367 return; | |
| 368 } | |
| 369 mapped_size_ = 0; | |
| 370 #else | |
| 371 NOTIMPLEMENTED(); | |
| 372 #endif | |
| 373 } | |
| 374 #endif | |
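
The Shrink() implementation removed above releases backing storage by truncating the shared memory object down to just the page-aligned SharedState header, leaving the handle itself valid. A rough sketch of that idea against a generic POSIX shared memory descriptor; ShrinkToHeader and |shm_fd| are hypothetical, and obtaining the descriptor (shm_open, memfd_create, etc.) is out of scope:

```cpp
#include <unistd.h>
#include <cstdio>

// Hypothetical helper: shrink a POSIX shared memory object so that only
// |header_bytes| of backing storage remain. The descriptor and any existing
// mapping of the retained prefix stay valid; the truncated tail can be
// reclaimed by the kernel.
bool ShrinkToHeader(int shm_fd, off_t header_bytes) {
  if (ftruncate(shm_fd, header_bytes) != 0) {
    perror("ftruncate");
    return false;
  }
  return true;
}
```
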
| 375 | |
| 376 Time DiscardableSharedMemory::Now() const { | 363 Time DiscardableSharedMemory::Now() const { |
| 377 return Time::Now(); | 364 return Time::Now(); |
| 378 } | 365 } |
| 379 | 366 |
| 380 } // namespace base | 367 } // namespace base |