| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/memory/discardable_shared_memory.h" | 5 #include "base/memory/discardable_shared_memory.h" |
| 6 | 6 |
| 7 #if defined(OS_POSIX) && !defined(OS_NACL) | 7 #if defined(OS_POSIX) && !defined(OS_NACL) |
| 8 // For madvise() which is available on all POSIX compatible systems. | 8 // For madvise() which is available on all POSIX compatible systems. |
| 9 #include <sys/mman.h> | 9 #include <sys/mman.h> |
| 10 #endif | 10 #endif |
| 11 | 11 |
| 12 #include <algorithm> | 12 #include <algorithm> |
| 13 | 13 |
| 14 #include "base/atomicops.h" | 14 #include "base/atomicops.h" |
| 15 #include "base/bits.h" | 15 #include "base/bits.h" |
| 16 #include "base/logging.h" | 16 #include "base/logging.h" |
| 17 #include "base/numerics/safe_math.h" | 17 #include "base/numerics/safe_math.h" |
| 18 #include "base/process/process_metrics.h" | 18 #include "base/process/process_metrics.h" |
| 19 | 19 |
| 20 #if defined(OS_ANDROID) | 20 #if defined(OS_ANDROID) |
| 21 #include "third_party/ashmem/ashmem.h" | 21 #include "third_party/ashmem/ashmem.h" |
| 22 #endif | 22 #endif |
| 23 | 23 |
| 24 #if defined(OS_WIN) |
| 25 #include "base/win/windows_version.h" |
| 26 #endif |
| 27 |
| 24 namespace base { | 28 namespace base { |
| 25 namespace { | 29 namespace { |
| 26 | 30 |
| 27 // Use a machine-sized pointer as atomic type. It will use the Atomic32 or | 31 // Use a machine-sized pointer as atomic type. It will use the Atomic32 or |
| 28 // Atomic64 routines, depending on the architecture. | 32 // Atomic64 routines, depending on the architecture. |
| 29 typedef intptr_t AtomicType; | 33 typedef intptr_t AtomicType; |
| 30 typedef uintptr_t UAtomicType; | 34 typedef uintptr_t UAtomicType; |
| 31 | 35 |
| 32 // Template specialization for timestamp serialization/deserialization. This | 36 // Template specialization for timestamp serialization/deserialization. This |
| 33 // is used to serialize timestamps using Unix time on systems where AtomicType | 37 // is used to serialize timestamps using Unix time on systems where AtomicType |
| (...skipping 180 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 214 locked_page_count_ += end - start; | 218 locked_page_count_ += end - start; |
| 215 #if DCHECK_IS_ON() | 219 #if DCHECK_IS_ON() |
| 216 // Detect incorrect usage by keeping track of exactly what pages are locked. | 220 // Detect incorrect usage by keeping track of exactly what pages are locked. |
| 217 for (auto page = start; page < end; ++page) { | 221 for (auto page = start; page < end; ++page) { |
| 218 auto result = locked_pages_.insert(page); | 222 auto result = locked_pages_.insert(page); |
| 219 DCHECK(result.second); | 223 DCHECK(result.second); |
| 220 } | 224 } |
| 221 DCHECK_EQ(locked_pages_.size(), locked_page_count_); | 225 DCHECK_EQ(locked_pages_.size(), locked_page_count_); |
| 222 #endif | 226 #endif |
| 223 | 227 |
| 228 // Pin pages if supported. |
| 224 #if defined(OS_ANDROID) | 229 #if defined(OS_ANDROID) |
| 225 SharedMemoryHandle handle = shared_memory_.handle(); | 230 SharedMemoryHandle handle = shared_memory_.handle(); |
| 226 if (SharedMemory::IsHandleValid(handle)) { | 231 if (SharedMemory::IsHandleValid(handle)) { |
| 227 if (ashmem_pin_region( | 232 if (ashmem_pin_region( |
| 228 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { | 233 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { |
| 229 return PURGED; | 234 return PURGED; |
| 230 } | 235 } |
| 231 } | 236 } |
| 237 #elif defined(OS_WIN) |
| 238 if (base::win::GetVersion() >= base::win::VERSION_WIN8) { |
| 239 if (!VirtualAlloc(reinterpret_cast<char*>(shared_memory_.memory()) + |
| 240 AlignToPageSize(sizeof(SharedState)) + offset, |
| 241 length, MEM_RESET_UNDO, PAGE_READWRITE)) { |
| 242 return PURGED; |
| 243 } |
| 244 } |
| 232 #endif | 245 #endif |
| 233 | 246 |
| 234 return SUCCESS; | 247 return SUCCESS; |
| 235 } | 248 } |
| 236 | 249 |
| 237 void DiscardableSharedMemory::Unlock(size_t offset, size_t length) { | 250 void DiscardableSharedMemory::Unlock(size_t offset, size_t length) { |
| 238 DCHECK_EQ(AlignToPageSize(offset), offset); | 251 DCHECK_EQ(AlignToPageSize(offset), offset); |
| 239 DCHECK_EQ(AlignToPageSize(length), length); | 252 DCHECK_EQ(AlignToPageSize(length), length); |
| 240 | 253 |
| 241 // Calls to this function must be synchronized properly. | 254 // Calls to this function must be synchronized properly. |
| 242 DFAKE_SCOPED_LOCK(thread_collision_warner_); | 255 DFAKE_SCOPED_LOCK(thread_collision_warner_); |
| 243 | 256 |
| 244 // Zero for length means "everything onward". | 257 // Zero for length means "everything onward". |
| 245 if (!length) | 258 if (!length) |
| 246 length = AlignToPageSize(mapped_size_) - offset; | 259 length = AlignToPageSize(mapped_size_) - offset; |
| 247 | 260 |
| 248 DCHECK(shared_memory_.memory()); | 261 DCHECK(shared_memory_.memory()); |
| 249 | 262 |
| 263 // Unpin pages if supported. |
| 250 #if defined(OS_ANDROID) | 264 #if defined(OS_ANDROID) |
| 251 SharedMemoryHandle handle = shared_memory_.handle(); | 265 SharedMemoryHandle handle = shared_memory_.handle(); |
| 252 if (SharedMemory::IsHandleValid(handle)) { | 266 if (SharedMemory::IsHandleValid(handle)) { |
| 253 if (ashmem_unpin_region( | 267 if (ashmem_unpin_region( |
| 254 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { | 268 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { |
| 255 DPLOG(ERROR) << "ashmem_unpin_region() failed"; | 269 DPLOG(ERROR) << "ashmem_unpin_region() failed"; |
| 256 } | 270 } |
| 257 } | 271 } |
| 272 #elif defined(OS_WIN) |
| 273 if (base::win::GetVersion() >= base::win::VERSION_WIN8) { |
| 274 // Note: MEM_RESET is not technically gated on Win8. However, this Unlock |
| 275 // function needs to match the Lock behaviour (MEM_RESET_UNDO) to properly |
| 276 // implement memory pinning. It needs to bias towards preserving the |
| 277 // contents of memory between an Unlock and next Lock. |
| 278 if (!VirtualAlloc(reinterpret_cast<char*>(shared_memory_.memory()) + |
| 279 AlignToPageSize(sizeof(SharedState)) + offset, |
| 280 length, MEM_RESET, PAGE_READWRITE)) { |
| 281 DPLOG(ERROR) << "VirtualAlloc() MEM_RESET failed in Unlock()"; |
| 282 } |
| 283 } |
| 258 #endif | 284 #endif |
| 259 | 285 |
| 260 size_t start = offset / base::GetPageSize(); | 286 size_t start = offset / base::GetPageSize(); |
| 261 size_t end = start + length / base::GetPageSize(); | 287 size_t end = start + length / base::GetPageSize(); |
| 262 DCHECK_LT(start, end); | 288 DCHECK_LT(start, end); |
| 263 DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize()); | 289 DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize()); |
| 264 | 290 |
| 265 // Remove pages from |locked_page_count_|. | 291 // Remove pages from |locked_page_count_|. |
| 266 // Note: Unlocking a page that is not locked is an error. | 292 // Note: Unlocking a page that is not locked is an error. |
| 267 DCHECK_GE(locked_page_count_, end - start); | 293 DCHECK_GE(locked_page_count_, end - start); |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 323 // was incorrect or memory was locked. In the second case, the caller should | 349 // was incorrect or memory was locked. In the second case, the caller should |
| 324 // most likely wait for some amount of time before attempting to purge the | 350 // most likely wait for some amount of time before attempting to purge the |
| 325 // memory again. | 351 // memory again. |
| 326 if (result.value.u != old_state.value.u) { | 352 if (result.value.u != old_state.value.u) { |
| 327 last_known_usage_ = result.GetLockState() == SharedState::LOCKED | 353 last_known_usage_ = result.GetLockState() == SharedState::LOCKED |
| 328 ? current_time | 354 ? current_time |
| 329 : result.GetTimestamp(); | 355 : result.GetTimestamp(); |
| 330 return false; | 356 return false; |
| 331 } | 357 } |
| 332 | 358 |
| 359 // The next section will release as much resource as can be done |
| 360 // from the purging process, until the client process notices the |
| 361 // purge and releases its own references. |
| 362 // Note: this memory will not be accessed again. The segment will be |
| 363 // freed asynchronously at a later time, so just do the best |
| 364 // we can immediately. |
| 333 #if defined(OS_POSIX) && !defined(OS_NACL) | 365 #if defined(OS_POSIX) && !defined(OS_NACL) |
| 334 // Linux and Android provide MADV_REMOVE which is preferred as it has a | 366 // Linux and Android provide MADV_REMOVE which is preferred as it has a |
| 335 // behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs), | 367 // behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs), |
| 336 // provide MADV_FREE which has the same result but memory is purged lazily. | 368 // provide MADV_FREE which has the same result but memory is purged lazily. |
| 337 #if defined(OS_LINUX) || defined(OS_ANDROID) | 369 #if defined(OS_LINUX) || defined(OS_ANDROID) |
| 338 #define MADV_PURGE_ARGUMENT MADV_REMOVE | 370 #define MADV_PURGE_ARGUMENT MADV_REMOVE |
| 339 #else | 371 #else |
| 340 #define MADV_PURGE_ARGUMENT MADV_FREE | 372 #define MADV_PURGE_ARGUMENT MADV_FREE |
| 341 #endif | 373 #endif |
| 342 // Advise the kernel to remove resources associated with purged pages. | 374 // Advise the kernel to remove resources associated with purged pages. |
| 343 // Subsequent accesses of memory pages will succeed, but might result in | 375 // Subsequent accesses of memory pages will succeed, but might result in |
| 344 // zero-fill-on-demand pages. | 376 // zero-fill-on-demand pages. |
| 345 if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) + | 377 if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) + |
| 346 AlignToPageSize(sizeof(SharedState)), | 378 AlignToPageSize(sizeof(SharedState)), |
| 347 AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) { | 379 AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) { |
| 348 DPLOG(ERROR) << "madvise() failed"; | 380 DPLOG(ERROR) << "madvise() failed"; |
| 349 } | 381 } |
| 382 #elif defined(OS_WIN) |
| 383 // MEM_DECOMMIT the purged pages to release the physical storage, |
| 384 // either in memory or in the paging file on disk. Pages remain RESERVED. |
| 385 if (!VirtualFree(reinterpret_cast<char*>(shared_memory_.memory()) + |
| 386 AlignToPageSize(sizeof(SharedState)), |
| 387 AlignToPageSize(mapped_size_), MEM_DECOMMIT)) { |
| 388 DPLOG(ERROR) << "VirtualFree() MEM_DECOMMIT failed in Purge()"; |
| 389 } |
| 350 #endif | 390 #endif |
| 351 | 391 |
| 352 last_known_usage_ = Time(); | 392 last_known_usage_ = Time(); |
| 353 return true; | 393 return true; |
| 354 } | 394 } |
| 355 | 395 |
| 356 bool DiscardableSharedMemory::IsMemoryResident() const { | 396 bool DiscardableSharedMemory::IsMemoryResident() const { |
| 357 DCHECK(shared_memory_.memory()); | 397 DCHECK(shared_memory_.memory()); |
| 358 | 398 |
| 359 SharedState result(subtle::NoBarrier_Load( | 399 SharedState result(subtle::NoBarrier_Load( |
| (...skipping 14 matching lines...) Expand all Loading... |
| 374 | 414 |
| 375 void DiscardableSharedMemory::Close() { | 415 void DiscardableSharedMemory::Close() { |
| 376 shared_memory_.Close(); | 416 shared_memory_.Close(); |
| 377 } | 417 } |
| 378 | 418 |
| 379 Time DiscardableSharedMemory::Now() const { | 419 Time DiscardableSharedMemory::Now() const { |
| 380 return Time::Now(); | 420 return Time::Now(); |
| 381 } | 421 } |
| 382 | 422 |
| 383 } // namespace base | 423 } // namespace base |
| OLD | NEW |