Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/memory/discardable_shared_memory.h" | 5 #include "base/memory/discardable_shared_memory.h" |
| 6 | 6 |
| 7 #if defined(OS_POSIX) && !defined(OS_NACL) | 7 #if defined(OS_POSIX) && !defined(OS_NACL) |
| 8 // For madvise() which is available on all POSIX compatible systems. | 8 // For madvise() which is available on all POSIX compatible systems. |
| 9 #include <sys/mman.h> | 9 #include <sys/mman.h> |
| 10 #endif | 10 #endif |
| (...skipping 196 matching lines...) | |
| 207 locked_page_count_ += end - start; | 207 locked_page_count_ += end - start; |
| 208 #if DCHECK_IS_ON() | 208 #if DCHECK_IS_ON() |
| 209 // Detect incorrect usage by keeping track of exactly what pages are locked. | 209 // Detect incorrect usage by keeping track of exactly what pages are locked. |
| 210 for (auto page = start; page < end; ++page) { | 210 for (auto page = start; page < end; ++page) { |
| 211 auto result = locked_pages_.insert(page); | 211 auto result = locked_pages_.insert(page); |
| 212 DCHECK(result.second); | 212 DCHECK(result.second); |
| 213 } | 213 } |
| 214 DCHECK_EQ(locked_pages_.size(), locked_page_count_); | 214 DCHECK_EQ(locked_pages_.size(), locked_page_count_); |
| 215 #endif | 215 #endif |
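
The DCHECK bookkeeping above leans on a standard-library guarantee: `std::set::insert()` returns a pair whose `second` member is false when the element was already present. A minimal standalone sketch of the same double-lock / double-unlock detection (not Chromium code, just the underlying idea):

```cpp
#include <cassert>
#include <cstddef>
#include <set>

int main() {
  std::set<size_t> locked_pages;

  // First lock of page 5: insert() succeeds, so |second| is true.
  assert(locked_pages.insert(5).second);

  // Locking page 5 again: insert() is a no-op and |second| is false,
  // which is exactly what DCHECK(result.second) would catch.
  assert(!locked_pages.insert(5).second);

  // Unlocking a page that was never locked shows up as erase() == 0.
  assert(locked_pages.erase(7) == 0);
  return 0;
}
```
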
| 216 | 216 |
| | 217 // Pin pages if supported. |
| 217 #if defined(OS_ANDROID) | 218 #if defined(OS_ANDROID) |
| 218 SharedMemoryHandle handle = shared_memory_.handle(); | 219 SharedMemoryHandle handle = shared_memory_.handle(); |
| 219 if (SharedMemory::IsHandleValid(handle)) { | 220 if (SharedMemory::IsHandleValid(handle)) { |
| 220 if (ashmem_pin_region( | 221 if (ashmem_pin_region( |
| 221 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { | 222 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { |
| 222 return PURGED; | 223 return PURGED; |
| 223 } | 224 } |
| 224 } | 225 } |
| | 226 #elif defined(OS_WIN) |
| | 227 // If osVersion is 0, we need to lazy init. |
| | 228 if ((osVersion >= base::win::VERSION_WIN8) \|\| |
| | 229 ((!osVersion) && |
| | 230 ((osVersion = base::win::GetVersion()) >= base::win::VERSION_WIN8))) { |
reveman (2015/11/05 19:13:47):
I'm not a fan of this. It's hard to read. There's

penny (2015/11/05 19:51:29):
Bruce, Justin and I don't feel strongly about this
| | 231 if (!VirtualAlloc(reinterpret_cast<char*>(shared_memory_.memory()) + |
| | 232 AlignToPageSize(sizeof(SharedState)) + offset, |
| | 233 length, MEM_RESET_UNDO, PAGE_READWRITE)) { |
| | 234 return PURGED; |
| | 235 } |
| | 236 } |
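
One way to address the readability concern reveman raises above would be to hide the lazy version check behind a named predicate. A hypothetical sketch only: the helper name `IsWindows8OrLater()` is illustrative and not part of this CL, and the function-local static assumes C++11 thread-safe initialization, which the CL's explicit zero-check avoids:

```cpp
#if defined(OS_WIN)
#include "base/win/windows_version.h"

namespace {

// Hypothetical helper: caches the Windows version on first use so the
// Lock()/Unlock() call sites collapse to a single readable predicate.
// (Assumes C++11 "magic statics" are acceptable; the CL instead keeps an
// explicit class-static plus a zero-check.)
bool IsWindows8OrLater() {
  static base::win::Version version = base::win::GetVersion();
  return version >= base::win::VERSION_WIN8;
}

}  // namespace
#endif  // defined(OS_WIN)
```

With such a helper, the condition above would read simply `if (IsWindows8OrLater()) { ... }`.
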
| 225 #endif | 237 #endif |
| 226 | 238 |
| 227 return SUCCESS; | 239 return SUCCESS; |
| 228 } | 240 } |
| 229 | 241 |
| 230 void DiscardableSharedMemory::Unlock(size_t offset, size_t length) { | 242 void DiscardableSharedMemory::Unlock(size_t offset, size_t length) { |
| 231 DCHECK_EQ(AlignToPageSize(offset), offset); | 243 DCHECK_EQ(AlignToPageSize(offset), offset); |
| 232 DCHECK_EQ(AlignToPageSize(length), length); | 244 DCHECK_EQ(AlignToPageSize(length), length); |
| 233 | 245 |
| 234 // Calls to this function must be synchronized properly. | 246 // Calls to this function must be synchronized properly. |
| 235 DFAKE_SCOPED_LOCK(thread_collision_warner_); | 247 DFAKE_SCOPED_LOCK(thread_collision_warner_); |
| 236 | 248 |
| 237 // Zero for length means "everything onward". | 249 // Zero for length means "everything onward". |
| 238 if (!length) | 250 if (!length) |
| 239 length = AlignToPageSize(mapped_size_) - offset; | 251 length = AlignToPageSize(mapped_size_) - offset; |
| 240 | 252 |
| 241 DCHECK(shared_memory_.memory()); | 253 DCHECK(shared_memory_.memory()); |
| 242 | 254 |
| | 255 // Unpin pages if supported. |
| 243 #if defined(OS_ANDROID) | 256 #if defined(OS_ANDROID) |
| 244 SharedMemoryHandle handle = shared_memory_.handle(); | 257 SharedMemoryHandle handle = shared_memory_.handle(); |
| 245 if (SharedMemory::IsHandleValid(handle)) { | 258 if (SharedMemory::IsHandleValid(handle)) { |
| 246 if (ashmem_unpin_region( | 259 if (ashmem_unpin_region( |
| 247 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { | 260 handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) { |
| 248 DPLOG(ERROR) << "ashmem_unpin_region() failed"; | 261 DPLOG(ERROR) << "ashmem_unpin_region() failed"; |
| 249 } | 262 } |
| 250 } | 263 } |
| | 264 #elif defined(OS_WIN) |
| | 265 // If osVersion is 0, we need to lazy init. |
| | 266 if ((osVersion >= base::win::VERSION_WIN8) \|\| |
| | 267 ((!osVersion) && |
| | 268 ((osVersion = base::win::GetVersion()) >= base::win::VERSION_WIN8))) { |
| | 269 // Note: MEM_RESET is not technically gated on Win8. However, this Unlock |
| | 270 // function needs to match the Lock behavior (MEM_RESET_UNDO) to properly |
| | 271 // implement memory pinning. It needs to bias towards preserving the |
| | 272 // contents of memory between an Unlock and next Lock. |
| | 273 if (!VirtualAlloc(reinterpret_cast<char*>(shared_memory_.memory()) + |
| | 274 AlignToPageSize(sizeof(SharedState)) + offset, |
| | 275 length, MEM_RESET, PAGE_READWRITE)) { |
| | 276 DPLOG(ERROR) << "VirtualAlloc() MEM_RESET failed in Unlock()"; |
| | 277 } |
| | 278 } |
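
For context on the API pair used here (my summary of documented Win32 semantics, not text from the CL): `MEM_RESET` tells the kernel the pages' contents are disposable, and `MEM_RESET_UNDO` (Windows 8 and later) attempts to reverse that, failing if any of the data has already been replaced with zeroes. A minimal sketch of the round trip on a plain `VirtualAlloc` region:

```cpp
#include <windows.h>

int main() {
  const SIZE_T kSize = 4096;
  void* p = VirtualAlloc(nullptr, kSize, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);

  // "Unlock": contents may now be discarded under memory pressure.
  VirtualAlloc(p, kSize, MEM_RESET, PAGE_READWRITE);

  // "Lock": failure means at least some pages were already purged and
  // now read as zeroes, which is why Lock() above returns PURGED.
  if (!VirtualAlloc(p, kSize, MEM_RESET_UNDO, PAGE_READWRITE)) {
    // Caller must regenerate the contents.
  }

  VirtualFree(p, 0, MEM_RELEASE);
  return 0;
}
```
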
| 251 #endif | 279 #endif |
| 252 | 280 |
| 253 size_t start = offset / base::GetPageSize(); | 281 size_t start = offset / base::GetPageSize(); |
| 254 size_t end = start + length / base::GetPageSize(); | 282 size_t end = start + length / base::GetPageSize(); |
| 255 DCHECK_LT(start, end); | 283 DCHECK_LT(start, end); |
| 256 DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize()); | 284 DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize()); |
| 257 | 285 |
| 258 // Remove pages from |locked_page_count_|. | 286 // Remove pages from |locked_page_count_|. |
| 259 // Note: Unlocking a page that is not locked is an error. | 287 // Note: Unlocking a page that is not locked is an error. |
| 260 DCHECK_GE(locked_page_count_, end - start); | 288 DCHECK_GE(locked_page_count_, end - start); |
| (...skipping 55 matching lines...) | |
| 316 // was incorrect or memory was locked. In the second case, the caller should | 344 // was incorrect or memory was locked. In the second case, the caller should |
| 317 // most likely wait for some amount of time before attempting to purge the | 345 // most likely wait for some amount of time before attempting to purge the |
| 318 // memory again. | 346 // memory again. |
| 319 if (result.value.u != old_state.value.u) { | 347 if (result.value.u != old_state.value.u) { |
| 320 last_known_usage_ = result.GetLockState() == SharedState::LOCKED | 348 last_known_usage_ = result.GetLockState() == SharedState::LOCKED |
| 321 ? current_time | 349 ? current_time |
| 322 : result.GetTimestamp(); | 350 : result.GetTimestamp(); |
| 323 return false; | 351 return false; |
| 324 } | 352 } |
| 325 | 353 |
| | 354 // The next section releases as many resources as possible from within |
| | 355 // the purging process, until the client process notices the purge and |
| | 356 // releases its own references. |
| | 357 // Note: this memory will not be accessed again. The segment will be |
| | 358 // freed asynchronously at a later time, so do the best that can be |
| | 359 // done immediately. |
| 326 #if defined(OS_POSIX) && !defined(OS_NACL) | 360 #if defined(OS_POSIX) && !defined(OS_NACL) |
| 327 // Linux and Android provide MADV_REMOVE which is preferred as it has a | 361 // Linux and Android provide MADV_REMOVE which is preferred as it has a |
| 328 // behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs) | 362 // behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs) |
| 329 // provide MADV_FREE which has the same result but memory is purged lazily. | 363 // provide MADV_FREE which has the same result but memory is purged lazily. |
| 330 #if defined(OS_LINUX) || defined(OS_ANDROID) | 364 #if defined(OS_LINUX) || defined(OS_ANDROID) |
| 331 #define MADV_PURGE_ARGUMENT MADV_REMOVE | 365 #define MADV_PURGE_ARGUMENT MADV_REMOVE |
| 332 #else | 366 #else |
| 333 #define MADV_PURGE_ARGUMENT MADV_FREE | 367 #define MADV_PURGE_ARGUMENT MADV_FREE |
| 334 #endif | 368 #endif |
| 335 // Advise the kernel to remove resources associated with purged pages. | 369 // Advise the kernel to remove resources associated with purged pages. |
| 336 // Subsequent accesses of memory pages will succeed, but might result in | 370 // Subsequent accesses of memory pages will succeed, but might result in |
| 337 // zero-fill-on-demand pages. | 371 // zero-fill-on-demand pages. |
| 338 if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) + | 372 if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) + |
| 339 AlignToPageSize(sizeof(SharedState)), | 373 AlignToPageSize(sizeof(SharedState)), |
| 340 AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) { | 374 AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) { |
| 341 DPLOG(ERROR) << "madvise() failed"; | 375 DPLOG(ERROR) << "madvise() failed"; |
| 342 } | 376 } |
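
The "can be verified in tests" claim is concrete: after `MADV_REMOVE`, reads through the mapping observe zero-filled pages. A minimal Linux-only sketch (not Chromium code; anonymous shared mappings are shmem-backed, which `MADV_REMOVE` requires):

```cpp
#include <cassert>
#include <cstring>
#include <sys/mman.h>

int main() {
  const size_t kSize = 4096;
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* p = static_cast<char*>(mem);
  std::memset(p, 0xAB, kSize);

  // Drop the pages and their backing store.
  assert(madvise(p, kSize, MADV_REMOVE) == 0);

  // The mapping is still valid; reads fault in zero-fill-on-demand pages.
  assert(p[0] == 0);
  munmap(p, kSize);
  return 0;
}
```
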
| | 377 #elif defined(OS_WIN) |
| | 378 // MEM_DECOMMIT the purged pages to release the physical storage, |
| | 379 // either in memory or in the paging file on disk. Pages remain RESERVED. |
| | 380 if (!VirtualFree(reinterpret_cast<char*>(shared_memory_.memory()) + |
| | 381 AlignToPageSize(sizeof(SharedState)), |
| | 382 AlignToPageSize(mapped_size_), MEM_DECOMMIT)) { |
| | 383 DPLOG(ERROR) << "VirtualFree() MEM_DECOMMIT failed in Purge()"; |
| | 384 } |
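
A minimal sketch of the decommit/recommit semantics on a plain `VirtualAlloc` region, summarizing documented Win32 behavior rather than this CL's mapped-section setup:

```cpp
#include <windows.h>

int main() {
  const SIZE_T kSize = 4096;
  void* p = VirtualAlloc(nullptr, kSize, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);

  // Decommit: physical storage (RAM or paging file) is released, but the
  // address range stays RESERVED, so the region does not move.
  VirtualFree(p, kSize, MEM_DECOMMIT);

  // Touching the pages now would fault. Recommitting yields zero-filled
  // pages, matching the "purged" model above.
  VirtualAlloc(p, kSize, MEM_COMMIT, PAGE_READWRITE);

  VirtualFree(p, 0, MEM_RELEASE);  // MEM_RELEASE requires size == 0.
  return 0;
}
```
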
| 343 #endif | 385 #endif |
| 344 | 386 |
| 345 last_known_usage_ = Time(); | 387 last_known_usage_ = Time(); |
| 346 return true; | 388 return true; |
| 347 } | 389 } |
| 348 | 390 |
| 349 bool DiscardableSharedMemory::IsMemoryResident() const { | 391 bool DiscardableSharedMemory::IsMemoryResident() const { |
| 350 DCHECK(shared_memory_.memory()); | 392 DCHECK(shared_memory_.memory()); |
| 351 | 393 |
| 352 SharedState result(subtle::NoBarrier_Load( | 394 SharedState result(subtle::NoBarrier_Load( |
| (...skipping 13 matching lines...) | |
| 366 } | 408 } |
| 367 | 409 |
| 368 void DiscardableSharedMemory::Close() { | 410 void DiscardableSharedMemory::Close() { |
| 369 shared_memory_.Close(); | 411 shared_memory_.Close(); |
| 370 } | 412 } |
| 371 | 413 |
| 372 Time DiscardableSharedMemory::Now() const { | 414 Time DiscardableSharedMemory::Now() const { |
| 373 return Time::Now(); | 415 return Time::Now(); |
| 374 } | 416 } |
| 375 | 417 |
| | 418 #if defined(OS_WIN) |
| | 419 // Initialize our static Windows version to 0. |
| | 420 // Lazy init will happen when first checked in Lock/Unlock. |
| | 421 // static |
| | 422 base::win::Version DiscardableSharedMemory::osVersion = |
| | 423 base::win::VERSION_PRE_XP; |
| | 424 #endif |
| | 425 |
| 376 } // namespace base | 426 } // namespace base |