OLD | NEW |
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory.h" | 5 #include "base/memory/discardable_memory.h" |
6 | 6 |
7 #include <mach/mach.h> | 7 #include <mach/mach.h> |
8 #include <sys/mman.h> | |
9 | 8 |
10 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
11 #include "base/compiler_specific.h" | 10 #include "base/compiler_specific.h" |
12 #include "base/lazy_instance.h" | 11 #include "base/lazy_instance.h" |
13 #include "base/logging.h" | 12 #include "base/logging.h" |
| 13 #include "base/mac/mach_logging.h" |
| 14 #include "base/mac/scoped_mach_vm.h" |
14 #include "base/memory/discardable_memory_emulated.h" | 15 #include "base/memory/discardable_memory_emulated.h" |
15 #include "base/memory/discardable_memory_malloc.h" | 16 #include "base/memory/discardable_memory_malloc.h" |
16 #include "base/memory/discardable_memory_manager.h" | 17 #include "base/memory/discardable_memory_manager.h" |
17 #include "base/memory/scoped_ptr.h" | 18 #include "base/memory/scoped_ptr.h" |
18 | 19 |
19 namespace base { | 20 namespace base { |
20 namespace { | 21 namespace { |
21 | 22 |
22 // For Mac, have the DiscardableMemoryManager trigger userspace eviction when | 23 // For Mac, have the DiscardableMemoryManager trigger userspace eviction when |
23 // address space usage gets too high (e.g. 512 MBytes). | 24 // address space usage gets too high (e.g. 512 MBytes). |
24 const size_t kMacMemoryLimit = 512 * 1024 * 1024; | 25 const size_t kMacMemoryLimit = 512 * 1024 * 1024; |
25 | 26 |
26 struct SharedState { | 27 struct SharedState { |
27 SharedState() : manager(kMacMemoryLimit, kMacMemoryLimit) {} | 28 SharedState() : manager(kMacMemoryLimit, kMacMemoryLimit) {} |
28 | 29 |
29 internal::DiscardableMemoryManager manager; | 30 internal::DiscardableMemoryManager manager; |
30 }; | 31 }; |
31 LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER; | 32 LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER; |
32 | 33 |
33 // The VM subsystem allows tagging of memory and 240-255 is reserved for | 34 // The VM subsystem allows tagging of memory and 240-255 is reserved for |
34 // application use (see mach/vm_statistics.h). Pick 252 (after chromium's atomic | 35 // application use (see mach/vm_statistics.h). Pick 252 (after chromium's atomic |
35 // weight of ~52). | 36 // weight of ~52). |
36 const int kDiscardableMemoryTag = VM_MAKE_TAG(252); | 37 const int kDiscardableMemoryTag = VM_MAKE_TAG(252); |
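As a quick illustration of the tagging described in the comment above, here is a minimal standalone sketch (plain Mach calls only, nothing Chromium-specific) of how VM_MAKE_TAG(252) is folded into vm_allocate()'s flags; tools such as vmmap then attribute the region to that application tag. The one-page size is arbitrary.

  #include <mach/mach.h>
  #include <mach/vm_statistics.h>

  void TagSketch() {
    vm_address_t address = 0;
    // The tag occupies the high byte of the flags word and rides along with
    // VM_FLAGS_ANYWHERE / VM_FLAGS_PURGABLE.
    kern_return_t kr = vm_allocate(
        mach_task_self(),
        &address,
        vm_page_size,
        VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | VM_MAKE_TAG(252));
    if (kr == KERN_SUCCESS)
      vm_deallocate(mach_task_self(), address, vm_page_size);
  }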
37 | 38 |
38 class DiscardableMemoryMac | 39 class DiscardableMemoryMac |
39 : public DiscardableMemory, | 40 : public DiscardableMemory, |
40 public internal::DiscardableMemoryManagerAllocation { | 41 public internal::DiscardableMemoryManagerAllocation { |
41 public: | 42 public: |
42 explicit DiscardableMemoryMac(size_t bytes) | 43 explicit DiscardableMemoryMac(size_t bytes) |
43 : buffer_(0), bytes_(bytes), is_locked_(false) { | 44 : memory_(0, 0), |
| 45 bytes_(mach_vm_round_page(bytes)), |
| 46 is_locked_(false) { |
44 g_shared_state.Pointer()->manager.Register(this, bytes); | 47 g_shared_state.Pointer()->manager.Register(this, bytes); |
45 } | 48 } |
46 | 49 |
47 bool Initialize() { return Lock() == DISCARDABLE_MEMORY_LOCK_STATUS_PURGED; } | 50 bool Initialize() { return Lock() == DISCARDABLE_MEMORY_LOCK_STATUS_PURGED; } |
48 | 51 |
49 virtual ~DiscardableMemoryMac() { | 52 virtual ~DiscardableMemoryMac() { |
50 if (is_locked_) | 53 if (is_locked_) |
51 Unlock(); | 54 Unlock(); |
52 g_shared_state.Pointer()->manager.Unregister(this); | 55 g_shared_state.Pointer()->manager.Unregister(this); |
53 if (buffer_) | |
54 vm_deallocate(mach_task_self(), buffer_, bytes_); | |
55 } | 56 } |
56 | 57 |
57 // Overridden from DiscardableMemory: | 58 // Overridden from DiscardableMemory: |
58 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { | 59 virtual DiscardableMemoryLockStatus Lock() OVERRIDE { |
59 DCHECK(!is_locked_); | 60 DCHECK(!is_locked_); |
60 | 61 |
61 bool purged = false; | 62 bool purged = false; |
62 if (!g_shared_state.Pointer()->manager.AcquireLock(this, &purged)) | 63 if (!g_shared_state.Pointer()->manager.AcquireLock(this, &purged)) |
63 return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED; | 64 return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED; |
64 | 65 |
65 is_locked_ = true; | 66 is_locked_ = true; |
66 return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED | 67 return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED |
67 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; | 68 : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS; |
68 } | 69 } |
| 70 |
69 virtual void Unlock() OVERRIDE { | 71 virtual void Unlock() OVERRIDE { |
70 DCHECK(is_locked_); | 72 DCHECK(is_locked_); |
71 g_shared_state.Pointer()->manager.ReleaseLock(this); | 73 g_shared_state.Pointer()->manager.ReleaseLock(this); |
72 is_locked_ = false; | 74 is_locked_ = false; |
73 } | 75 } |
| 76 |
74 virtual void* Memory() const OVERRIDE { | 77 virtual void* Memory() const OVERRIDE { |
75 DCHECK(is_locked_); | 78 DCHECK(is_locked_); |
76 return reinterpret_cast<void*>(buffer_); | 79 return reinterpret_cast<void*>(memory_.address()); |
77 } | 80 } |
78 | 81 |
79 // Overridden from internal::DiscardableMemoryManagerAllocation: | 82 // Overridden from internal::DiscardableMemoryManagerAllocation: |
80 virtual bool AllocateAndAcquireLock() OVERRIDE { | 83 virtual bool AllocateAndAcquireLock() OVERRIDE { |
81 bool persistent = true; | 84 bool persistent = true; |
82 if (!buffer_) { | 85 kern_return_t ret; |
83 kern_return_t ret = vm_allocate( | 86 if (!memory_.size()) { |
| 87 vm_address_t address = 0; |
| 88 ret = vm_allocate( |
84 mach_task_self(), | 89 mach_task_self(), |
85 &buffer_, | 90 &address, |
86 bytes_, | 91 bytes_, |
87 VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE | kDiscardableMemoryTag); | 92 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | kDiscardableMemoryTag); |
88 CHECK_EQ(KERN_SUCCESS, ret) << "wm_allocate() failed."; | 93 MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_allocate"; |
| 94 memory_.reset(address, bytes_); |
89 persistent = false; | 95 persistent = false; |
90 } | 96 } |
| 97 |
91 #if !defined(NDEBUG) | 98 #if !defined(NDEBUG) |
92 int status = mprotect( | 99 ret = vm_protect(mach_task_self(), |
93 reinterpret_cast<void*>(buffer_), bytes_, PROT_READ | PROT_WRITE); | 100 memory_.address(), |
94 DCHECK_EQ(0, status); | 101 memory_.size(), |
| 102 FALSE, |
| 103 VM_PROT_DEFAULT); |
| 104 MACH_DCHECK(ret == KERN_SUCCESS, ret) << "vm_protect"; |
95 #endif | 105 #endif |
| 106 |
96 int state = VM_PURGABLE_NONVOLATILE; | 107 int state = VM_PURGABLE_NONVOLATILE; |
97 kern_return_t ret = vm_purgable_control(mach_task_self(), | 108 ret = vm_purgable_control(mach_task_self(), |
98 buffer_, | 109 memory_.address(), |
99 VM_PURGABLE_SET_STATE, | 110 VM_PURGABLE_SET_STATE, |
100 &state); | 111 &state); |
101 CHECK_EQ(KERN_SUCCESS, ret) << "Failed to lock memory."; | 112 MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_purgable_control"; |
102 if (state & VM_PURGABLE_EMPTY) | 113 if (state & VM_PURGABLE_EMPTY) |
103 persistent = false; | 114 persistent = false; |
104 return persistent; | 115 return persistent; |
105 } | 116 } |
| 117 |
106 virtual void ReleaseLock() OVERRIDE { | 118 virtual void ReleaseLock() OVERRIDE { |
107 int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT; | 119 int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT; |
108 kern_return_t ret = vm_purgable_control(mach_task_self(), | 120 kern_return_t ret = vm_purgable_control(mach_task_self(), |
109 buffer_, | 121 memory_.address(), |
110 VM_PURGABLE_SET_STATE, | 122 VM_PURGABLE_SET_STATE, |
111 &state); | 123 &state); |
112 CHECK_EQ(KERN_SUCCESS, ret) << "Failed to unlock memory."; | 124 MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_purgable_control"; |
| 125 |
113 #if !defined(NDEBUG) | 126 #if !defined(NDEBUG) |
114 int status = mprotect(reinterpret_cast<void*>(buffer_), bytes_, PROT_NONE); | 127 ret = vm_protect(mach_task_self(), |
115 DCHECK_EQ(0, status); | 128 memory_.address(), |
| 129 memory_.size(), |
| 130 FALSE, |
| 131 VM_PROT_NONE); |
| 132 MACH_DCHECK(ret == KERN_SUCCESS, ret) << "vm_protect"; |
116 #endif | 133 #endif |
117 } | 134 } |
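Taken together, AllocateAndAcquireLock() and ReleaseLock() implement a small pin/unpin protocol on a purgeable region. Below is a condensed sketch of that protocol using only the Mach calls shown above; `region` is a placeholder for a purgeable allocation made as in AllocateAndAcquireLock().

  // Pin: make the region non-volatile. vm_purgable_control() returns the
  // previous state in |state|, so VM_PURGABLE_EMPTY tells us the kernel
  // discarded the contents while the region was volatile.
  int state = VM_PURGABLE_NONVOLATILE;
  kern_return_t kr = vm_purgable_control(
      mach_task_self(), region, VM_PURGABLE_SET_STATE, &state);
  bool contents_lost = (kr == KERN_SUCCESS) && (state & VM_PURGABLE_EMPTY);

  // ... read/write the memory while it is pinned ...

  // Unpin: offer the region back to the kernel as an eviction candidate.
  state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT;
  kr = vm_purgable_control(
      mach_task_self(), region, VM_PURGABLE_SET_STATE, &state);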
| 135 |
118 virtual void Purge() OVERRIDE { | 136 virtual void Purge() OVERRIDE { |
119 if (buffer_) { | 137 memory_.reset(); |
120 vm_deallocate(mach_task_self(), buffer_, bytes_); | |
121 buffer_ = 0; | |
122 } | |
123 } | 138 } |
124 | 139 |
125 private: | 140 private: |
126 vm_address_t buffer_; | 141 mac::ScopedMachVM memory_; |
127 const size_t bytes_; | 142 const size_t bytes_; |
128 bool is_locked_; | 143 bool is_locked_; |
129 | 144 |
130 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMac); | 145 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMac); |
131 }; | 146 }; |
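For orientation, a sketch of how a caller typically drives this implementation through the cross-platform base::DiscardableMemory interface. The CreateLockedMemory() factory is assumed from the base/memory/discardable_memory.h of this era and is not part of this diff; the size and the regenerate step are illustrative.

  #include <string.h>

  #include "base/memory/discardable_memory.h"
  #include "base/memory/scoped_ptr.h"

  void CacheSketch() {
    const size_t kSize = 64 * 1024;
    // Assumed factory; on Mac it ends up creating a DiscardableMemoryMac.
    scoped_ptr<base::DiscardableMemory> cache =
        base::DiscardableMemory::CreateLockedMemory(kSize);
    if (!cache)
      return;
    memset(cache->Memory(), 0, kSize);  // safe to touch: currently locked
    cache->Unlock();                    // kernel may now purge the pages

    // Later: re-lock before reading; regenerate if the contents were purged.
    base::DiscardableMemoryLockStatus status = cache->Lock();
    if (status == base::DISCARDABLE_MEMORY_LOCK_STATUS_FAILED)
      return;
    if (status == base::DISCARDABLE_MEMORY_LOCK_STATUS_PURGED)
      memset(cache->Memory(), 0, kSize);
    cache->Unlock();
  }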
132 | 147 |
133 } // namespace | 148 } // namespace |
134 | 149 |
135 // static | 150 // static |
136 void DiscardableMemory::RegisterMemoryPressureListeners() { | 151 void DiscardableMemory::RegisterMemoryPressureListeners() { |
(...skipping 53 matching lines...)

190 } | 205 } |
191 | 206 |
192 // static | 207 // static |
193 void DiscardableMemory::PurgeForTesting() { | 208 void DiscardableMemory::PurgeForTesting() { |
194 int state = 0; | 209 int state = 0; |
195 vm_purgable_control(mach_task_self(), 0, VM_PURGABLE_PURGE_ALL, &state); | 210 vm_purgable_control(mach_task_self(), 0, VM_PURGABLE_PURGE_ALL, &state); |
196 internal::DiscardableMemoryEmulated::PurgeForTesting(); | 211 internal::DiscardableMemoryEmulated::PurgeForTesting(); |
197 } | 212 } |
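A hedged companion sketch for PurgeForTesting(): passing address 0 with VM_PURGABLE_PURGE_ALL asks the kernel to empty every volatile purgeable object in the task, and an individual region can then be queried with VM_PURGABLE_GET_STATE; it should report as empty if it was volatile when the purge ran. `region` is again a placeholder for such an allocation.

  int state = 0;
  vm_purgable_control(mach_task_self(), 0, VM_PURGABLE_PURGE_ALL, &state);

  // Query one region to confirm its pages were dropped (mirrors the
  // VM_PURGABLE_EMPTY check used in AllocateAndAcquireLock() above).
  kern_return_t kr = vm_purgable_control(
      mach_task_self(), region, VM_PURGABLE_GET_STATE, &state);
  bool emptied = (kr == KERN_SUCCESS) && (state & VM_PURGABLE_EMPTY);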
198 | 213 |
199 } // namespace base | 214 } // namespace base |