Chromium Code Reviews

Unified Diff: base/memory/discardable_shared_memory.cc

Issue 822713002: Update from https://crrev.com/309415 (Closed) Base URL: https://github.com/domokit/mojo.git@master
Patch Set: Created 6 years ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/memory/discardable_shared_memory.h"

 #if defined(OS_POSIX)
 #include <unistd.h>
 #endif

 #include <algorithm>

 #include "base/atomicops.h"
 #include "base/logging.h"
 #include "base/numerics/safe_math.h"
+#include "base/process/process_metrics.h"
+
+#if defined(OS_ANDROID)
+#include "third_party/ashmem/ashmem.h"
+#endif

 namespace base {
 namespace {

 // Use a machine-sized pointer as atomic type. It will use the Atomic32 or
 // Atomic64 routines, depending on the architecture.
 typedef intptr_t AtomicType;
 typedef uintptr_t UAtomicType;

 // Template specialization for timestamp serialization/deserialization. This
(...skipping 26 matching lines...)
   return time.ToInternalValue();
 }

 struct SharedState {
   enum LockState { UNLOCKED = 0, LOCKED = 1 };

   explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
   SharedState(LockState lock_state, Time timestamp) {
     int64 wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
     DCHECK_GE(wire_timestamp, 0);
-    DCHECK((lock_state & ~1) == 0);
+    DCHECK_EQ(lock_state & ~1, 0);
     value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
   }

   LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

   Time GetTimestamp() const {
     return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
   }

   // Bit 1: Lock state. Bit is set when locked.
   // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
   // purged.
   union {
     AtomicType i;
     UAtomicType u;
   } value;
 };
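
Note on the packing above: the lock state occupies the least significant bit and the wire timestamp fills the remaining bits, so one atomic word carries both. A minimal standalone sketch of the same round-trip (illustrative values, not from this CL):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kLocked = 1;                   // Low bit: lock state.
      const uintptr_t wire_timestamp = 1419465600;   // Example wire value.

      // Pack: shift the timestamp past the lock bit and OR the state in.
      uintptr_t packed = (wire_timestamp << 1) | kLocked;

      // Unpack: low bit is the lock state, the remaining bits the timestamp.
      assert((packed & 1) == kLocked);
      assert((packed >> 1) == wire_timestamp);
      return 0;
    }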
80 85
81 // Shared state is stored at offset 0 in shared memory segments. 86 // Shared state is stored at offset 0 in shared memory segments.
82 SharedState* SharedStateFromSharedMemory(const SharedMemory& shared_memory) { 87 SharedState* SharedStateFromSharedMemory(const SharedMemory& shared_memory) {
83 DCHECK(shared_memory.memory()); 88 DCHECK(shared_memory.memory());
84 return static_cast<SharedState*>(shared_memory.memory()); 89 return static_cast<SharedState*>(shared_memory.memory());
85 } 90 }
86 91
92 // Round up |size| to a multiple of alignment, which must be a power of two.
93 size_t Align(size_t alignment, size_t size) {
94 DCHECK_EQ(alignment & (alignment - 1), 0u);
95 return (size + alignment - 1) & ~(alignment - 1);
96 }
97
98 // Round up |size| to a multiple of page size.
99 size_t AlignToPageSize(size_t size) {
100 return Align(base::GetPageSize(), size);
101 }
102
87 } // namespace 103 } // namespace
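
The bitwise round-up in the new Align() only works for power-of-two alignments, which its DCHECK enforces. A small self-contained check of the arithmetic (4096 is used as an illustrative page size):

    #include <cassert>
    #include <cstddef>

    size_t Align(size_t alignment, size_t size) {
      // Valid only when |alignment| is a power of two.
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(Align(4096, 1) == 4096);     // Rounds up to one page.
      assert(Align(4096, 4096) == 4096);  // Already aligned: unchanged.
      assert(Align(4096, 4097) == 8192);  // Spills into a second page.
      return 0;
    }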

-DiscardableSharedMemory::DiscardableSharedMemory() {
+DiscardableSharedMemory::DiscardableSharedMemory()
+    : mapped_size_(0), locked_page_count_(0) {
 }

 DiscardableSharedMemory::DiscardableSharedMemory(
     SharedMemoryHandle shared_memory_handle)
-    : shared_memory_(shared_memory_handle, false) {
+    : shared_memory_(shared_memory_handle, false),
+      mapped_size_(0),
+      locked_page_count_(0) {
 }

 DiscardableSharedMemory::~DiscardableSharedMemory() {
 }

 bool DiscardableSharedMemory::CreateAndMap(size_t size) {
   CheckedNumeric<size_t> checked_size = size;
-  checked_size += sizeof(SharedState);
+  checked_size += AlignToPageSize(sizeof(SharedState));
   if (!checked_size.IsValid())
     return false;

   if (!shared_memory_.CreateAndMapAnonymous(checked_size.ValueOrDie()))
     return false;

+  mapped_size_ =
+      shared_memory_.mapped_size() - AlignToPageSize(sizeof(SharedState));
+
+  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON
+  for (size_t page = 0; page < locked_page_count_; ++page)
+    locked_pages_.insert(page);
+#endif
+
   DCHECK(last_known_usage_.is_null());
   SharedState new_state(SharedState::LOCKED, Time());
   subtle::Release_Store(&SharedStateFromSharedMemory(shared_memory_)->value.i,
                         new_state.value.i);
   return true;
 }

 bool DiscardableSharedMemory::Map(size_t size) {
-  return shared_memory_.Map(sizeof(SharedState) + size);
+  if (!shared_memory_.Map(AlignToPageSize(sizeof(SharedState)) + size))
+    return false;
+
+  mapped_size_ =
+      shared_memory_.mapped_size() - AlignToPageSize(sizeof(SharedState));
+
+  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON
+  for (size_t page = 0; page < locked_page_count_; ++page)
+    locked_pages_.insert(page);
+#endif
+
+  return true;
 }
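
Both CreateAndMap() and Map() now reserve a full page for the SharedState header, so user data starts page-aligned and |mapped_size_| counts only the usable bytes. A worked example of that arithmetic, assuming a hypothetical 8-byte header and 4096-byte pages:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kPageSize = 4096;
      const size_t kHeaderSize = 8;  // Hypothetical sizeof(SharedState).

      // AlignToPageSize(sizeof(SharedState)): the header pads to a full page.
      const size_t header =
          ((kHeaderSize + kPageSize - 1) / kPageSize) * kPageSize;
      assert(header == kPageSize);

      // A 3-page user mapping therefore needs 4 pages in the segment, and
      // locked_page_count_ starts at 3 (every user page begins locked).
      const size_t size = 3 * kPageSize;
      assert(header + size == 4 * kPageSize);
      assert(size / kPageSize == 3);
      return 0;
    }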

-bool DiscardableSharedMemory::Lock() {
-  DCHECK(shared_memory_.memory());
+bool DiscardableSharedMemory::Lock(size_t offset, size_t length) {
+  DCHECK_EQ(AlignToPageSize(offset), offset);
+  DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);

   // Return false when instance has been purged or not initialized properly by
   // checking if |last_known_usage_| is NULL.
   if (last_known_usage_.is_null())
     return false;

-  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
-  SharedState new_state(SharedState::LOCKED, Time());
-  SharedState result(subtle::Acquire_CompareAndSwap(
-      &SharedStateFromSharedMemory(shared_memory_)->value.i,
-      old_state.value.i,
-      new_state.value.i));
-  if (result.value.u == old_state.value.u)
-    return true;
-
-  // Update |last_known_usage_| in case the above CAS failed because of
-  // an incorrect timestamp.
-  last_known_usage_ = result.GetTimestamp();
-  return false;
+  DCHECK(shared_memory_.memory());
+
+  // We need to successfully acquire the platform independent lock before
+  // individual pages can be locked.
+  if (!locked_page_count_) {
+    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
+    SharedState new_state(SharedState::LOCKED, Time());
+    SharedState result(subtle::Acquire_CompareAndSwap(
+        &SharedStateFromSharedMemory(shared_memory_)->value.i,
+        old_state.value.i,
+        new_state.value.i));
+    if (result.value.u != old_state.value.u) {
+      // Update |last_known_usage_| in case the above CAS failed because of
+      // an incorrect timestamp.
+      last_known_usage_ = result.GetTimestamp();
+      return false;
+    }
+  }
+
+  // Zero for length means "everything onward".
+  if (!length)
+    length = AlignToPageSize(mapped_size_) - offset;
+
+  size_t start = offset / base::GetPageSize();
+  size_t end = start + length / base::GetPageSize();
+  DCHECK_LT(start, end);
+  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+  // Add pages to |locked_page_count_|.
+  // Note: Locking a page that is already locked is an error.
+  locked_page_count_ += end - start;
+#if DCHECK_IS_ON
+  // Detect incorrect usage by keeping track of exactly what pages are locked.
+  for (auto page = start; page < end; ++page) {
+    auto result = locked_pages_.insert(page);
+    DCHECK(result.second);
+  }
+  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+#if defined(OS_ANDROID)
+  SharedMemoryHandle handle = shared_memory_.handle();
+  DCHECK(SharedMemory::IsHandleValid(handle));
+  if (ashmem_pin_region(
+          handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
+    return false;
+  }
+#endif
+
+  return true;
 }
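
The [start, end) page interval above is plain index arithmetic on the page-aligned |offset| and |length|; locking is per page, and double-locking a page trips the DCHECK bookkeeping. A sketch of the index math with illustrative numbers:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kPageSize = 4096;
      // Lock the second and third pages of the segment.
      const size_t offset = 1 * kPageSize;
      const size_t length = 2 * kPageSize;

      const size_t start = offset / kPageSize;        // First page index: 1.
      const size_t end = start + length / kPageSize;  // One past the last: 3.
      assert(start == 1 && end == 3);                 // Pages 1 and 2 covered.
      return 0;
    }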

-void DiscardableSharedMemory::Unlock() {
+void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
+  DCHECK_EQ(AlignToPageSize(offset), offset);
+  DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+  // Zero for length means "everything onward".
+  if (!length)
+    length = AlignToPageSize(mapped_size_) - offset;
+
   DCHECK(shared_memory_.memory());

+#if defined(OS_ANDROID)
+  SharedMemoryHandle handle = shared_memory_.handle();
+  DCHECK(SharedMemory::IsHandleValid(handle));
+  if (ashmem_unpin_region(
+          handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
+    DPLOG(ERROR) << "ashmem_unpin_region() failed";
+  }
+#endif
+
+  size_t start = offset / base::GetPageSize();
+  size_t end = start + length / base::GetPageSize();
+  DCHECK_LT(start, end);
+  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+  // Remove pages from |locked_page_count_|.
+  // Note: Unlocking a page that is not locked is an error.
+  DCHECK_GE(locked_page_count_, end - start);
+  locked_page_count_ -= end - start;
+#if DCHECK_IS_ON
+  // Detect incorrect usage by keeping track of exactly what pages are locked.
+  for (auto page = start; page < end; ++page) {
+    auto erased_count = locked_pages_.erase(page);
+    DCHECK_EQ(1u, erased_count);
+  }
+  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+  // Early out and avoid releasing the platform independent lock if some pages
+  // are still locked.
+  if (locked_page_count_)
+    return;
+
   Time current_time = Now();
   DCHECK(!current_time.is_null());

   SharedState old_state(SharedState::LOCKED, Time());
   SharedState new_state(SharedState::UNLOCKED, current_time);
   // Note: timestamp cannot be NULL as that is a unique value used when
   // locked or purged.
   DCHECK(!new_state.GetTimestamp().is_null());
-  // Timestamps precision should at least be accurate to the second.
+  // Timestamp precision should at least be accurate to the second.
   DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
             (current_time - Time::UnixEpoch()).InSeconds());
   SharedState result(subtle::Release_CompareAndSwap(
       &SharedStateFromSharedMemory(shared_memory_)->value.i,
       old_state.value.i,
       new_state.value.i));

   DCHECK_EQ(old_state.value.u, result.value.u);

   last_known_usage_ = current_time;
 }

 void* DiscardableSharedMemory::memory() const {
-  return SharedStateFromSharedMemory(shared_memory_) + 1;
+  return reinterpret_cast<uint8*>(shared_memory_.memory()) +
+         AlignToPageSize(sizeof(SharedState));
 }

 bool DiscardableSharedMemory::Purge(Time current_time) {
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
   // Early out if not mapped. This can happen if the segment was previously
   // unmapped using a call to Close().
   if (!shared_memory_.memory())
     return true;

   SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
   SharedState new_state(SharedState::UNLOCKED, Time());
   SharedState result(subtle::Acquire_CompareAndSwap(
       &SharedStateFromSharedMemory(shared_memory_)->value.i,
       old_state.value.i,
(...skipping 44 matching lines...)
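
Purge() relies on the same compare-and-swap idea as Lock(): the swap to a NULL timestamp succeeds only if the segment is still unlocked and untouched since |last_known_usage_|. An illustrative stand-in using std::atomic rather than base::subtle (not the CL's actual code):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
      // UNLOCKED (low bit 0) with an example wire timestamp of 42.
      std::atomic<uintptr_t> state(42u << 1);

      uintptr_t expected = 42u << 1;  // What we believe the state to be.
      uintptr_t purged = 0;           // UNLOCKED with a NULL timestamp.
      bool ok = state.compare_exchange_strong(expected, purged,
                                              std::memory_order_acquire);
      assert(ok && state.load() == 0);  // Purge succeeded; state cleared.
      return 0;
    }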
 void DiscardableSharedMemory::Close() {
   shared_memory_.Unmap();
   shared_memory_.Close();
 }

 Time DiscardableSharedMemory::Now() const {
   return Time::Now();
 }

 }  // namespace base
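
For context, a hypothetical end-to-end use of the new range API (a sketch assuming the Chromium base library; not code from this CL):

    #include <string.h>

    #include "base/memory/discardable_shared_memory.h"

    void Example() {
      base::DiscardableSharedMemory memory;
      if (!memory.CreateAndMap(4096))  // Segment starts out locked.
        return;
      memset(memory.memory(), 0xaa, 4096);  // Safe while pages are locked.
      memory.Unlock(0, 0);  // Zero length means "everything onward".
      // ...later: relock before touching the data again.
      if (memory.Lock(0, 0))
        memory.Unlock(0, 0);
    }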
