OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "mojo/system/core_impl.h" | 5 #include "mojo/system/core_impl.h" |
6 | 6 |
7 #include <vector> | 7 #include <vector> |
8 | 8 |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "mojo/system/dispatcher.h" | 10 #include "mojo/system/dispatcher.h" |
(...skipping 48 matching lines...)
59 // INF. |Waiter| locks | 59 // INF. |Waiter| locks |
60 // | 60 // |
61 // Notes: | 61 // Notes: |
62 // - While holding a |Dispatcher| lock, you may not unconditionally attempt | 62 // - While holding a |Dispatcher| lock, you may not unconditionally attempt |
63 // to take another |Dispatcher| lock. (This has consequences on the | 63 // to take another |Dispatcher| lock. (This has consequences on the |
64 // concurrency semantics of |MojoWriteMessage()| when passing handles.) | 64 // concurrency semantics of |MojoWriteMessage()| when passing handles.) |
65 // Doing so would lead to deadlock. | 65 // Doing so would lead to deadlock. |
66 // - Locks at the "INF" level may not have any locks taken while they are | 66 // - Locks at the "INF" level may not have any locks taken while they are |
67 // held. | 67 // held. |
68 | 68 |
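
A condensed sketch of the acquisition pattern the code below follows under these rules: the handle table lock is taken before any |Dispatcher| lock, and a second |Dispatcher| lock is only ever attempted with a non-blocking Try(). This is illustrative only; it reuses the base::AutoLock / base::Lock primitives and the member names that appear later in this file, and the |entry| variable is hypothetical.

  // Illustrative only: the ordering rules described above.
  {
    base::AutoLock locker(handle_table_lock_);  // outermost lock
    // A |Dispatcher| lock may be taken here, but only conditionally: blocking
    // on it while other |Dispatcher| locks may be held is the deadlock the
    // note above warns about, so we give up rather than wait.
    if (!entry->dispatcher->lock().Try())
      return MOJO_RESULT_BUSY;
    // ... work under both locks ...
    entry->dispatcher->lock().Release();
  }  // |handle_table_lock_| is released when |locker| goes out of scope.
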
| 69 CoreImpl::HandleTableEntry::HandleTableEntry() |
| 70 : busy(false) { |
| 71 } |
| 72 |
| 73 CoreImpl::HandleTableEntry::HandleTableEntry( |
| 74 const scoped_refptr<Dispatcher>& dispatcher) |
| 75 : dispatcher(dispatcher), |
| 76 busy(false) { |
| 77 } |
| 78 |
| 79 CoreImpl::HandleTableEntry::~HandleTableEntry() { |
| 80 DCHECK(!busy); |
| 81 } |
| 82 |
69 // static | 83 // static |
70 CoreImpl* CoreImpl::singleton_ = NULL; | 84 CoreImpl* CoreImpl::singleton_ = NULL; |
71 | 85 |
72 // static | 86 // static |
73 void CoreImpl::Init() { | 87 void CoreImpl::Init() { |
74 CHECK(!singleton_); | 88 CHECK(!singleton_); |
75 singleton_ = new CoreImpl(); | 89 singleton_ = new CoreImpl(); |
76 } | 90 } |
77 | 91 |
78 MojoResult CoreImpl::Close(MojoHandle handle) { | 92 MojoResult CoreImpl::Close(MojoHandle handle) { |
79 if (handle == MOJO_HANDLE_INVALID) | 93 if (handle == MOJO_HANDLE_INVALID) |
80 return MOJO_RESULT_INVALID_ARGUMENT; | 94 return MOJO_RESULT_INVALID_ARGUMENT; |
81 | 95 |
82 scoped_refptr<Dispatcher> dispatcher; | 96 scoped_refptr<Dispatcher> dispatcher; |
83 { | 97 { |
84 base::AutoLock locker(handle_table_lock_); | 98 base::AutoLock locker(handle_table_lock_); |
85 HandleTableMap::iterator it = handle_table_.find(handle); | 99 HandleTableMap::iterator it = handle_table_.find(handle); |
86 if (it == handle_table_.end()) | 100 if (it == handle_table_.end()) |
87 return MOJO_RESULT_INVALID_ARGUMENT; | 101 return MOJO_RESULT_INVALID_ARGUMENT; |
88 dispatcher = it->second; | 102 if (it->second.busy) |
| 103 return MOJO_RESULT_BUSY; |
| 104 dispatcher = it->second.dispatcher; |
89 handle_table_.erase(it); | 105 handle_table_.erase(it); |
90 } | 106 } |
91 | 107 |
92 // The dispatcher doesn't have a say in being closed, but gets notified of it. | 108 // The dispatcher doesn't have a say in being closed, but gets notified of it. |
93 // Note: This is done outside of |handle_table_lock_|. As a result, there's a | 109 // Note: This is done outside of |handle_table_lock_|. As a result, there's a |
94 // race condition that the dispatcher must handle; see the comment in | 110 // race condition that the dispatcher must handle; see the comment in |
95 // |Dispatcher| in dispatcher.h. | 111 // |Dispatcher| in dispatcher.h. |
96 return dispatcher->Close(); | 112 return dispatcher->Close(); |
97 } | 113 } |
98 | 114 |
(...skipping 13 matching lines...)
112 return MOJO_RESULT_INVALID_ARGUMENT; | 128 return MOJO_RESULT_INVALID_ARGUMENT; |
113 if (num_handles < 1) | 129 if (num_handles < 1) |
114 return MOJO_RESULT_INVALID_ARGUMENT; | 130 return MOJO_RESULT_INVALID_ARGUMENT; |
115 if (num_handles > kMaxWaitManyNumHandles) | 131 if (num_handles > kMaxWaitManyNumHandles) |
116 return MOJO_RESULT_RESOURCE_EXHAUSTED; | 132 return MOJO_RESULT_RESOURCE_EXHAUSTED; |
117 return WaitManyInternal(handles, flags, num_handles, deadline); | 133 return WaitManyInternal(handles, flags, num_handles, deadline); |
118 } | 134 } |
119 | 135 |
120 MojoResult CoreImpl::CreateMessagePipe(MojoHandle* handle_0, | 136 MojoResult CoreImpl::CreateMessagePipe(MojoHandle* handle_0, |
121 MojoHandle* handle_1) { | 137 MojoHandle* handle_1) { |
| 138 if (!VerifyUserPointer<MojoHandle>(handle_0, 1)) |
| 139 return MOJO_RESULT_INVALID_ARGUMENT; |
| 140 if (!VerifyUserPointer<MojoHandle>(handle_1, 1)) |
| 141 return MOJO_RESULT_INVALID_ARGUMENT; |
| 142 |
122 scoped_refptr<MessagePipeDispatcher> dispatcher_0( | 143 scoped_refptr<MessagePipeDispatcher> dispatcher_0( |
123 new MessagePipeDispatcher()); | 144 new MessagePipeDispatcher()); |
124 scoped_refptr<MessagePipeDispatcher> dispatcher_1( | 145 scoped_refptr<MessagePipeDispatcher> dispatcher_1( |
125 new MessagePipeDispatcher()); | 146 new MessagePipeDispatcher()); |
126 | 147 |
127 MojoHandle h0, h1; | 148 MojoHandle h0, h1; |
128 { | 149 { |
129 base::AutoLock locker(handle_table_lock_); | 150 base::AutoLock locker(handle_table_lock_); |
130 | 151 |
131 h0 = AddDispatcherNoLock(dispatcher_0); | 152 h0 = AddDispatcherNoLock(dispatcher_0); |
(...skipping 18 matching lines...)
150 | 171 |
151 MojoResult CoreImpl::WriteMessage( | 172 MojoResult CoreImpl::WriteMessage( |
152 MojoHandle handle, | 173 MojoHandle handle, |
153 const void* bytes, uint32_t num_bytes, | 174 const void* bytes, uint32_t num_bytes, |
154 const MojoHandle* handles, uint32_t num_handles, | 175 const MojoHandle* handles, uint32_t num_handles, |
155 MojoWriteMessageFlags flags) { | 176 MojoWriteMessageFlags flags) { |
156 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(handle)); | 177 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(handle)); |
157 if (!dispatcher.get()) | 178 if (!dispatcher.get()) |
158 return MOJO_RESULT_INVALID_ARGUMENT; | 179 return MOJO_RESULT_INVALID_ARGUMENT; |
159 | 180 |
160 return dispatcher->WriteMessage(bytes, num_bytes, | 181 // Easy case: not sending any handles. |
161 handles, num_handles, | 182 if (num_handles == 0) |
162 flags); | 183 return dispatcher->WriteMessage(bytes, num_bytes, NULL, flags); |
| 184 |
| 185 // We have to handle |handles| here, since we have to mark them busy in the |
| 186 // global handle table. We can't delegate this to the dispatcher, since the |
| 187 // handle table lock must be acquired before the dispatcher lock. |
| 188 // |
| 189 // (This leads to an oddity: |handles|/|num_handles| are always verified for |
| 190 // validity, even for dispatchers that don't support |WriteMessage()| and will |
| 191 // simply return failure unconditionally. It also breaks the usual |
| 192 // left-to-right verification order of arguments.) |
| 193 if (!VerifyUserPointer<MojoHandle>(handles, num_handles)) |
| 194 return MOJO_RESULT_INVALID_ARGUMENT; |
| 195 if (num_handles > kMaxMessageNumHandles) |
| 196 return MOJO_RESULT_RESOURCE_EXHAUSTED; |
| 197 |
| 198 // We'll need to hold on to the dispatchers so that we can pass them on to |
| 199 // |WriteMessage()| and also so that we can unlock their locks afterwards |
| 200 // without accessing the handle table. These can be dumb pointers, since their |
| 201 // entries in the handle table won't get removed (since they'll be marked as |
| 202 // busy). |
| 203 std::vector<Dispatcher*> dispatchers(num_handles); |
| 204 |
| 205 // When we pass handles, we have to try to take all their dispatchers' locks |
| 206 // and mark the handles as busy. If the call succeeds, we then remove the |
| 207 // handles from the handle table. |
| 208 { |
| 209 base::AutoLock locker(handle_table_lock_); |
| 210 |
| 211 std::vector<HandleTableEntry*> entries(num_handles); |
| 212 |
| 213 // First verify all the handles and get their dispatchers. |
| 214 uint32_t i; |
| 215 MojoResult error_result = MOJO_RESULT_INTERNAL; |
| 216 for (i = 0; i < num_handles; i++) { |
| 217 // Sending your own handle is not allowed (and, for consistency, returns |
| 218 // "busy"). |
| 219 if (handles[i] == handle) { |
| 220 error_result = MOJO_RESULT_BUSY; |
| 221 break; |
| 222 } |
| 223 |
| 224 HandleTableMap::iterator it = handle_table_.find(handles[i]); |
| 225 if (it == handle_table_.end()) { |
| 226 error_result = MOJO_RESULT_INVALID_ARGUMENT; |
| 227 break; |
| 228 } |
| 229 |
| 230 entries[i] = &it->second; |
| 231 if (entries[i]->busy) { |
| 232 error_result = MOJO_RESULT_BUSY; |
| 233 break; |
| 234 } |
| 235 // Note: By marking the handle as busy here, we're also preventing the |
| 236 // same handle from being sent multiple times in the same message. |
| 237 entries[i]->busy = true; |
| 238 |
| 239 // Try to take the lock. |
| 240 if (!entries[i]->dispatcher->lock().Try()) { |
| 241 // Unset the busy flag (since it won't be unset below). |
| 242 entries[i]->busy = false; |
| 243 error_result = MOJO_RESULT_BUSY; |
| 244 break; |
| 245 } |
| 246 |
 | 247 // Hang on to the pointer to the dispatcher (which we'll need in order |
 | 248 // to release its lock without going through the handle table). |
| 249 dispatchers[i] = entries[i]->dispatcher; |
| 250 } |
| 251 if (i < num_handles) { |
| 252 DCHECK_NE(error_result, MOJO_RESULT_INTERNAL); |
| 253 |
| 254 // Unset the busy flags and release the locks. |
| 255 for (uint32_t j = 0; j < i; j++) { |
| 256 DCHECK(entries[j]->busy); |
| 257 entries[j]->busy = false; |
| 258 entries[j]->dispatcher->lock().Release(); |
| 259 } |
| 260 return error_result; |
| 261 } |
| 262 } |
| 263 |
| 264 MojoResult rv = dispatcher->WriteMessage(bytes, num_bytes, |
| 265 &dispatchers, |
| 266 flags); |
| 267 |
| 268 // We need to release the dispatcher locks before we take the handle table |
| 269 // lock. |
| 270 for (uint32_t i = 0; i < num_handles; i++) { |
| 271 dispatchers[i]->lock().AssertAcquired(); |
| 272 dispatchers[i]->lock().Release(); |
| 273 } |
| 274 |
| 275 if (rv == MOJO_RESULT_OK) { |
| 276 base::AutoLock locker(handle_table_lock_); |
| 277 |
| 278 // Succeeded, so the handles should be removed from the handle table. (The |
 | 279 // transfer to new dispatchers, or closing, must already have been done.) |
| 280 for (uint32_t i = 0; i < num_handles; i++) { |
| 281 HandleTableMap::iterator it = handle_table_.find(handles[i]); |
| 282 DCHECK(it != handle_table_.end()); |
| 283 DCHECK(it->second.busy); |
| 284 handle_table_.erase(it); |
| 285 } |
| 286 } else { |
| 287 base::AutoLock locker(handle_table_lock_); |
| 288 |
| 289 // Failed, so the handles should go back to their normal state. |
| 290 for (uint32_t i = 0; i < num_handles; i++) { |
| 291 HandleTableMap::iterator it = handle_table_.find(handles[i]); |
| 292 DCHECK(it != handle_table_.end()); |
| 293 DCHECK(it->second.busy); |
| 294 it->second.busy = false; |
| 295 } |
| 296 } |
| 297 |
| 298 return rv; |
163 } | 299 } |
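
For context, a hypothetical caller-side view of the handle-passing path above, written against the public C entry points (MojoCreateMessagePipe, MojoWriteMessage, MojoClose) that forward into |CoreImpl|; the handle names and payload are made up for illustration and are not part of this CL.

  // Hypothetical usage sketch.
  MojoHandle a0, a1;  // transport pipe
  MojoHandle b0, b1;  // pipe whose end |b1| will be transferred
  MojoCreateMessagePipe(&a0, &a1);
  MojoCreateMessagePipe(&b0, &b1);

  const char kHello[] = "hello";
  MojoResult result = MojoWriteMessage(a0, kHello, sizeof(kHello),
                                       &b1, 1, MOJO_WRITE_MESSAGE_FLAG_NONE);
  // On MOJO_RESULT_OK, |b1| has been erased from the sender's handle table,
  // so any further use of it (even MojoClose(b1)) returns
  // MOJO_RESULT_INVALID_ARGUMENT. Passing |a0| over itself, or the same
  // handle twice in one message, would instead fail with MOJO_RESULT_BUSY.
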
164 | 300 |
165 MojoResult CoreImpl::ReadMessage( | 301 MojoResult CoreImpl::ReadMessage( |
166 MojoHandle handle, | 302 MojoHandle handle, |
167 void* bytes, uint32_t* num_bytes, | 303 void* bytes, uint32_t* num_bytes, |
168 MojoHandle* handles, uint32_t* num_handles, | 304 MojoHandle* handles, uint32_t* num_handles, |
169 MojoReadMessageFlags flags) { | 305 MojoReadMessageFlags flags) { |
170 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(handle)); | 306 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(handle)); |
171 if (!dispatcher.get()) | 307 if (!dispatcher.get()) |
172 return MOJO_RESULT_INVALID_ARGUMENT; | 308 return MOJO_RESULT_INVALID_ARGUMENT; |
173 | 309 |
174 return dispatcher->ReadMessage(bytes, num_bytes, | 310 uint32_t max_num_dispatchers = 0; |
175 handles, num_handles, | 311 if (num_handles) { |
176 flags); | 312 if (!VerifyUserPointer<uint32_t>(num_handles, 1)) |
| 313 return MOJO_RESULT_INVALID_ARGUMENT; |
| 314 if (!VerifyUserPointer<MojoHandle>(handles, *num_handles)) |
| 315 return MOJO_RESULT_INVALID_ARGUMENT; |
| 316 max_num_dispatchers = *num_handles; |
| 317 } |
| 318 |
| 319 // Easy case: won't receive any handles. |
| 320 if (max_num_dispatchers == 0) |
| 321 return dispatcher->ReadMessage(bytes, num_bytes, 0, NULL, flags); |
| 322 |
| 323 std::vector<scoped_refptr<Dispatcher> > dispatchers; |
| 324 MojoResult rv = dispatcher->ReadMessage(bytes, num_bytes, |
| 325 max_num_dispatchers, &dispatchers, |
| 326 flags); |
| 327 if (!dispatchers.empty()) { |
| 328 DCHECK_EQ(rv, MOJO_RESULT_OK); |
| 329 |
| 330 *num_handles = static_cast<uint32_t>(dispatchers.size()); |
| 331 DCHECK_LE(*num_handles, max_num_dispatchers); |
| 332 |
| 333 base::AutoLock locker(handle_table_lock_); |
| 334 |
| 335 for (size_t i = 0; i < dispatchers.size(); i++) { |
| 336 // TODO(vtl): What should we do if we hit the maximum handle table size |
| 337 // here? Currently, we'll just fill in those handles with |
| 338 // |MOJO_HANDLE_INVALID| (and return success anyway). |
| 339 handles[i] = AddDispatcherNoLock(dispatchers[i]); |
| 340 } |
| 341 } |
| 342 |
| 343 return rv; |
177 } | 344 } |
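
And the receiving side, again as a hypothetical sketch against the C API (MojoReadMessage) rather than anything added by this CL; the |pipe| handle and buffer sizes are assumptions.

  // Hypothetical usage sketch.
  char buffer[256];
  uint32_t num_bytes = sizeof(buffer);
  MojoHandle received[4];
  uint32_t num_handles = 4;  // in: capacity of |received|; out: handles attached
  MojoResult result = MojoReadMessage(pipe, buffer, &num_bytes,
                                      received, &num_handles,
                                      MOJO_READ_MESSAGE_FLAG_NONE);
  // On MOJO_RESULT_OK with attached handles, |received[0..num_handles-1]| are
  // new handles minted via |AddDispatcherNoLock()| for the transferred
  // dispatchers (or MOJO_HANDLE_INVALID if the handle table was full; see the
  // TODO above).
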
178 | 345 |
179 CoreImpl::CoreImpl() | 346 CoreImpl::CoreImpl() |
180 : next_handle_(MOJO_HANDLE_INVALID + 1) { | 347 : next_handle_(MOJO_HANDLE_INVALID + 1) { |
181 } | 348 } |
182 | 349 |
183 CoreImpl::~CoreImpl() { | 350 CoreImpl::~CoreImpl() { |
184 // This should usually not be reached (the singleton lives forever), except | 351 // This should usually not be reached (the singleton lives forever), except |
185 // in tests. | 352 // in tests. |
186 } | 353 } |
187 | 354 |
188 scoped_refptr<Dispatcher> CoreImpl::GetDispatcher(MojoHandle handle) { | 355 scoped_refptr<Dispatcher> CoreImpl::GetDispatcher(MojoHandle handle) { |
189 if (handle == MOJO_HANDLE_INVALID) | 356 if (handle == MOJO_HANDLE_INVALID) |
190 return NULL; | 357 return NULL; |
191 | 358 |
192 base::AutoLock locker(handle_table_lock_); | 359 base::AutoLock locker(handle_table_lock_); |
193 HandleTableMap::iterator it = handle_table_.find(handle); | 360 HandleTableMap::iterator it = handle_table_.find(handle); |
194 if (it == handle_table_.end()) | 361 if (it == handle_table_.end()) |
195 return NULL; | 362 return NULL; |
196 | 363 |
197 return it->second; | 364 return it->second.dispatcher; |
198 } | 365 } |
199 | 366 |
200 MojoHandle CoreImpl::AddDispatcherNoLock(scoped_refptr<Dispatcher> dispatcher) { | 367 MojoHandle CoreImpl::AddDispatcherNoLock( |
| 368 const scoped_refptr<Dispatcher>& dispatcher) { |
201 DCHECK(dispatcher.get()); | 369 DCHECK(dispatcher.get()); |
202 handle_table_lock_.AssertAcquired(); | 370 handle_table_lock_.AssertAcquired(); |
203 DCHECK_NE(next_handle_, MOJO_HANDLE_INVALID); | 371 DCHECK_NE(next_handle_, MOJO_HANDLE_INVALID); |
204 | 372 |
205 if (handle_table_.size() >= kMaxHandleTableSize) | 373 if (handle_table_.size() >= kMaxHandleTableSize) |
206 return MOJO_HANDLE_INVALID; | 374 return MOJO_HANDLE_INVALID; |
207 | 375 |
208 // TODO(vtl): Maybe we want to do something different/smarter. (Or maybe try | 376 // TODO(vtl): Maybe we want to do something different/smarter. (Or maybe try |
209 // assigning randomly?) | 377 // assigning randomly?) |
210 while (handle_table_.find(next_handle_) != handle_table_.end()) { | 378 while (handle_table_.find(next_handle_) != handle_table_.end()) { |
211 next_handle_++; | 379 next_handle_++; |
212 if (next_handle_ == MOJO_HANDLE_INVALID) | 380 if (next_handle_ == MOJO_HANDLE_INVALID) |
213 next_handle_++; | 381 next_handle_++; |
214 } | 382 } |
215 | 383 |
216 MojoHandle new_handle = next_handle_; | 384 MojoHandle new_handle = next_handle_; |
217 handle_table_[new_handle] = dispatcher; | 385 handle_table_[new_handle] = HandleTableEntry(dispatcher); |
218 | 386 |
219 next_handle_++; | 387 next_handle_++; |
220 if (next_handle_ == MOJO_HANDLE_INVALID) | 388 if (next_handle_ == MOJO_HANDLE_INVALID) |
221 next_handle_++; | 389 next_handle_++; |
222 | 390 |
223 return new_handle; | 391 return new_handle; |
224 } | 392 } |
225 | 393 |
226 // Note: We allow |handles| to repeat the same handle multiple times, since | 394 // Note: We allow |handles| to repeat the same handle multiple times, since |
227 // different flags may be specified. | 395 // different flags may be specified. |
228 // TODO(vtl): This incurs a performance cost in |RemoveWaiter()|. Analyze this | 396 // TODO(vtl): This incurs a performance cost in |RemoveWaiter()|. Analyze this |
229 // more carefully and address it if necessary. | 397 // more carefully and address it if necessary. |
230 MojoResult CoreImpl::WaitManyInternal(const MojoHandle* handles, | 398 MojoResult CoreImpl::WaitManyInternal(const MojoHandle* handles, |
231 const MojoWaitFlags* flags, | 399 const MojoWaitFlags* flags, |
232 uint32_t num_handles, | 400 uint32_t num_handles, |
233 MojoDeadline deadline) { | 401 MojoDeadline deadline) { |
234 DCHECK_GT(num_handles, 0u); | 402 DCHECK_GT(num_handles, 0u); |
235 | 403 |
236 std::vector<scoped_refptr<Dispatcher> > dispatchers; | 404 std::vector<scoped_refptr<Dispatcher> > dispatchers; |
237 dispatchers.reserve(num_handles); | 405 dispatchers.reserve(num_handles); |
238 for (uint32_t i = 0; i < num_handles; i++) { | 406 for (uint32_t i = 0; i < num_handles; i++) { |
239 scoped_refptr<Dispatcher> d = GetDispatcher(handles[i]); | 407 scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handles[i]); |
240 if (!d.get()) | 408 if (!dispatcher.get()) |
241 return MOJO_RESULT_INVALID_ARGUMENT; | 409 return MOJO_RESULT_INVALID_ARGUMENT; |
242 dispatchers.push_back(d); | 410 dispatchers.push_back(dispatcher); |
243 } | 411 } |
244 | 412 |
245 // TODO(vtl): Should make the waiter live (permanently) in TLS. | 413 // TODO(vtl): Should make the waiter live (permanently) in TLS. |
246 Waiter waiter; | 414 Waiter waiter; |
247 waiter.Init(); | 415 waiter.Init(); |
248 | 416 |
249 uint32_t i; | 417 uint32_t i; |
250 MojoResult rv = MOJO_RESULT_OK; | 418 MojoResult rv = MOJO_RESULT_OK; |
251 for (i = 0; i < num_handles; i++) { | 419 for (i = 0; i < num_handles; i++) { |
252 rv = dispatchers[i]->AddWaiter(&waiter, | 420 rv = dispatchers[i]->AddWaiter(&waiter, |
(...skipping 13 matching lines...)
266 // |Wait()|/|WaitMany()| call. (Only after doing this can |waiter| be | 434 // |Wait()|/|WaitMany()| call. (Only after doing this can |waiter| be |
267 // destroyed, but this would still be required if the waiter were in TLS.) | 435 // destroyed, but this would still be required if the waiter were in TLS.) |
268 for (i = 0; i < num_added; i++) | 436 for (i = 0; i < num_added; i++) |
269 dispatchers[i]->RemoveWaiter(&waiter); | 437 dispatchers[i]->RemoveWaiter(&waiter); |
270 | 438 |
271 return rv; | 439 return rv; |
272 } | 440 } |
273 | 441 |
274 } // namespace system | 442 } // namespace system |
275 } // namespace mojo | 443 } // namespace mojo |