| OLD | NEW |
| (Empty) |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "mojo/system/core_impl.h" | |
| 6 | |
| 7 #include <vector> | |
| 8 | |
| 9 #include "base/logging.h" | |
| 10 #include "base/time/time.h" | |
| 11 #include "mojo/system/constants.h" | |
| 12 #include "mojo/system/data_pipe.h" | |
| 13 #include "mojo/system/data_pipe_consumer_dispatcher.h" | |
| 14 #include "mojo/system/data_pipe_producer_dispatcher.h" | |
| 15 #include "mojo/system/dispatcher.h" | |
| 16 #include "mojo/system/local_data_pipe.h" | |
| 17 #include "mojo/system/memory.h" | |
| 18 #include "mojo/system/message_pipe.h" | |
| 19 #include "mojo/system/message_pipe_dispatcher.h" | |
| 20 #include "mojo/system/raw_shared_buffer.h" | |
| 21 #include "mojo/system/shared_buffer_dispatcher.h" | |
| 22 #include "mojo/system/waiter.h" | |
| 23 | |
| 24 namespace mojo { | |
| 25 namespace system { | |
| 26 | |
| 27 // Implementation notes | |
| 28 // | |
| 29 // Mojo primitives are implemented by the singleton |CoreImpl| object. Most | |
| 30 // calls are for a "primary" handle (the first argument). | |
| 31 // |CoreImpl::GetDispatcher()| is used to look up a |Dispatcher| object for a | |
| 32 // given handle. That object implements most primitives for that object. The | |
| 33 // wait primitives are not attached to objects and are implemented by |CoreImpl| | |
| 34 // itself. | |
| 35 // | |
| 36 // Some objects have multiple handles associated to them, e.g., message pipes | |
| 37 // (which have two). In such a case, there is still a |Dispatcher| (e.g., | |
| 38 // |MessagePipeDispatcher|) for each handle, with each handle having a strong | |
| 39 // reference to the common "secondary" object (e.g., |MessagePipe|). This | |
| 40 // secondary object does NOT have any references to the |Dispatcher|s (even if | |
| 41 // it did, it wouldn't be able to do anything with them due to lock order | |
| 42 // requirements -- see below). | |
| 43 // | |
| 44 // Waiting is implemented by having the thread that wants to wait call the | |
| 45 // |Dispatcher|s for the handles that it wants to wait on with a |Waiter| | |
| 46 // object; this |Waiter| object may be created on the stack of that thread or be | |
| 47 // kept in thread local storage for that thread (TODO(vtl): future improvement). | |
| 48 // The |Dispatcher| then adds the |Waiter| to a |WaiterList| that's either owned | |
| 49 // by that |Dispatcher| (see |SimpleDispatcher|) or by a secondary object (e.g., | |
| 50 // |MessagePipe|). To signal/wake a |Waiter|, the object in question -- either a | |
| 51 // |SimpleDispatcher| or a secondary object -- talks to its |WaiterList|. | |
| 52 | |
| 53 // Thread-safety notes | |
| 54 // | |
| 55 // Mojo primitives calls are thread-safe. We achieve this with relatively | |
| 56 // fine-grained locking. There is a global handle table lock. This lock should | |
| 57 // be held as briefly as possible (TODO(vtl): a future improvement would be to | |
| 58 // switch it to a reader-writer lock). Each |Dispatcher| object then has a lock | |
| 59 // (which subclasses can use to protect their data). | |
| 60 // | |
| 61 // The lock ordering is as follows: | |
| 62 // 1. global handle table lock, global mapping table lock | |
| 63 // 2. |Dispatcher| locks | |
| 64 // 3. secondary object locks | |
| 65 // ... | |
| 66 // INF. |Waiter| locks | |
| 67 // | |
| 68 // Notes: | |
| 69 // - While holding a |Dispatcher| lock, you may not unconditionally attempt | |
| 70 // to take another |Dispatcher| lock. (This has consequences on the | |
| 71 // concurrency semantics of |MojoWriteMessage()| when passing handles.) | |
| 72 // Doing so would lead to deadlock. | |
| 73 // - Locks at the "INF" level may not have any locks taken while they are | |
| 74 // held. | |
| 75 | |
// Creates an empty entry (no dispatcher); entries start out not busy.
CoreImpl::HandleTableEntry::HandleTableEntry()
    : busy(false) {
}
| 79 | |
// Creates an entry holding a reference to |dispatcher|. The entry starts out
// not busy; "busy" means the handle is currently being transferred in a
// |WriteMessage()| call (see |WriteMessage()| below).
CoreImpl::HandleTableEntry::HandleTableEntry(
    const scoped_refptr<Dispatcher>& dispatcher)
    : dispatcher(dispatcher),
      busy(false) {
}
| 85 | |
// An entry must never be destroyed while marked busy -- a busy entry's handle
// is in the middle of being transferred by |WriteMessage()|.
CoreImpl::HandleTableEntry::~HandleTableEntry() {
  DCHECK(!busy);
}
| 89 | |
// |CoreImpl| is used as a singleton (see the implementation notes above).
CoreImpl::CoreImpl() {
}
| 92 | |
CoreImpl::~CoreImpl() {
  // This should usually not be reached (the singleton lives forever), except
  // in tests.
}
| 97 | |
| 98 MojoHandle CoreImpl::AddDispatcher( | |
| 99 const scoped_refptr<Dispatcher>& dispatcher) { | |
| 100 base::AutoLock locker(handle_table_lock_); | |
| 101 return handle_table_.AddDispatcher(dispatcher); | |
| 102 } | |
| 103 | |
| 104 MojoTimeTicks CoreImpl::GetTimeTicksNow() { | |
| 105 return base::TimeTicks::Now().ToInternalValue(); | |
| 106 } | |
| 107 | |
| 108 MojoResult CoreImpl::Close(MojoHandle handle) { | |
| 109 if (handle == MOJO_HANDLE_INVALID) | |
| 110 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 111 | |
| 112 scoped_refptr<Dispatcher> dispatcher; | |
| 113 { | |
| 114 base::AutoLock locker(handle_table_lock_); | |
| 115 MojoResult result = handle_table_.GetAndRemoveDispatcher(handle, | |
| 116 &dispatcher); | |
| 117 if (result != MOJO_RESULT_OK) | |
| 118 return result; | |
| 119 } | |
| 120 | |
| 121 // The dispatcher doesn't have a say in being closed, but gets notified of it. | |
| 122 // Note: This is done outside of |handle_table_lock_|. As a result, there's a | |
| 123 // race condition that the dispatcher must handle; see the comment in | |
| 124 // |Dispatcher| in dispatcher.h. | |
| 125 return dispatcher->Close(); | |
| 126 } | |
| 127 | |
// Waits on a single handle; implemented as the one-element case of
// |WaitManyInternal()|.
MojoResult CoreImpl::Wait(MojoHandle handle,
                          MojoWaitFlags flags,
                          MojoDeadline deadline) {
  return WaitManyInternal(&handle, &flags, 1, deadline);
}
| 133 | |
| 134 MojoResult CoreImpl::WaitMany(const MojoHandle* handles, | |
| 135 const MojoWaitFlags* flags, | |
| 136 uint32_t num_handles, | |
| 137 MojoDeadline deadline) { | |
| 138 if (!VerifyUserPointer<MojoHandle>(handles, num_handles)) | |
| 139 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 140 if (!VerifyUserPointer<MojoWaitFlags>(flags, num_handles)) | |
| 141 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 142 if (num_handles < 1) | |
| 143 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 144 if (num_handles > kMaxWaitManyNumHandles) | |
| 145 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 146 return WaitManyInternal(handles, flags, num_handles, deadline); | |
| 147 } | |
| 148 | |
| 149 MojoResult CoreImpl::CreateMessagePipe(MojoHandle* message_pipe_handle0, | |
| 150 MojoHandle* message_pipe_handle1) { | |
| 151 if (!VerifyUserPointer<MojoHandle>(message_pipe_handle0, 1)) | |
| 152 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 153 if (!VerifyUserPointer<MojoHandle>(message_pipe_handle1, 1)) | |
| 154 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 155 | |
| 156 scoped_refptr<MessagePipeDispatcher> dispatcher0(new MessagePipeDispatcher()); | |
| 157 scoped_refptr<MessagePipeDispatcher> dispatcher1(new MessagePipeDispatcher()); | |
| 158 | |
| 159 std::pair<MojoHandle, MojoHandle> handle_pair; | |
| 160 { | |
| 161 base::AutoLock locker(handle_table_lock_); | |
| 162 handle_pair = handle_table_.AddDispatcherPair(dispatcher0, dispatcher1); | |
| 163 } | |
| 164 if (handle_pair.first == MOJO_HANDLE_INVALID) { | |
| 165 DCHECK_EQ(handle_pair.second, MOJO_HANDLE_INVALID); | |
| 166 LOG(ERROR) << "Handle table full"; | |
| 167 dispatcher0->Close(); | |
| 168 dispatcher1->Close(); | |
| 169 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 170 } | |
| 171 | |
| 172 scoped_refptr<MessagePipe> message_pipe(new MessagePipe()); | |
| 173 dispatcher0->Init(message_pipe, 0); | |
| 174 dispatcher1->Init(message_pipe, 1); | |
| 175 | |
| 176 *message_pipe_handle0 = handle_pair.first; | |
| 177 *message_pipe_handle1 = handle_pair.second; | |
| 178 return MOJO_RESULT_OK; | |
| 179 } | |
| 180 | |
// Implementation note: To properly cancel waiters and avoid other races, this
// does not transfer dispatchers from one handle to another, even when sending
// a message in-process. Instead, it must transfer the "contents" of the
// dispatcher to a new dispatcher, and then close the old dispatcher. If this
// isn't done, in the in-process case, calls on the old handle may complete
// after the message has been received and a new handle created (and possibly
// even after calls have been made on the new handle).
//
// Writes a message consisting of |bytes|/|num_bytes| plus the (optional)
// attached handles |handles|/|num_handles| to the given message pipe handle.
MojoResult CoreImpl::WriteMessage(MojoHandle message_pipe_handle,
                                  const void* bytes,
                                  uint32_t num_bytes,
                                  const MojoHandle* handles,
                                  uint32_t num_handles,
                                  MojoWriteMessageFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(message_pipe_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  // Easy case: not sending any handles.
  if (num_handles == 0)
    return dispatcher->WriteMessage(bytes, num_bytes, NULL, flags);

  // We have to handle |handles| here, since we have to mark them busy in the
  // global handle table. We can't delegate this to the dispatcher, since the
  // handle table lock must be acquired before the dispatcher lock.
  //
  // (This leads to an oddity: |handles|/|num_handles| are always verified for
  // validity, even for dispatchers that don't support |WriteMessage()| and
  // will simply return failure unconditionally. It also breaks the usual
  // left-to-right verification order of arguments.)
  if (!VerifyUserPointer<MojoHandle>(handles, num_handles))
    return MOJO_RESULT_INVALID_ARGUMENT;
  if (num_handles > kMaxMessageNumHandles)
    return MOJO_RESULT_RESOURCE_EXHAUSTED;

  // We'll need to hold on to the dispatchers so that we can pass them on to
  // |WriteMessage()| and also so that we can unlock their locks afterwards
  // without accessing the handle table. These can be dumb pointers, since
  // their entries in the handle table won't get removed (since they'll be
  // marked as busy).
  std::vector<DispatcherTransport> transports(num_handles);

  // When we pass handles, we have to try to take all their dispatchers' locks
  // and mark the handles as busy. If the call succeeds, we then remove the
  // handles from the handle table.
  {
    base::AutoLock locker(handle_table_lock_);
    MojoResult result = handle_table_.MarkBusyAndStartTransport(
        message_pipe_handle, handles, num_handles, &transports);
    if (result != MOJO_RESULT_OK)
      return result;
  }

  MojoResult rv = dispatcher->WriteMessage(bytes, num_bytes, &transports,
                                           flags);

  // We need to release the dispatcher locks before we take the handle table
  // lock (the handle table lock must always be acquired first -- see the lock
  // ordering notes at the top of this file).
  for (uint32_t i = 0; i < num_handles; i++)
    transports[i].End();

  {
    base::AutoLock locker(handle_table_lock_);
    // On success, the attached handles were transferred: remove them from the
    // table. On failure, un-busy them so the caller still owns them.
    if (rv == MOJO_RESULT_OK)
      handle_table_.RemoveBusyHandles(handles, num_handles);
    else
      handle_table_.RestoreBusyHandles(handles, num_handles);
  }

  return rv;
}
| 251 | |
| 252 MojoResult CoreImpl::ReadMessage(MojoHandle message_pipe_handle, | |
| 253 void* bytes, | |
| 254 uint32_t* num_bytes, | |
| 255 MojoHandle* handles, | |
| 256 uint32_t* num_handles, | |
| 257 MojoReadMessageFlags flags) { | |
| 258 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(message_pipe_handle)); | |
| 259 if (!dispatcher.get()) | |
| 260 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 261 | |
| 262 if (num_handles) { | |
| 263 if (!VerifyUserPointer<uint32_t>(num_handles, 1)) | |
| 264 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 265 if (!VerifyUserPointer<MojoHandle>(handles, *num_handles)) | |
| 266 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 267 } | |
| 268 | |
| 269 // Easy case: won't receive any handles. | |
| 270 if (!num_handles || *num_handles == 0) | |
| 271 return dispatcher->ReadMessage(bytes, num_bytes, NULL, num_handles, flags); | |
| 272 | |
| 273 std::vector<scoped_refptr<Dispatcher> > dispatchers; | |
| 274 MojoResult rv = dispatcher->ReadMessage(bytes, num_bytes, | |
| 275 &dispatchers, num_handles, | |
| 276 flags); | |
| 277 if (!dispatchers.empty()) { | |
| 278 DCHECK_EQ(rv, MOJO_RESULT_OK); | |
| 279 DCHECK(num_handles); | |
| 280 DCHECK_LE(dispatchers.size(), static_cast<size_t>(*num_handles)); | |
| 281 | |
| 282 bool success; | |
| 283 { | |
| 284 base::AutoLock locker(handle_table_lock_); | |
| 285 success = handle_table_.AddDispatcherVector(dispatchers, handles); | |
| 286 } | |
| 287 if (!success) { | |
| 288 LOG(ERROR) << "Received message with " << dispatchers.size() | |
| 289 << " handles, but handle table full"; | |
| 290 // Close dispatchers (outside the lock). | |
| 291 for (size_t i = 0; i < dispatchers.size(); i++) { | |
| 292 if (dispatchers[i]) | |
| 293 dispatchers[i]->Close(); | |
| 294 } | |
| 295 } | |
| 296 } | |
| 297 | |
| 298 return rv; | |
| 299 } | |
| 300 | |
| 301 MojoResult CoreImpl::CreateDataPipe(const MojoCreateDataPipeOptions* options, | |
| 302 MojoHandle* data_pipe_producer_handle, | |
| 303 MojoHandle* data_pipe_consumer_handle) { | |
| 304 if (options) { | |
| 305 // The |struct_size| field must be valid to read. | |
| 306 if (!VerifyUserPointer<uint32_t>(&options->struct_size, 1)) | |
| 307 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 308 // And then |options| must point to at least |options->struct_size| bytes. | |
| 309 if (!VerifyUserPointer<void>(options, options->struct_size)) | |
| 310 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 311 } | |
| 312 if (!VerifyUserPointer<MojoHandle>(data_pipe_producer_handle, 1)) | |
| 313 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 314 if (!VerifyUserPointer<MojoHandle>(data_pipe_consumer_handle, 1)) | |
| 315 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 316 | |
| 317 MojoCreateDataPipeOptions validated_options = { 0 }; | |
| 318 MojoResult result = DataPipe::ValidateOptions(options, &validated_options); | |
| 319 if (result != MOJO_RESULT_OK) | |
| 320 return result; | |
| 321 | |
| 322 scoped_refptr<DataPipeProducerDispatcher> producer_dispatcher( | |
| 323 new DataPipeProducerDispatcher()); | |
| 324 scoped_refptr<DataPipeConsumerDispatcher> consumer_dispatcher( | |
| 325 new DataPipeConsumerDispatcher()); | |
| 326 | |
| 327 std::pair<MojoHandle, MojoHandle> handle_pair; | |
| 328 { | |
| 329 base::AutoLock locker(handle_table_lock_); | |
| 330 handle_pair = handle_table_.AddDispatcherPair(producer_dispatcher, | |
| 331 consumer_dispatcher); | |
| 332 } | |
| 333 if (handle_pair.first == MOJO_HANDLE_INVALID) { | |
| 334 DCHECK_EQ(handle_pair.second, MOJO_HANDLE_INVALID); | |
| 335 LOG(ERROR) << "Handle table full"; | |
| 336 producer_dispatcher->Close(); | |
| 337 consumer_dispatcher->Close(); | |
| 338 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 339 } | |
| 340 DCHECK_NE(handle_pair.second, MOJO_HANDLE_INVALID); | |
| 341 | |
| 342 scoped_refptr<DataPipe> data_pipe(new LocalDataPipe(validated_options)); | |
| 343 producer_dispatcher->Init(data_pipe); | |
| 344 consumer_dispatcher->Init(data_pipe); | |
| 345 | |
| 346 *data_pipe_producer_handle = handle_pair.first; | |
| 347 *data_pipe_consumer_handle = handle_pair.second; | |
| 348 return MOJO_RESULT_OK; | |
| 349 } | |
| 350 | |
| 351 MojoResult CoreImpl::WriteData(MojoHandle data_pipe_producer_handle, | |
| 352 const void* elements, | |
| 353 uint32_t* num_bytes, | |
| 354 MojoWriteDataFlags flags) { | |
| 355 scoped_refptr<Dispatcher> dispatcher( | |
| 356 GetDispatcher(data_pipe_producer_handle)); | |
| 357 if (!dispatcher.get()) | |
| 358 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 359 | |
| 360 return dispatcher->WriteData(elements, num_bytes, flags); | |
| 361 } | |
| 362 | |
| 363 MojoResult CoreImpl::BeginWriteData(MojoHandle data_pipe_producer_handle, | |
| 364 void** buffer, | |
| 365 uint32_t* buffer_num_bytes, | |
| 366 MojoWriteDataFlags flags) { | |
| 367 scoped_refptr<Dispatcher> dispatcher( | |
| 368 GetDispatcher(data_pipe_producer_handle)); | |
| 369 if (!dispatcher.get()) | |
| 370 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 371 | |
| 372 return dispatcher->BeginWriteData(buffer, buffer_num_bytes, flags); | |
| 373 } | |
| 374 | |
| 375 MojoResult CoreImpl::EndWriteData(MojoHandle data_pipe_producer_handle, | |
| 376 uint32_t num_bytes_written) { | |
| 377 scoped_refptr<Dispatcher> dispatcher( | |
| 378 GetDispatcher(data_pipe_producer_handle)); | |
| 379 if (!dispatcher.get()) | |
| 380 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 381 | |
| 382 return dispatcher->EndWriteData(num_bytes_written); | |
| 383 } | |
| 384 | |
| 385 MojoResult CoreImpl::ReadData(MojoHandle data_pipe_consumer_handle, | |
| 386 void* elements, | |
| 387 uint32_t* num_bytes, | |
| 388 MojoReadDataFlags flags) { | |
| 389 scoped_refptr<Dispatcher> dispatcher( | |
| 390 GetDispatcher(data_pipe_consumer_handle)); | |
| 391 if (!dispatcher.get()) | |
| 392 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 393 | |
| 394 return dispatcher->ReadData(elements, num_bytes, flags); | |
| 395 } | |
| 396 | |
| 397 MojoResult CoreImpl::BeginReadData(MojoHandle data_pipe_consumer_handle, | |
| 398 const void** buffer, | |
| 399 uint32_t* buffer_num_bytes, | |
| 400 MojoReadDataFlags flags) { | |
| 401 scoped_refptr<Dispatcher> dispatcher( | |
| 402 GetDispatcher(data_pipe_consumer_handle)); | |
| 403 if (!dispatcher.get()) | |
| 404 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 405 | |
| 406 return dispatcher->BeginReadData(buffer, buffer_num_bytes, flags); | |
| 407 } | |
| 408 | |
| 409 MojoResult CoreImpl::EndReadData(MojoHandle data_pipe_consumer_handle, | |
| 410 uint32_t num_bytes_read) { | |
| 411 scoped_refptr<Dispatcher> dispatcher( | |
| 412 GetDispatcher(data_pipe_consumer_handle)); | |
| 413 if (!dispatcher.get()) | |
| 414 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 415 | |
| 416 return dispatcher->EndReadData(num_bytes_read); | |
| 417 } | |
| 418 | |
| 419 MojoResult CoreImpl::CreateSharedBuffer( | |
| 420 const MojoCreateSharedBufferOptions* options, | |
| 421 uint64_t num_bytes, | |
| 422 MojoHandle* shared_buffer_handle) { | |
| 423 if (options) { | |
| 424 // The |struct_size| field must be valid to read. | |
| 425 if (!VerifyUserPointer<uint32_t>(&options->struct_size, 1)) | |
| 426 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 427 // And then |options| must point to at least |options->struct_size| bytes. | |
| 428 if (!VerifyUserPointer<void>(options, options->struct_size)) | |
| 429 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 430 } | |
| 431 if (!VerifyUserPointer<MojoHandle>(shared_buffer_handle, 1)) | |
| 432 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 433 | |
| 434 MojoCreateSharedBufferOptions validated_options = { 0 }; | |
| 435 MojoResult result = | |
| 436 SharedBufferDispatcher::ValidateOptions(options, &validated_options); | |
| 437 if (result != MOJO_RESULT_OK) | |
| 438 return result; | |
| 439 | |
| 440 scoped_refptr<SharedBufferDispatcher> dispatcher; | |
| 441 result = SharedBufferDispatcher::Create(validated_options, num_bytes, | |
| 442 &dispatcher); | |
| 443 if (result != MOJO_RESULT_OK) { | |
| 444 DCHECK(!dispatcher); | |
| 445 return result; | |
| 446 } | |
| 447 | |
| 448 MojoHandle h = AddDispatcher(dispatcher); | |
| 449 if (h == MOJO_HANDLE_INVALID) { | |
| 450 LOG(ERROR) << "Handle table full"; | |
| 451 dispatcher->Close(); | |
| 452 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 453 } | |
| 454 | |
| 455 *shared_buffer_handle = h; | |
| 456 return MOJO_RESULT_OK; | |
| 457 } | |
| 458 | |
| 459 MojoResult CoreImpl::DuplicateBufferHandle( | |
| 460 MojoHandle buffer_handle, | |
| 461 const MojoDuplicateBufferHandleOptions* options, | |
| 462 MojoHandle* new_buffer_handle) { | |
| 463 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle)); | |
| 464 if (!dispatcher.get()) | |
| 465 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 466 | |
| 467 // Don't verify |options| here; that's the dispatcher's job. | |
| 468 if (!VerifyUserPointer<MojoHandle>(new_buffer_handle, 1)) | |
| 469 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 470 | |
| 471 scoped_refptr<Dispatcher> new_dispatcher; | |
| 472 MojoResult result = dispatcher->DuplicateBufferHandle(options, | |
| 473 &new_dispatcher); | |
| 474 if (result != MOJO_RESULT_OK) | |
| 475 return result; | |
| 476 | |
| 477 MojoHandle new_handle = AddDispatcher(new_dispatcher); | |
| 478 if (new_handle == MOJO_HANDLE_INVALID) { | |
| 479 LOG(ERROR) << "Handle table full"; | |
| 480 dispatcher->Close(); | |
| 481 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 482 } | |
| 483 | |
| 484 *new_buffer_handle = new_handle; | |
| 485 return MOJO_RESULT_OK; | |
| 486 } | |
| 487 | |
| 488 MojoResult CoreImpl::MapBuffer(MojoHandle buffer_handle, | |
| 489 uint64_t offset, | |
| 490 uint64_t num_bytes, | |
| 491 void** buffer, | |
| 492 MojoMapBufferFlags flags) { | |
| 493 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle)); | |
| 494 if (!dispatcher.get()) | |
| 495 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 496 | |
| 497 if (!VerifyUserPointer<void*>(buffer, 1)) | |
| 498 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 499 | |
| 500 scoped_ptr<RawSharedBufferMapping> mapping; | |
| 501 MojoResult result = dispatcher->MapBuffer(offset, num_bytes, flags, &mapping); | |
| 502 if (result != MOJO_RESULT_OK) | |
| 503 return result; | |
| 504 | |
| 505 DCHECK(mapping); | |
| 506 void* address = mapping->base(); | |
| 507 { | |
| 508 base::AutoLock locker(mapping_table_lock_); | |
| 509 result = mapping_table_.AddMapping(mapping.Pass()); | |
| 510 } | |
| 511 if (result != MOJO_RESULT_OK) | |
| 512 return result; | |
| 513 | |
| 514 *buffer = address; | |
| 515 return MOJO_RESULT_OK; | |
| 516 } | |
| 517 | |
| 518 MojoResult CoreImpl::UnmapBuffer(void* buffer) { | |
| 519 base::AutoLock locker(mapping_table_lock_); | |
| 520 return mapping_table_.RemoveMapping(buffer); | |
| 521 } | |
| 522 | |
| 523 scoped_refptr<Dispatcher> CoreImpl::GetDispatcher(MojoHandle handle) { | |
| 524 if (handle == MOJO_HANDLE_INVALID) | |
| 525 return NULL; | |
| 526 | |
| 527 base::AutoLock locker(handle_table_lock_); | |
| 528 return handle_table_.GetDispatcher(handle); | |
| 529 } | |
| 530 | |
// Note: We allow |handles| to repeat the same handle multiple times, since
// different flags may be specified.
// TODO(vtl): This incurs a performance cost in |RemoveWaiter()|. Analyze this
// more carefully and address it if necessary.
//
// Waits on the (caller-validated) |handles|/|flags| arrays until some handle
// satisfies (or can never satisfy) its flags, or |deadline| expires. On
// wake-up by the i-th handle, the result is i (cast to |MojoResult|).
MojoResult CoreImpl::WaitManyInternal(const MojoHandle* handles,
                                      const MojoWaitFlags* flags,
                                      uint32_t num_handles,
                                      MojoDeadline deadline) {
  DCHECK_GT(num_handles, 0u);

  // Resolve every handle up front; any invalid handle fails the whole call
  // before any waiter is registered.
  std::vector<scoped_refptr<Dispatcher> > dispatchers;
  dispatchers.reserve(num_handles);
  for (uint32_t i = 0; i < num_handles; i++) {
    scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handles[i]);
    if (!dispatcher.get())
      return MOJO_RESULT_INVALID_ARGUMENT;
    dispatchers.push_back(dispatcher);
  }

  // TODO(vtl): Should make the waiter live (permanently) in TLS.
  Waiter waiter;
  waiter.Init();

  uint32_t i;
  MojoResult rv = MOJO_RESULT_OK;
  for (i = 0; i < num_handles; i++) {
    // The index i is passed as the wake-up value, so the wait result
    // identifies which handle triggered.
    rv = dispatchers[i]->AddWaiter(&waiter,
                                   flags[i],
                                   static_cast<MojoResult>(i));
    if (rv != MOJO_RESULT_OK)
      break;
  }
  // Only the first |num_added| dispatchers actually had |waiter| added; only
  // those must have it removed below.
  uint32_t num_added = i;

  if (rv == MOJO_RESULT_ALREADY_EXISTS)
    rv = static_cast<MojoResult>(i);  // The i-th one is already "triggered".
  else if (rv == MOJO_RESULT_OK)
    rv = waiter.Wait(deadline);

  // Make sure no other dispatchers try to wake |waiter| for the current
  // |Wait()|/|WaitMany()| call. (Only after doing this can |waiter| be
  // destroyed, but this would still be required if the waiter were in TLS.)
  for (i = 0; i < num_added; i++)
    dispatchers[i]->RemoveWaiter(&waiter);

  return rv;
}
| 578 | |
| 579 } // namespace system | |
| 580 } // namespace mojo | |
| OLD | NEW |