| OLD | NEW |
| (Empty) |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "mojo/edk/system/core.h" | |
| 6 | |
| 7 #include <vector> | |
| 8 | |
| 9 #include "base/logging.h" | |
| 10 #include "base/time/time.h" | |
| 11 #include "mojo/edk/embedder/platform_shared_buffer.h" | |
| 12 #include "mojo/edk/embedder/platform_support.h" | |
| 13 #include "mojo/edk/system/async_waiter.h" | |
| 14 #include "mojo/edk/system/configuration.h" | |
| 15 #include "mojo/edk/system/data_pipe.h" | |
| 16 #include "mojo/edk/system/data_pipe_consumer_dispatcher.h" | |
| 17 #include "mojo/edk/system/data_pipe_producer_dispatcher.h" | |
| 18 #include "mojo/edk/system/dispatcher.h" | |
| 19 #include "mojo/edk/system/handle_signals_state.h" | |
| 20 #include "mojo/edk/system/local_data_pipe.h" | |
| 21 #include "mojo/edk/system/memory.h" | |
| 22 #include "mojo/edk/system/message_pipe.h" | |
| 23 #include "mojo/edk/system/message_pipe_dispatcher.h" | |
| 24 #include "mojo/edk/system/shared_buffer_dispatcher.h" | |
| 25 #include "mojo/edk/system/waiter.h" | |
| 26 #include "mojo/public/c/system/macros.h" | |
| 27 | |
| 28 namespace mojo { | |
| 29 namespace system { | |
| 30 | |
| 31 // Implementation notes | |
| 32 // | |
| 33 // Mojo primitives are implemented by the singleton |Core| object. Most calls | |
| 34 // are for a "primary" handle (the first argument). |Core::GetDispatcher()| is | |
| 35 // used to look up a |Dispatcher| object for a given handle. That object | |
| 36 // implements most primitives for that object. The wait primitives are not | |
| 37 // attached to objects and are implemented by |Core| itself. | |
| 38 // | |
| 39 // Some objects have multiple handles associated to them, e.g., message pipes | |
| 40 // (which have two). In such a case, there is still a |Dispatcher| (e.g., | |
| 41 // |MessagePipeDispatcher|) for each handle, with each handle having a strong | |
| 42 // reference to the common "secondary" object (e.g., |MessagePipe|). This | |
| 43 // secondary object does NOT have any references to the |Dispatcher|s (even if | |
| 44 // it did, it wouldn't be able to do anything with them due to lock order | |
| 45 // requirements -- see below). | |
| 46 // | |
| 47 // Waiting is implemented by having the thread that wants to wait call the | |
| 48 // |Dispatcher|s for the handles that it wants to wait on with a |Waiter| | |
| 49 // object; this |Waiter| object may be created on the stack of that thread or be | |
| 50 // kept in thread local storage for that thread (TODO(vtl): future improvement). | |
| 51 // The |Dispatcher| then adds the |Waiter| to an |AwakableList| that's either | |
| 52 // owned by that |Dispatcher| (see |SimpleDispatcher|) or by a secondary object | |
| 53 // (e.g., |MessagePipe|). To signal/wake a |Waiter|, the object in question -- | |
| 54 // either a |SimpleDispatcher| or a secondary object -- talks to its | |
| 55 // |AwakableList|. | |
| 56 | |
| 57 // Thread-safety notes | |
| 58 // | |
| 59 // Mojo primitives calls are thread-safe. We achieve this with relatively | |
| 60 // fine-grained locking. There is a global handle table lock. This lock should | |
| 61 // be held as briefly as possible (TODO(vtl): a future improvement would be to | |
| 62 // switch it to a reader-writer lock). Each |Dispatcher| object then has a lock | |
| 63 // (which subclasses can use to protect their data). | |
| 64 // | |
| 65 // The lock ordering is as follows: | |
| 66 // 1. global handle table lock, global mapping table lock | |
| 67 // 2. |Dispatcher| locks | |
| 68 // 3. secondary object locks | |
| 69 // ... | |
| 70 // INF. |Waiter| locks | |
| 71 // | |
| 72 // Notes: | |
| 73 // - While holding a |Dispatcher| lock, you may not unconditionally attempt | |
| 74 // to take another |Dispatcher| lock. (This has consequences on the | |
| 75 // concurrency semantics of |MojoWriteMessage()| when passing handles.) | |
| 76 // Doing so would lead to deadlock. | |
| 77 // - Locks at the "INF" level may not have any locks taken while they are | |
| 78 // held. | |
| 79 | |
| 80 // TODO(vtl): This should take a |scoped_ptr<PlatformSupport>| as a parameter. | |
| 81 Core::Core(scoped_ptr<embedder::PlatformSupport> platform_support) | |
| 82 : platform_support_(platform_support.Pass()) { | |
| 83 } | |
| 84 | |
// Out-of-line, intentionally empty destructor.
Core::~Core() {
}
| 87 | |
| 88 MojoHandle Core::AddDispatcher(const scoped_refptr<Dispatcher>& dispatcher) { | |
| 89 base::AutoLock locker(handle_table_lock_); | |
| 90 return handle_table_.AddDispatcher(dispatcher); | |
| 91 } | |
| 92 | |
| 93 scoped_refptr<Dispatcher> Core::GetDispatcher(MojoHandle handle) { | |
| 94 if (handle == MOJO_HANDLE_INVALID) | |
| 95 return nullptr; | |
| 96 | |
| 97 base::AutoLock locker(handle_table_lock_); | |
| 98 return handle_table_.GetDispatcher(handle); | |
| 99 } | |
| 100 | |
| 101 MojoResult Core::AsyncWait(MojoHandle handle, | |
| 102 MojoHandleSignals signals, | |
| 103 base::Callback<void(MojoResult)> callback) { | |
| 104 scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handle); | |
| 105 DCHECK(dispatcher); | |
| 106 | |
| 107 scoped_ptr<AsyncWaiter> waiter = make_scoped_ptr(new AsyncWaiter(callback)); | |
| 108 MojoResult rv = dispatcher->AddAwakable(waiter.get(), signals, 0, nullptr); | |
| 109 if (rv == MOJO_RESULT_OK) | |
| 110 ignore_result(waiter.release()); | |
| 111 return rv; | |
| 112 } | |
| 113 | |
| 114 MojoTimeTicks Core::GetTimeTicksNow() { | |
| 115 return base::TimeTicks::Now().ToInternalValue(); | |
| 116 } | |
| 117 | |
| 118 MojoResult Core::Close(MojoHandle handle) { | |
| 119 if (handle == MOJO_HANDLE_INVALID) | |
| 120 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 121 | |
| 122 scoped_refptr<Dispatcher> dispatcher; | |
| 123 { | |
| 124 base::AutoLock locker(handle_table_lock_); | |
| 125 MojoResult result = | |
| 126 handle_table_.GetAndRemoveDispatcher(handle, &dispatcher); | |
| 127 if (result != MOJO_RESULT_OK) | |
| 128 return result; | |
| 129 } | |
| 130 | |
| 131 // The dispatcher doesn't have a say in being closed, but gets notified of it. | |
| 132 // Note: This is done outside of |handle_table_lock_|. As a result, there's a | |
| 133 // race condition that the dispatcher must handle; see the comment in | |
| 134 // |Dispatcher| in dispatcher.h. | |
| 135 return dispatcher->Close(); | |
| 136 } | |
| 137 | |
| 138 MojoResult Core::Wait(MojoHandle handle, | |
| 139 MojoHandleSignals signals, | |
| 140 MojoDeadline deadline, | |
| 141 UserPointer<MojoHandleSignalsState> signals_state) { | |
| 142 uint32_t unused = static_cast<uint32_t>(-1); | |
| 143 HandleSignalsState hss; | |
| 144 MojoResult rv = WaitManyInternal(&handle, &signals, 1, deadline, &unused, | |
| 145 signals_state.IsNull() ? nullptr : &hss); | |
| 146 if (rv != MOJO_RESULT_INVALID_ARGUMENT && !signals_state.IsNull()) | |
| 147 signals_state.Put(hss); | |
| 148 return rv; | |
| 149 } | |
| 150 | |
// Waits on up to |max_wait_many_num_handles| handles for the corresponding
// |signals|. On return, |result_index| (if non-null and an index was
// determined) receives the index of the handle that caused the wake-up, and
// |signals_states| (if non-null) receives the per-handle signals states.
MojoResult Core::WaitMany(UserPointer<const MojoHandle> handles,
                          UserPointer<const MojoHandleSignals> signals,
                          uint32_t num_handles,
                          MojoDeadline deadline,
                          UserPointer<uint32_t> result_index,
                          UserPointer<MojoHandleSignalsState> signals_states) {
  if (num_handles < 1)
    return MOJO_RESULT_INVALID_ARGUMENT;
  if (num_handles > GetConfiguration().max_wait_many_num_handles)
    return MOJO_RESULT_RESOURCE_EXHAUSTED;

  // Snapshot the user-supplied arrays into kernel-side memory.
  UserPointer<const MojoHandle>::Reader handles_reader(handles, num_handles);
  UserPointer<const MojoHandleSignals>::Reader signals_reader(signals,
                                                              num_handles);
  // -1 is the "no index determined" sentinel (see the check below).
  uint32_t index = static_cast<uint32_t>(-1);
  MojoResult rv;
  if (signals_states.IsNull()) {
    rv = WaitManyInternal(handles_reader.GetPointer(),
                          signals_reader.GetPointer(), num_handles, deadline,
                          &index, nullptr);
  } else {
    UserPointer<MojoHandleSignalsState>::Writer signals_states_writer(
        signals_states, num_handles);
    // Note: The |reinterpret_cast| is safe, since |HandleSignalsState| is a
    // subclass of |MojoHandleSignalsState| that doesn't add any data members.
    rv = WaitManyInternal(handles_reader.GetPointer(),
                          signals_reader.GetPointer(), num_handles, deadline,
                          &index, reinterpret_cast<HandleSignalsState*>(
                              signals_states_writer.GetPointer()));
    // Only write states back to the caller when the arguments were valid
    // enough for states to have been filled in.
    if (rv != MOJO_RESULT_INVALID_ARGUMENT)
      signals_states_writer.Commit();
  }
  // Only report an index if one was actually determined.
  if (index != static_cast<uint32_t>(-1) && !result_index.IsNull())
    result_index.Put(index);
  return rv;
}
| 187 | |
// Creates a local message pipe: two |MessagePipeDispatcher|s sharing one
// |MessagePipe|, with a handle for each end written to
// |message_pipe_handle0|/|message_pipe_handle1|.
MojoResult Core::CreateMessagePipe(
    UserPointer<const MojoCreateMessagePipeOptions> options,
    UserPointer<MojoHandle> message_pipe_handle0,
    UserPointer<MojoHandle> message_pipe_handle1) {
  MojoCreateMessagePipeOptions validated_options = {};
  MojoResult result =
      MessagePipeDispatcher::ValidateCreateOptions(options, &validated_options);
  if (result != MOJO_RESULT_OK)
    return result;

  scoped_refptr<MessagePipeDispatcher> dispatcher0(
      new MessagePipeDispatcher(validated_options));
  scoped_refptr<MessagePipeDispatcher> dispatcher1(
      new MessagePipeDispatcher(validated_options));

  // Reserve both handles atomically (both succeed or both are invalid).
  std::pair<MojoHandle, MojoHandle> handle_pair;
  {
    base::AutoLock locker(handle_table_lock_);
    handle_pair = handle_table_.AddDispatcherPair(dispatcher0, dispatcher1);
  }
  if (handle_pair.first == MOJO_HANDLE_INVALID) {
    DCHECK_EQ(handle_pair.second, MOJO_HANDLE_INVALID);
    LOG(ERROR) << "Handle table full";
    // The dispatchers were never exposed to the caller; close them here.
    dispatcher0->Close();
    dispatcher1->Close();
    return MOJO_RESULT_RESOURCE_EXHAUSTED;
  }

  // Only wire up the shared |MessagePipe| once the handles are secured; the
  // handles haven't been returned to the caller yet, so no one can race
  // against this initialization through the handle table.
  scoped_refptr<MessagePipe> message_pipe(MessagePipe::CreateLocalLocal());
  dispatcher0->Init(message_pipe, 0);  // Port 0.
  dispatcher1->Init(message_pipe, 1);  // Port 1.

  message_pipe_handle0.Put(handle_pair.first);
  message_pipe_handle1.Put(handle_pair.second);
  return MOJO_RESULT_OK;
}
| 224 | |
| 225 // Implementation note: To properly cancel waiters and avoid other races, this | |
| 226 // does not transfer dispatchers from one handle to another, even when sending a | |
| 227 // message in-process. Instead, it must transfer the "contents" of the | |
| 228 // dispatcher to a new dispatcher, and then close the old dispatcher. If this | |
| 229 // isn't done, in the in-process case, calls on the old handle may complete | |
| 230 // after the message has been received and a new handle created (and | |
| 231 // possibly even after calls have been made on the new handle). | |
// Writes a message (with optional attached handles) to the message pipe
// identified by |message_pipe_handle|. See the implementation note above for
// why attached handles are transferred by content rather than moved.
MojoResult Core::WriteMessage(MojoHandle message_pipe_handle,
                              UserPointer<const void> bytes,
                              uint32_t num_bytes,
                              UserPointer<const MojoHandle> handles,
                              uint32_t num_handles,
                              MojoWriteMessageFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(message_pipe_handle));
  if (!dispatcher)
    return MOJO_RESULT_INVALID_ARGUMENT;

  // Easy case: not sending any handles.
  if (num_handles == 0)
    return dispatcher->WriteMessage(bytes, num_bytes, nullptr, flags);

  // We have to handle |handles| here, since we have to mark them busy in the
  // global handle table. We can't delegate this to the dispatcher, since the
  // handle table lock must be acquired before the dispatcher lock.
  //
  // (This leads to an oddity: |handles|/|num_handles| are always verified for
  // validity, even for dispatchers that don't support |WriteMessage()| and will
  // simply return failure unconditionally. It also breaks the usual
  // left-to-right verification order of arguments.)
  if (num_handles > GetConfiguration().max_message_num_handles)
    return MOJO_RESULT_RESOURCE_EXHAUSTED;

  UserPointer<const MojoHandle>::Reader handles_reader(handles, num_handles);

  // We'll need to hold on to the dispatchers so that we can pass them on to
  // |WriteMessage()| and also so that we can unlock their locks afterwards
  // without accessing the handle table. These can be dumb pointers, since their
  // entries in the handle table won't get removed (since they'll be marked as
  // busy).
  std::vector<DispatcherTransport> transports(num_handles);

  // When we pass handles, we have to try to take all their dispatchers' locks
  // and mark the handles as busy. If the call succeeds, we then remove the
  // handles from the handle table.
  {
    base::AutoLock locker(handle_table_lock_);
    MojoResult result = handle_table_.MarkBusyAndStartTransport(
        message_pipe_handle, handles_reader.GetPointer(), num_handles,
        &transports);
    if (result != MOJO_RESULT_OK)
      return result;
  }

  // Note: the write happens with the attached dispatchers' transports still
  // "started" (their locks held by the transports).
  MojoResult rv =
      dispatcher->WriteMessage(bytes, num_bytes, &transports, flags);

  // We need to release the dispatcher locks before we take the handle table
  // lock. (Lock order: handle table lock before dispatcher locks -- see the
  // thread-safety notes at the top of this file.)
  for (uint32_t i = 0; i < num_handles; i++)
    transports[i].End();

  {
    base::AutoLock locker(handle_table_lock_);
    if (rv == MOJO_RESULT_OK) {
      // Success: the handles were transferred, so drop their table entries.
      handle_table_.RemoveBusyHandles(handles_reader.GetPointer(), num_handles);
    } else {
      // Failure: the caller keeps the handles, so un-mark them as busy.
      handle_table_.RestoreBusyHandles(handles_reader.GetPointer(),
                                       num_handles);
    }
  }

  return rv;
}
| 298 | |
// Reads the next message from |message_pipe_handle|, receiving any attached
// handles into the caller's |handles| array. |num_bytes| and |num_handles|
// are in-out: on input they give the caller's buffer capacities, on output
// the actual sizes.
MojoResult Core::ReadMessage(MojoHandle message_pipe_handle,
                             UserPointer<void> bytes,
                             UserPointer<uint32_t> num_bytes,
                             UserPointer<MojoHandle> handles,
                             UserPointer<uint32_t> num_handles,
                             MojoReadMessageFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(message_pipe_handle));
  if (!dispatcher)
    return MOJO_RESULT_INVALID_ARGUMENT;

  // A null |num_handles| means "no capacity for handles".
  uint32_t num_handles_value = num_handles.IsNull() ? 0 : num_handles.Get();

  MojoResult rv;
  if (num_handles_value == 0) {
    // Easy case: won't receive any handles.
    rv = dispatcher->ReadMessage(bytes, num_bytes, nullptr, &num_handles_value,
                                 flags);
  } else {
    DispatcherVector dispatchers;
    rv = dispatcher->ReadMessage(bytes, num_bytes, &dispatchers,
                                 &num_handles_value, flags);
    if (!dispatchers.empty()) {
      // Dispatchers are only produced on a successful read that fits.
      DCHECK_EQ(rv, MOJO_RESULT_OK);
      DCHECK(!num_handles.IsNull());
      DCHECK_LE(dispatchers.size(), static_cast<size_t>(num_handles_value));

      bool success;
      UserPointer<MojoHandle>::Writer handles_writer(handles,
                                                     dispatchers.size());
      {
        base::AutoLock locker(handle_table_lock_);
        success = handle_table_.AddDispatcherVector(
            dispatchers, handles_writer.GetPointer());
      }
      if (success) {
        // Only expose the new handle values to the caller once they're all
        // actually in the table.
        handles_writer.Commit();
      } else {
        LOG(ERROR) << "Received message with " << dispatchers.size()
                   << " handles, but handle table full";
        // Close dispatchers (outside the lock).
        for (size_t i = 0; i < dispatchers.size(); i++) {
          if (dispatchers[i])
            dispatchers[i]->Close();
        }
        // The message itself was consumed; report the handle-table failure
        // unless the read already failed.
        if (rv == MOJO_RESULT_OK)
          rv = MOJO_RESULT_RESOURCE_EXHAUSTED;
      }
    }
  }

  if (!num_handles.IsNull())
    num_handles.Put(num_handles_value);
  return rv;
}
| 353 | |
// Creates a local data pipe: a producer and a consumer dispatcher sharing
// one |LocalDataPipe|, with a handle for each written to
// |data_pipe_producer_handle|/|data_pipe_consumer_handle|.
MojoResult Core::CreateDataPipe(
    UserPointer<const MojoCreateDataPipeOptions> options,
    UserPointer<MojoHandle> data_pipe_producer_handle,
    UserPointer<MojoHandle> data_pipe_consumer_handle) {
  MojoCreateDataPipeOptions validated_options = {};
  MojoResult result =
      DataPipe::ValidateCreateOptions(options, &validated_options);
  if (result != MOJO_RESULT_OK)
    return result;

  scoped_refptr<DataPipeProducerDispatcher> producer_dispatcher(
      new DataPipeProducerDispatcher());
  scoped_refptr<DataPipeConsumerDispatcher> consumer_dispatcher(
      new DataPipeConsumerDispatcher());

  // Reserve both handles atomically (both succeed or both are invalid).
  std::pair<MojoHandle, MojoHandle> handle_pair;
  {
    base::AutoLock locker(handle_table_lock_);
    handle_pair = handle_table_.AddDispatcherPair(producer_dispatcher,
                                                  consumer_dispatcher);
  }
  if (handle_pair.first == MOJO_HANDLE_INVALID) {
    DCHECK_EQ(handle_pair.second, MOJO_HANDLE_INVALID);
    LOG(ERROR) << "Handle table full";
    // The dispatchers were never exposed to the caller; close them here.
    producer_dispatcher->Close();
    consumer_dispatcher->Close();
    return MOJO_RESULT_RESOURCE_EXHAUSTED;
  }
  DCHECK_NE(handle_pair.second, MOJO_HANDLE_INVALID);

  // Wire up the shared pipe only after the handles are secured; they haven't
  // been returned to the caller yet, so nothing can race on them.
  scoped_refptr<DataPipe> data_pipe(new LocalDataPipe(validated_options));
  producer_dispatcher->Init(data_pipe);
  consumer_dispatcher->Init(data_pipe);

  data_pipe_producer_handle.Put(handle_pair.first);
  data_pipe_consumer_handle.Put(handle_pair.second);
  return MOJO_RESULT_OK;
}
| 392 | |
| 393 MojoResult Core::WriteData(MojoHandle data_pipe_producer_handle, | |
| 394 UserPointer<const void> elements, | |
| 395 UserPointer<uint32_t> num_bytes, | |
| 396 MojoWriteDataFlags flags) { | |
| 397 scoped_refptr<Dispatcher> dispatcher( | |
| 398 GetDispatcher(data_pipe_producer_handle)); | |
| 399 if (!dispatcher) | |
| 400 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 401 | |
| 402 return dispatcher->WriteData(elements, num_bytes, flags); | |
| 403 } | |
| 404 | |
| 405 MojoResult Core::BeginWriteData(MojoHandle data_pipe_producer_handle, | |
| 406 UserPointer<void*> buffer, | |
| 407 UserPointer<uint32_t> buffer_num_bytes, | |
| 408 MojoWriteDataFlags flags) { | |
| 409 scoped_refptr<Dispatcher> dispatcher( | |
| 410 GetDispatcher(data_pipe_producer_handle)); | |
| 411 if (!dispatcher) | |
| 412 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 413 | |
| 414 return dispatcher->BeginWriteData(buffer, buffer_num_bytes, flags); | |
| 415 } | |
| 416 | |
| 417 MojoResult Core::EndWriteData(MojoHandle data_pipe_producer_handle, | |
| 418 uint32_t num_bytes_written) { | |
| 419 scoped_refptr<Dispatcher> dispatcher( | |
| 420 GetDispatcher(data_pipe_producer_handle)); | |
| 421 if (!dispatcher) | |
| 422 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 423 | |
| 424 return dispatcher->EndWriteData(num_bytes_written); | |
| 425 } | |
| 426 | |
| 427 MojoResult Core::ReadData(MojoHandle data_pipe_consumer_handle, | |
| 428 UserPointer<void> elements, | |
| 429 UserPointer<uint32_t> num_bytes, | |
| 430 MojoReadDataFlags flags) { | |
| 431 scoped_refptr<Dispatcher> dispatcher( | |
| 432 GetDispatcher(data_pipe_consumer_handle)); | |
| 433 if (!dispatcher) | |
| 434 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 435 | |
| 436 return dispatcher->ReadData(elements, num_bytes, flags); | |
| 437 } | |
| 438 | |
| 439 MojoResult Core::BeginReadData(MojoHandle data_pipe_consumer_handle, | |
| 440 UserPointer<const void*> buffer, | |
| 441 UserPointer<uint32_t> buffer_num_bytes, | |
| 442 MojoReadDataFlags flags) { | |
| 443 scoped_refptr<Dispatcher> dispatcher( | |
| 444 GetDispatcher(data_pipe_consumer_handle)); | |
| 445 if (!dispatcher) | |
| 446 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 447 | |
| 448 return dispatcher->BeginReadData(buffer, buffer_num_bytes, flags); | |
| 449 } | |
| 450 | |
| 451 MojoResult Core::EndReadData(MojoHandle data_pipe_consumer_handle, | |
| 452 uint32_t num_bytes_read) { | |
| 453 scoped_refptr<Dispatcher> dispatcher( | |
| 454 GetDispatcher(data_pipe_consumer_handle)); | |
| 455 if (!dispatcher) | |
| 456 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 457 | |
| 458 return dispatcher->EndReadData(num_bytes_read); | |
| 459 } | |
| 460 | |
| 461 MojoResult Core::CreateSharedBuffer( | |
| 462 UserPointer<const MojoCreateSharedBufferOptions> options, | |
| 463 uint64_t num_bytes, | |
| 464 UserPointer<MojoHandle> shared_buffer_handle) { | |
| 465 MojoCreateSharedBufferOptions validated_options = {}; | |
| 466 MojoResult result = SharedBufferDispatcher::ValidateCreateOptions( | |
| 467 options, &validated_options); | |
| 468 if (result != MOJO_RESULT_OK) | |
| 469 return result; | |
| 470 | |
| 471 scoped_refptr<SharedBufferDispatcher> dispatcher; | |
| 472 result = SharedBufferDispatcher::Create(platform_support(), validated_options, | |
| 473 num_bytes, &dispatcher); | |
| 474 if (result != MOJO_RESULT_OK) { | |
| 475 DCHECK(!dispatcher); | |
| 476 return result; | |
| 477 } | |
| 478 | |
| 479 MojoHandle h = AddDispatcher(dispatcher); | |
| 480 if (h == MOJO_HANDLE_INVALID) { | |
| 481 LOG(ERROR) << "Handle table full"; | |
| 482 dispatcher->Close(); | |
| 483 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 484 } | |
| 485 | |
| 486 shared_buffer_handle.Put(h); | |
| 487 return MOJO_RESULT_OK; | |
| 488 } | |
| 489 | |
| 490 MojoResult Core::DuplicateBufferHandle( | |
| 491 MojoHandle buffer_handle, | |
| 492 UserPointer<const MojoDuplicateBufferHandleOptions> options, | |
| 493 UserPointer<MojoHandle> new_buffer_handle) { | |
| 494 scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle)); | |
| 495 if (!dispatcher) | |
| 496 return MOJO_RESULT_INVALID_ARGUMENT; | |
| 497 | |
| 498 // Don't verify |options| here; that's the dispatcher's job. | |
| 499 scoped_refptr<Dispatcher> new_dispatcher; | |
| 500 MojoResult result = | |
| 501 dispatcher->DuplicateBufferHandle(options, &new_dispatcher); | |
| 502 if (result != MOJO_RESULT_OK) | |
| 503 return result; | |
| 504 | |
| 505 MojoHandle new_handle = AddDispatcher(new_dispatcher); | |
| 506 if (new_handle == MOJO_HANDLE_INVALID) { | |
| 507 LOG(ERROR) << "Handle table full"; | |
| 508 dispatcher->Close(); | |
| 509 return MOJO_RESULT_RESOURCE_EXHAUSTED; | |
| 510 } | |
| 511 | |
| 512 new_buffer_handle.Put(new_handle); | |
| 513 return MOJO_RESULT_OK; | |
| 514 } | |
| 515 | |
// Maps |num_bytes| of the shared buffer identified by |buffer_handle|,
// starting at |offset|, into this process and writes the base address to
// |buffer|. The mapping stays alive (owned by |mapping_table_|) until
// |UnmapBuffer()| is called with that address.
MojoResult Core::MapBuffer(MojoHandle buffer_handle,
                           uint64_t offset,
                           uint64_t num_bytes,
                           UserPointer<void*> buffer,
                           MojoMapBufferFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle));
  if (!dispatcher)
    return MOJO_RESULT_INVALID_ARGUMENT;

  scoped_ptr<embedder::PlatformSharedBufferMapping> mapping;
  MojoResult result = dispatcher->MapBuffer(offset, num_bytes, flags, &mapping);
  if (result != MOJO_RESULT_OK)
    return result;

  DCHECK(mapping);
  // Capture the base address before |mapping| is moved into the table below.
  void* address = mapping->GetBase();
  {
    base::AutoLock locker(mapping_table_lock_);
    // Ownership of the mapping transfers to |mapping_table_|.
    result = mapping_table_.AddMapping(mapping.Pass());
  }
  // Note: if |AddMapping()| fails, |mapping| has already been consumed; the
  // table is responsible for any cleanup in that case.
  if (result != MOJO_RESULT_OK)
    return result;

  buffer.Put(address);
  return MOJO_RESULT_OK;
}
| 542 | |
| 543 MojoResult Core::UnmapBuffer(UserPointer<void> buffer) { | |
| 544 base::AutoLock locker(mapping_table_lock_); | |
| 545 return mapping_table_.RemoveMapping(buffer.GetPointerValue()); | |
| 546 } | |
| 547 | |
// Note: We allow |handles| to repeat the same handle multiple times, since
// different flags may be specified.
// TODO(vtl): This incurs a performance cost in |Remove()|. Analyze this
// more carefully and address it if necessary.
//
// Shared implementation behind |Wait()| and |WaitMany()|. Preconditions:
// |num_handles| > 0 and |*result_index| == (uint32_t)-1 (the "no index"
// sentinel). On an invalid handle or a failed |AddAwakable()|,
// |*result_index| is set to the offending index. If |signals_states| is
// non-null, it must point to |num_handles| entries, all of which are filled
// in (except when a handle was invalid).
MojoResult Core::WaitManyInternal(const MojoHandle* handles,
                                  const MojoHandleSignals* signals,
                                  uint32_t num_handles,
                                  MojoDeadline deadline,
                                  uint32_t* result_index,
                                  HandleSignalsState* signals_states) {
  DCHECK_GT(num_handles, 0u);
  DCHECK_EQ(*result_index, static_cast<uint32_t>(-1));

  // Resolve all handles up front so an invalid one fails before any awakable
  // is registered.
  DispatcherVector dispatchers;
  dispatchers.reserve(num_handles);
  for (uint32_t i = 0; i < num_handles; i++) {
    scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handles[i]);
    if (!dispatcher) {
      *result_index = i;
      return MOJO_RESULT_INVALID_ARGUMENT;
    }
    dispatchers.push_back(dispatcher);
  }

  // TODO(vtl): Should make the waiter live (permanently) in TLS.
  Waiter waiter;
  waiter.Init();

  // Register |waiter| with each dispatcher, stopping at the first failure
  // (e.g., signals already satisfied/unsatisfiable).
  uint32_t i;
  MojoResult rv = MOJO_RESULT_OK;
  for (i = 0; i < num_handles; i++) {
    rv = dispatchers[i]->AddAwakable(
        &waiter, signals[i], i, signals_states ? &signals_states[i] : nullptr);
    if (rv != MOJO_RESULT_OK) {
      *result_index = i;
      break;
    }
  }
  // Only the first |num_added| dispatchers actually hold the awakable and
  // need it removed below.
  uint32_t num_added = i;

  if (rv == MOJO_RESULT_ALREADY_EXISTS)
    rv = MOJO_RESULT_OK;  // The i-th one is already "triggered".
  else if (rv == MOJO_RESULT_OK)
    rv = waiter.Wait(deadline, result_index);

  // Make sure no other dispatchers try to wake |waiter| for the current
  // |Wait()|/|WaitMany()| call. (Only after doing this can |waiter| be
  // destroyed, but this would still be required if the waiter were in TLS.)
  for (i = 0; i < num_added; i++) {
    dispatchers[i]->RemoveAwakable(
        &waiter, signals_states ? &signals_states[i] : nullptr);
  }
  // Fill in states for the dispatchers that never had the awakable added
  // (those past the point where registration stopped).
  if (signals_states) {
    for (; i < num_handles; i++)
      signals_states[i] = dispatchers[i]->GetHandleSignalsState();
  }

  return rv;
}
| 607 | |
| 608 } // namespace system | |
| 609 } // namespace mojo | |
| OLD | NEW |