// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "mojo/edk/system/core.h"

#include <vector>

#include "base/logging.h"
#include "base/time/time.h"
#include "mojo/edk/embedder/platform_shared_buffer.h"
#include "mojo/edk/embedder/platform_support.h"
#include "mojo/edk/system/constants.h"
#include "mojo/edk/system/data_pipe.h"
#include "mojo/edk/system/data_pipe_consumer_dispatcher.h"
#include "mojo/edk/system/data_pipe_producer_dispatcher.h"
#include "mojo/edk/system/dispatcher.h"
#include "mojo/edk/system/handle_signals_state.h"
#include "mojo/edk/system/local_data_pipe.h"
#include "mojo/edk/system/memory.h"
#include "mojo/edk/system/message_pipe.h"
#include "mojo/edk/system/message_pipe_dispatcher.h"
#include "mojo/edk/system/shared_buffer_dispatcher.h"
#include "mojo/edk/system/waiter.h"
#include "mojo/public/c/system/macros.h"

namespace mojo {
namespace system {

// Implementation notes
//
// Mojo primitives are implemented by the singleton |Core| object. Most calls
// are for a "primary" handle (the first argument). |Core::GetDispatcher()| is
// used to look up a |Dispatcher| object for a given handle. That object
// implements most primitives for that object. The wait primitives are not
// attached to objects and are implemented by |Core| itself.
//
// Some objects have multiple handles associated with them, e.g., message pipes
// (which have two). In such a case, there is still a |Dispatcher| (e.g.,
// |MessagePipeDispatcher|) for each handle, with each handle having a strong
// reference to the common "secondary" object (e.g., |MessagePipe|). This
// secondary object does NOT have any references to the |Dispatcher|s (even if
// it did, it wouldn't be able to do anything with them due to lock order
// requirements -- see below).
//
// Waiting is implemented by having the thread that wants to wait call the
// |Dispatcher|s for the handles that it wants to wait on with a |Waiter|
// object; this |Waiter| object may be created on the stack of that thread or be
// kept in thread local storage for that thread (TODO(vtl): future improvement).
// The |Dispatcher| then adds the |Waiter| to a |WaiterList| that's either owned
// by that |Dispatcher| (see |SimpleDispatcher|) or by a secondary object (e.g.,
// |MessagePipe|). To signal/wake a |Waiter|, the object in question -- either a
// |SimpleDispatcher| or a secondary object -- talks to its |WaiterList|.
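//
// For example (this is roughly what |WaitManyInternal()| below does): a wait
// on a handle looks up its |Dispatcher|, registers a |Waiter| with it via
// |Dispatcher::AddWaiter()|, blocks in |Waiter::Wait()| until the waiter is
// awoken or the deadline expires, and finally calls
// |Dispatcher::RemoveWaiter()| so that nothing tries to wake the waiter after
// it is destroyed.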

// Thread-safety notes
//
// Calls on Mojo primitives are thread-safe. We achieve this with relatively
// fine-grained locking. There is a global handle table lock. This lock should
// be held as briefly as possible (TODO(vtl): a future improvement would be to
// switch it to a reader-writer lock). Each |Dispatcher| object then has a lock
// (which subclasses can use to protect their data).
//
// The lock ordering is as follows:
//   1. global handle table lock, global mapping table lock
//   2. |Dispatcher| locks
//   3. secondary object locks
//   ...
//   INF. |Waiter| locks
//
// Notes:
//   - While holding a |Dispatcher| lock, you may not unconditionally attempt
//     to take another |Dispatcher| lock. (This has consequences on the
//     concurrency semantics of |MojoWriteMessage()| when passing handles.)
//     Doing so would lead to deadlock.
//   - Locks at the "INF" level may not have any locks taken while they are
//     held.
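//
// For example, |WriteMessage()| below marks the handles being transferred as
// busy and starts their |DispatcherTransport|s (which hold the corresponding
// |Dispatcher| locks) while holding the handle table lock -- level 1 before
// level 2 in the ordering above. The reverse is not allowed: a |Dispatcher|
// may not touch the handle table while its own lock is held, which is why the
// handle bookkeeping for message writing is done here in |Core| rather than in
// the dispatcher.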

Core::Core(scoped_ptr<embedder::PlatformSupport> platform_support)
    : platform_support_(platform_support.Pass()) {
}

Core::~Core() {
}

MojoHandle Core::AddDispatcher(const scoped_refptr<Dispatcher>& dispatcher) {
  base::AutoLock locker(handle_table_lock_);
  return handle_table_.AddDispatcher(dispatcher);
}

scoped_refptr<Dispatcher> Core::GetDispatcher(MojoHandle handle) {
  if (handle == MOJO_HANDLE_INVALID)
    return nullptr;

  base::AutoLock locker(handle_table_lock_);
  return handle_table_.GetDispatcher(handle);
}

MojoTimeTicks Core::GetTimeTicksNow() {
  return base::TimeTicks::Now().ToInternalValue();
}

MojoResult Core::Close(MojoHandle handle) {
  if (handle == MOJO_HANDLE_INVALID)
    return MOJO_RESULT_INVALID_ARGUMENT;

  scoped_refptr<Dispatcher> dispatcher;
  {
    base::AutoLock locker(handle_table_lock_);
    MojoResult result =
        handle_table_.GetAndRemoveDispatcher(handle, &dispatcher);
    if (result != MOJO_RESULT_OK)
      return result;
  }

  // The dispatcher doesn't have a say in being closed, but gets notified of it.
  // Note: This is done outside of |handle_table_lock_|. As a result, there's a
  // race condition that the dispatcher must handle; see the comment in
  // |Dispatcher| in dispatcher.h.
  return dispatcher->Close();
}

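// Waits on a single handle by delegating to |WaitManyInternal()|. The signals
// state (if requested) is only written back when the wait did not fail with
// MOJO_RESULT_INVALID_ARGUMENT (i.e., when the handle was valid).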
MojoResult Core::Wait(MojoHandle handle,
                      MojoHandleSignals signals,
                      MojoDeadline deadline,
                      UserPointer<MojoHandleSignalsState> signals_state) {
  uint32_t unused = static_cast<uint32_t>(-1);
  HandleSignalsState hss;
  MojoResult rv = WaitManyInternal(&handle,
                                   &signals,
                                   1,
                                   deadline,
                                   &unused,
                                   signals_state.IsNull() ? nullptr : &hss);
  if (rv != MOJO_RESULT_INVALID_ARGUMENT && !signals_state.IsNull())
    signals_state.Put(hss);
  return rv;
}

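// Like |Wait()|, but for multiple handles. |result_index| (if non-null) is
// only written when |WaitManyInternal()| reports a specific handle index back.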
MojoResult Core::WaitMany(UserPointer<const MojoHandle> handles,
                          UserPointer<const MojoHandleSignals> signals,
                          uint32_t num_handles,
                          MojoDeadline deadline,
                          UserPointer<uint32_t> result_index,
                          UserPointer<MojoHandleSignalsState> signals_states) {
  if (num_handles < 1)
    return MOJO_RESULT_INVALID_ARGUMENT;
  if (num_handles > kMaxWaitManyNumHandles)
    return MOJO_RESULT_RESOURCE_EXHAUSTED;

  UserPointer<const MojoHandle>::Reader handles_reader(handles, num_handles);
  UserPointer<const MojoHandleSignals>::Reader signals_reader(signals,
                                                              num_handles);
  uint32_t index = static_cast<uint32_t>(-1);
  MojoResult rv;
  if (signals_states.IsNull()) {
    rv = WaitManyInternal(handles_reader.GetPointer(),
                          signals_reader.GetPointer(),
                          num_handles,
                          deadline,
                          &index,
                          nullptr);
  } else {
    UserPointer<MojoHandleSignalsState>::Writer signals_states_writer(
        signals_states, num_handles);
    // Note: The |reinterpret_cast| is safe, since |HandleSignalsState| is a
    // subclass of |MojoHandleSignalsState| that doesn't add any data members.
    rv = WaitManyInternal(handles_reader.GetPointer(),
                          signals_reader.GetPointer(),
                          num_handles,
                          deadline,
                          &index,
                          reinterpret_cast<HandleSignalsState*>(
                              signals_states_writer.GetPointer()));
    if (rv != MOJO_RESULT_INVALID_ARGUMENT)
      signals_states_writer.Commit();
  }
  if (index != static_cast<uint32_t>(-1) && !result_index.IsNull())
    result_index.Put(index);
  return rv;
}

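// The two |MessagePipeDispatcher|s and their handles are created first; the
// shared |MessagePipe| is created and attached (via |Init()|) only once both
// handles have been allocated successfully.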
MojoResult Core::CreateMessagePipe(
    UserPointer<const MojoCreateMessagePipeOptions> options,
    UserPointer<MojoHandle> message_pipe_handle0,
    UserPointer<MojoHandle> message_pipe_handle1) {
  MojoCreateMessagePipeOptions validated_options = {};
  MojoResult result =
      MessagePipeDispatcher::ValidateCreateOptions(options, &validated_options);
  if (result != MOJO_RESULT_OK)
    return result;

  scoped_refptr<MessagePipeDispatcher> dispatcher0(
      new MessagePipeDispatcher(validated_options));
  scoped_refptr<MessagePipeDispatcher> dispatcher1(
      new MessagePipeDispatcher(validated_options));

  std::pair<MojoHandle, MojoHandle> handle_pair;
  {
    base::AutoLock locker(handle_table_lock_);
    handle_pair = handle_table_.AddDispatcherPair(dispatcher0, dispatcher1);
  }
  if (handle_pair.first == MOJO_HANDLE_INVALID) {
    DCHECK_EQ(handle_pair.second, MOJO_HANDLE_INVALID);
    LOG(ERROR) << "Handle table full";
    dispatcher0->Close();
    dispatcher1->Close();
    return MOJO_RESULT_RESOURCE_EXHAUSTED;
  }

  scoped_refptr<MessagePipe> message_pipe(MessagePipe::CreateLocalLocal());
  dispatcher0->Init(message_pipe, 0);
  dispatcher1->Init(message_pipe, 1);

  message_pipe_handle0.Put(handle_pair.first);
  message_pipe_handle1.Put(handle_pair.second);
  return MOJO_RESULT_OK;
}

// Implementation note: To properly cancel waiters and avoid other races, this
// does not transfer dispatchers from one handle to another, even when sending
// a message in-process. Instead, it must transfer the "contents" of the
// dispatcher to a new dispatcher, and then close the old dispatcher. If this
// isn't done, in the in-process case, calls on the old handle may complete
// after the message has been received and a new handle created (and possibly
// even after calls have been made on the new handle).
MojoResult Core::WriteMessage(MojoHandle message_pipe_handle,
                              UserPointer<const void> bytes,
                              uint32_t num_bytes,
                              UserPointer<const MojoHandle> handles,
                              uint32_t num_handles,
                              MojoWriteMessageFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(message_pipe_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  // Easy case: not sending any handles.
  if (num_handles == 0)
    return dispatcher->WriteMessage(bytes, num_bytes, nullptr, flags);

  // We have to handle |handles| here, since we have to mark them busy in the
  // global handle table. We can't delegate this to the dispatcher, since the
  // handle table lock must be acquired before the dispatcher lock.
  //
  // (This leads to an oddity: |handles|/|num_handles| are always verified for
  // validity, even for dispatchers that don't support |WriteMessage()| and will
  // simply return failure unconditionally. It also breaks the usual
  // left-to-right verification order of arguments.)
  if (num_handles > kMaxMessageNumHandles)
    return MOJO_RESULT_RESOURCE_EXHAUSTED;

  UserPointer<const MojoHandle>::Reader handles_reader(handles, num_handles);

  // We'll need to hold on to the dispatchers so that we can pass them on to
  // |WriteMessage()| and also so that we can unlock their locks afterwards
  // without accessing the handle table. These can be dumb pointers, since their
  // entries in the handle table won't get removed (since they'll be marked as
  // busy).
  std::vector<DispatcherTransport> transports(num_handles);

  // When we pass handles, we have to try to take all their dispatchers' locks
  // and mark the handles as busy. If the call succeeds, we then remove the
  // handles from the handle table.
  {
    base::AutoLock locker(handle_table_lock_);
    MojoResult result =
        handle_table_.MarkBusyAndStartTransport(message_pipe_handle,
                                                handles_reader.GetPointer(),
                                                num_handles,
                                                &transports);
    if (result != MOJO_RESULT_OK)
      return result;
  }

  MojoResult rv =
      dispatcher->WriteMessage(bytes, num_bytes, &transports, flags);

  // We need to release the dispatcher locks before we take the handle table
  // lock.
  for (uint32_t i = 0; i < num_handles; i++)
    transports[i].End();

  {
    base::AutoLock locker(handle_table_lock_);
    if (rv == MOJO_RESULT_OK) {
      handle_table_.RemoveBusyHandles(handles_reader.GetPointer(), num_handles);
    } else {
      handle_table_.RestoreBusyHandles(handles_reader.GetPointer(),
                                       num_handles);
    }
  }

  return rv;
}

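// Receiving a message that carries handles is a two-step process: the
// dispatcher produces the transferred |Dispatcher|s, and this function then
// assigns handles to them. If the handle table is full, the received
// dispatchers are closed and (if the read itself succeeded)
// MOJO_RESULT_RESOURCE_EXHAUSTED is reported instead.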
MojoResult Core::ReadMessage(MojoHandle message_pipe_handle,
                             UserPointer<void> bytes,
                             UserPointer<uint32_t> num_bytes,
                             UserPointer<MojoHandle> handles,
                             UserPointer<uint32_t> num_handles,
                             MojoReadMessageFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(message_pipe_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  uint32_t num_handles_value = num_handles.IsNull() ? 0 : num_handles.Get();

  MojoResult rv;
  if (num_handles_value == 0) {
    // Easy case: won't receive any handles.
    rv = dispatcher->ReadMessage(
        bytes, num_bytes, nullptr, &num_handles_value, flags);
  } else {
    DispatcherVector dispatchers;
    rv = dispatcher->ReadMessage(
        bytes, num_bytes, &dispatchers, &num_handles_value, flags);
    if (!dispatchers.empty()) {
      DCHECK_EQ(rv, MOJO_RESULT_OK);
      DCHECK(!num_handles.IsNull());
      DCHECK_LE(dispatchers.size(), static_cast<size_t>(num_handles_value));

      bool success;
      UserPointer<MojoHandle>::Writer handles_writer(handles,
                                                     dispatchers.size());
      {
        base::AutoLock locker(handle_table_lock_);
        success = handle_table_.AddDispatcherVector(
            dispatchers, handles_writer.GetPointer());
      }
      if (success) {
        handles_writer.Commit();
      } else {
        LOG(ERROR) << "Received message with " << dispatchers.size()
                   << " handles, but handle table full";
        // Close dispatchers (outside the lock).
        for (size_t i = 0; i < dispatchers.size(); i++) {
          if (dispatchers[i].get())
            dispatchers[i]->Close();
        }
        if (rv == MOJO_RESULT_OK)
          rv = MOJO_RESULT_RESOURCE_EXHAUSTED;
      }
    }
  }

  if (!num_handles.IsNull())
    num_handles.Put(num_handles_value);
  return rv;
}

MojoResult Core::CreateDataPipe(
    UserPointer<const MojoCreateDataPipeOptions> options,
    UserPointer<MojoHandle> data_pipe_producer_handle,
    UserPointer<MojoHandle> data_pipe_consumer_handle) {
  MojoCreateDataPipeOptions validated_options = {};
  MojoResult result =
      DataPipe::ValidateCreateOptions(options, &validated_options);
  if (result != MOJO_RESULT_OK)
    return result;

  scoped_refptr<DataPipeProducerDispatcher> producer_dispatcher(
      new DataPipeProducerDispatcher());
  scoped_refptr<DataPipeConsumerDispatcher> consumer_dispatcher(
      new DataPipeConsumerDispatcher());

  std::pair<MojoHandle, MojoHandle> handle_pair;
  {
    base::AutoLock locker(handle_table_lock_);
    handle_pair = handle_table_.AddDispatcherPair(producer_dispatcher,
                                                  consumer_dispatcher);
  }
  if (handle_pair.first == MOJO_HANDLE_INVALID) {
    DCHECK_EQ(handle_pair.second, MOJO_HANDLE_INVALID);
    LOG(ERROR) << "Handle table full";
    producer_dispatcher->Close();
    consumer_dispatcher->Close();
    return MOJO_RESULT_RESOURCE_EXHAUSTED;
  }
  DCHECK_NE(handle_pair.second, MOJO_HANDLE_INVALID);

  scoped_refptr<DataPipe> data_pipe(new LocalDataPipe(validated_options));
  producer_dispatcher->Init(data_pipe);
  consumer_dispatcher->Init(data_pipe);

  data_pipe_producer_handle.Put(handle_pair.first);
  data_pipe_consumer_handle.Put(handle_pair.second);
  return MOJO_RESULT_OK;
}

MojoResult Core::WriteData(MojoHandle data_pipe_producer_handle,
                           UserPointer<const void> elements,
                           UserPointer<uint32_t> num_bytes,
                           MojoWriteDataFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(
      GetDispatcher(data_pipe_producer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  return dispatcher->WriteData(elements, num_bytes, flags);
}

MojoResult Core::BeginWriteData(MojoHandle data_pipe_producer_handle,
                                UserPointer<void*> buffer,
                                UserPointer<uint32_t> buffer_num_bytes,
                                MojoWriteDataFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(
      GetDispatcher(data_pipe_producer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  return dispatcher->BeginWriteData(buffer, buffer_num_bytes, flags);
}

MojoResult Core::EndWriteData(MojoHandle data_pipe_producer_handle,
                              uint32_t num_bytes_written) {
  scoped_refptr<Dispatcher> dispatcher(
      GetDispatcher(data_pipe_producer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  return dispatcher->EndWriteData(num_bytes_written);
}

MojoResult Core::ReadData(MojoHandle data_pipe_consumer_handle,
                          UserPointer<void> elements,
                          UserPointer<uint32_t> num_bytes,
                          MojoReadDataFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(
      GetDispatcher(data_pipe_consumer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  return dispatcher->ReadData(elements, num_bytes, flags);
}

MojoResult Core::BeginReadData(MojoHandle data_pipe_consumer_handle,
                               UserPointer<const void*> buffer,
                               UserPointer<uint32_t> buffer_num_bytes,
                               MojoReadDataFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(
      GetDispatcher(data_pipe_consumer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  return dispatcher->BeginReadData(buffer, buffer_num_bytes, flags);
}

MojoResult Core::EndReadData(MojoHandle data_pipe_consumer_handle,
                             uint32_t num_bytes_read) {
  scoped_refptr<Dispatcher> dispatcher(
      GetDispatcher(data_pipe_consumer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  return dispatcher->EndReadData(num_bytes_read);
}

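// The underlying shared memory is created by
// |SharedBufferDispatcher::Create()|, using the embedder-provided
// |PlatformSupport|.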
MojoResult Core::CreateSharedBuffer(
    UserPointer<const MojoCreateSharedBufferOptions> options,
    uint64_t num_bytes,
    UserPointer<MojoHandle> shared_buffer_handle) {
  MojoCreateSharedBufferOptions validated_options = {};
  MojoResult result = SharedBufferDispatcher::ValidateCreateOptions(
      options, &validated_options);
  if (result != MOJO_RESULT_OK)
    return result;

  scoped_refptr<SharedBufferDispatcher> dispatcher;
  result = SharedBufferDispatcher::Create(
      platform_support(), validated_options, num_bytes, &dispatcher);
  if (result != MOJO_RESULT_OK) {
    DCHECK(!dispatcher.get());
    return result;
  }

  MojoHandle h = AddDispatcher(dispatcher);
  if (h == MOJO_HANDLE_INVALID) {
    LOG(ERROR) << "Handle table full";
    dispatcher->Close();
    return MOJO_RESULT_RESOURCE_EXHAUSTED;
  }

  shared_buffer_handle.Put(h);
  return MOJO_RESULT_OK;
}

MojoResult Core::DuplicateBufferHandle(
    MojoHandle buffer_handle,
    UserPointer<const MojoDuplicateBufferHandleOptions> options,
    UserPointer<MojoHandle> new_buffer_handle) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  // Don't verify |options| here; that's the dispatcher's job.
  scoped_refptr<Dispatcher> new_dispatcher;
  MojoResult result =
      dispatcher->DuplicateBufferHandle(options, &new_dispatcher);
  if (result != MOJO_RESULT_OK)
    return result;

  MojoHandle new_handle = AddDispatcher(new_dispatcher);
  if (new_handle == MOJO_HANDLE_INVALID) {
    LOG(ERROR) << "Handle table full";
    // Close the duplicate; the original |dispatcher| still belongs to
    // |buffer_handle| and must stay open.
    new_dispatcher->Close();
    return MOJO_RESULT_RESOURCE_EXHAUSTED;
  }

  new_buffer_handle.Put(new_handle);
  return MOJO_RESULT_OK;
}

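// On success, the mapping is also registered in |mapping_table_| (under
// |mapping_table_lock_|) so that |UnmapBuffer()| can later remove it using the
// base address handed back to the caller.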
MojoResult Core::MapBuffer(MojoHandle buffer_handle,
                           uint64_t offset,
                           uint64_t num_bytes,
                           UserPointer<void*> buffer,
                           MojoMapBufferFlags flags) {
  scoped_refptr<Dispatcher> dispatcher(GetDispatcher(buffer_handle));
  if (!dispatcher.get())
    return MOJO_RESULT_INVALID_ARGUMENT;

  scoped_ptr<embedder::PlatformSharedBufferMapping> mapping;
  MojoResult result = dispatcher->MapBuffer(offset, num_bytes, flags, &mapping);
  if (result != MOJO_RESULT_OK)
    return result;

  DCHECK(mapping);
  void* address = mapping->GetBase();
  {
    base::AutoLock locker(mapping_table_lock_);
    result = mapping_table_.AddMapping(mapping.Pass());
  }
  if (result != MOJO_RESULT_OK)
    return result;

  buffer.Put(address);
  return MOJO_RESULT_OK;
}

MojoResult Core::UnmapBuffer(UserPointer<void> buffer) {
  base::AutoLock locker(mapping_table_lock_);
  return mapping_table_.RemoveMapping(buffer.GetPointerValue());
}

// Note: We allow |handles| to repeat the same handle multiple times, since
// different flags may be specified.
// TODO(vtl): This incurs a performance cost in |RemoveWaiter()|. Analyze this
// more carefully and address it if necessary.
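//
// The overall flow: look up all the dispatchers, register a single |Waiter|
// with each of them via |AddWaiter()|, block in |Waiter::Wait()|, and then
// remove the waiter from every dispatcher it was added to, reporting the final
// handle signals states if requested.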
MojoResult Core::WaitManyInternal(const MojoHandle* handles,
                                  const MojoHandleSignals* signals,
                                  uint32_t num_handles,
                                  MojoDeadline deadline,
                                  uint32_t* result_index,
                                  HandleSignalsState* signals_states) {
  DCHECK_GT(num_handles, 0u);
  DCHECK_EQ(*result_index, static_cast<uint32_t>(-1));

  DispatcherVector dispatchers;
  dispatchers.reserve(num_handles);
  for (uint32_t i = 0; i < num_handles; i++) {
    scoped_refptr<Dispatcher> dispatcher = GetDispatcher(handles[i]);
    if (!dispatcher.get()) {
      *result_index = i;
      return MOJO_RESULT_INVALID_ARGUMENT;
    }
    dispatchers.push_back(dispatcher);
  }

  // TODO(vtl): Should make the waiter live (permanently) in TLS.
  Waiter waiter;
  waiter.Init();

  uint32_t i;
  MojoResult rv = MOJO_RESULT_OK;
  for (i = 0; i < num_handles; i++) {
    rv = dispatchers[i]->AddWaiter(
        &waiter, signals[i], i, signals_states ? &signals_states[i] : nullptr);
    if (rv != MOJO_RESULT_OK) {
      *result_index = i;
      break;
    }
  }
  uint32_t num_added = i;

  if (rv == MOJO_RESULT_ALREADY_EXISTS)
    rv = MOJO_RESULT_OK;  // The i-th one is already "triggered".
  else if (rv == MOJO_RESULT_OK)
    rv = waiter.Wait(deadline, result_index);

  // Make sure no other dispatchers try to wake |waiter| for the current
  // |Wait()|/|WaitMany()| call. (Only after doing this can |waiter| be
  // destroyed, but this would still be required if the waiter were in TLS.)
  for (i = 0; i < num_added; i++) {
    dispatchers[i]->RemoveWaiter(&waiter,
                                 signals_states ? &signals_states[i] : nullptr);
  }
  if (signals_states) {
    for (; i < num_handles; i++)
      signals_states[i] = dispatchers[i]->GetHandleSignalsState();
  }

  return rv;
}

}  // namespace system
}  // namespace mojo