| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "mojo/public/cpp/bindings/lib/multiplex_router.h" | 5 #include "mojo/public/cpp/bindings/lib/multiplex_router.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 | 8 |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 106 matching lines...) | |
| 117 Task() {} | 117 Task() {} |
| 118 }; | 118 }; |
| 119 | 119 |
| 120 MultiplexRouter::MultiplexRouter(bool set_interface_id_namespace_bit, | 120 MultiplexRouter::MultiplexRouter(bool set_interface_id_namespace_bit, |
| 121 ScopedMessagePipeHandle message_pipe, | 121 ScopedMessagePipeHandle message_pipe, |
| 122 const MojoAsyncWaiter* waiter) | 122 const MojoAsyncWaiter* waiter) |
| 123 : RefCountedDeleteOnMessageLoop(base::MessageLoop::current() | 123 : RefCountedDeleteOnMessageLoop(base::MessageLoop::current() |
| 124 ->task_runner()), | 124 ->task_runner()), |
| 125 set_interface_id_namespace_bit_(set_interface_id_namespace_bit), | 125 set_interface_id_namespace_bit_(set_interface_id_namespace_bit), |
| 126 header_validator_(this), | 126 header_validator_(this), |
| 127 connector_(message_pipe.Pass(), Connector::MULTI_THREADED_SEND, waiter), | 127 connector_(std::move(message_pipe), |
| 128 Connector::MULTI_THREADED_SEND, |
| 129 waiter), |
| 128 encountered_error_(false), | 130 encountered_error_(false), |
| 129 control_message_handler_(this), | 131 control_message_handler_(this), |
| 130 control_message_proxy_(&connector_), | 132 control_message_proxy_(&connector_), |
| 131 next_interface_id_value_(1), | 133 next_interface_id_value_(1), |
| 132 testing_mode_(false) { | 134 testing_mode_(false) { |
| 133 connector_.set_incoming_receiver(&header_validator_); | 135 connector_.set_incoming_receiver(&header_validator_); |
| 134 connector_.set_connection_error_handler( | 136 connector_.set_connection_error_handler( |
| 135 [this]() { OnPipeConnectionError(); }); | 137 [this]() { OnPipeConnectionError(); }); |
| 136 } | 138 } |
| 137 | 139 |
| (...skipping 148 matching lines...) | |
| 286 connector_.RaiseError(); | 288 connector_.RaiseError(); |
| 287 } else { | 289 } else { |
| 288 task_runner_->PostTask(FROM_HERE, | 290 task_runner_->PostTask(FROM_HERE, |
| 289 base::Bind(&MultiplexRouter::RaiseError, this)); | 291 base::Bind(&MultiplexRouter::RaiseError, this)); |
| 290 } | 292 } |
| 291 } | 293 } |
| 292 | 294 |
| 293 scoped_ptr<AssociatedGroup> MultiplexRouter::CreateAssociatedGroup() { | 295 scoped_ptr<AssociatedGroup> MultiplexRouter::CreateAssociatedGroup() { |
| 294 scoped_ptr<AssociatedGroup> group(new AssociatedGroup); | 296 scoped_ptr<AssociatedGroup> group(new AssociatedGroup); |
| 295 group->router_ = this; | 297 group->router_ = this; |
| 296 return group.Pass(); | 298 return group; |
| 297 } | 299 } |
| 298 | 300 |
| 299 // static | 301 // static |
| 300 MultiplexRouter* MultiplexRouter::GetRouter(AssociatedGroup* associated_group) { | 302 MultiplexRouter* MultiplexRouter::GetRouter(AssociatedGroup* associated_group) { |
| 301 return associated_group->router_.get(); | 303 return associated_group->router_.get(); |
| 302 } | 304 } |
| 303 | 305 |
| 304 bool MultiplexRouter::HasAssociatedEndpoints() const { | 306 bool MultiplexRouter::HasAssociatedEndpoints() const { |
| 305 DCHECK(thread_checker_.CalledOnValidThread()); | 307 DCHECK(thread_checker_.CalledOnValidThread()); |
| 306 base::AutoLock locker(lock_); | 308 base::AutoLock locker(lock_); |
| (...skipping 86 matching lines...) | |
| 393 UpdateEndpointStateMayRemove(endpoint, PEER_ENDPOINT_CLOSED); | 395 UpdateEndpointStateMayRemove(endpoint, PEER_ENDPOINT_CLOSED); |
| 394 } | 396 } |
| 395 | 397 |
| 396 ProcessTasks(false); | 398 ProcessTasks(false); |
| 397 } | 399 } |
| 398 | 400 |
| 399 void MultiplexRouter::ProcessTasks(bool force_async) { | 401 void MultiplexRouter::ProcessTasks(bool force_async) { |
| 400 lock_.AssertAcquired(); | 402 lock_.AssertAcquired(); |
| 401 | 403 |
| 402 while (!tasks_.empty()) { | 404 while (!tasks_.empty()) { |
| 403 scoped_ptr<Task> task(tasks_.front().Pass()); | 405 scoped_ptr<Task> task(std::move(tasks_.front())); |
| 404 tasks_.pop_front(); | 406 tasks_.pop_front(); |
| 405 | 407 |
| 406 bool processed = task->IsNotifyErrorTask() | 408 bool processed = task->IsNotifyErrorTask() |
| 407 ? ProcessNotifyErrorTask(task.get(), &force_async) | 409 ? ProcessNotifyErrorTask(task.get(), &force_async) |
| 408 : ProcessIncomingMessageTask(task.get(), &force_async); | 410 : ProcessIncomingMessageTask(task.get(), &force_async); |
| 409 | 411 |
| 410 if (!processed) { | 412 if (!processed) { |
| 411 tasks_.push_front(task.Pass()); | 413 tasks_.push_front(std::move(task)); |
| 412 break; | 414 break; |
| 413 } | 415 } |
| 414 } | 416 } |
| 415 } | 417 } |
| 416 | 418 |
| 417 bool MultiplexRouter::ProcessNotifyErrorTask(Task* task, bool* force_async) { | 419 bool MultiplexRouter::ProcessNotifyErrorTask(Task* task, bool* force_async) { |
| 418 lock_.AssertAcquired(); | 420 lock_.AssertAcquired(); |
| 419 InterfaceEndpoint* endpoint = task->endpoint_to_notify.get(); | 421 InterfaceEndpoint* endpoint = task->endpoint_to_notify.get(); |
| 420 if (!endpoint->client()) | 422 if (!endpoint->client()) |
| 421 return true; | 423 return true; |
| (...skipping 59 matching lines...) | |
| 481 } | 483 } |
| 482 | 484 |
| 483 if (!endpoint->task_runner()->BelongsToCurrentThread() || *force_async) { | 485 if (!endpoint->task_runner()->BelongsToCurrentThread() || *force_async) { |
| 484 endpoint->task_runner()->PostTask( | 486 endpoint->task_runner()->PostTask( |
| 485 FROM_HERE, base::Bind(&MultiplexRouter::LockAndCallProcessTasks, this)); | 487 FROM_HERE, base::Bind(&MultiplexRouter::LockAndCallProcessTasks, this)); |
| 486 return false; | 488 return false; |
| 487 } | 489 } |
| 488 | 490 |
| 489 *force_async = true; | 491 *force_async = true; |
| 490 InterfaceEndpointClient* client = endpoint->client(); | 492 InterfaceEndpointClient* client = endpoint->client(); |
| 491 scoped_ptr<Message> owned_message = task->message.Pass(); | 493 scoped_ptr<Message> owned_message = std::move(task->message); |
| 492 bool result = false; | 494 bool result = false; |
| 493 { | 495 { |
| 494 // We must unlock before calling into |client| because it may call this | 496 // We must unlock before calling into |client| because it may call this |
| 495 // object within HandleIncomingMessage(). Holding the lock will lead to | 497 // object within HandleIncomingMessage(). Holding the lock will lead to |
| 496 // deadlock. | 498 // deadlock. |
| 497 // | 499 // |
| 498 // It is safe to call into |client| without the lock, because |client| is | 500 // It is safe to call into |client| without the lock, because |client| is |
| 499 // always accessed on the same thread, including in DetachEndpointClient(). | 501 // always accessed on the same thread, including in DetachEndpointClient(). |
| 500 base::AutoUnlock unlocker(lock_); | 502 base::AutoUnlock unlocker(lock_); |
| 501 result = client->HandleIncomingMessage(owned_message.get()); | 503 result = client->HandleIncomingMessage(owned_message.get()); |
| (...skipping 27 matching lines...) | |
| 529 } | 531 } |
| 530 | 532 |
| 531 void MultiplexRouter::RaiseErrorInNonTestingMode() { | 533 void MultiplexRouter::RaiseErrorInNonTestingMode() { |
| 532 lock_.AssertAcquired(); | 534 lock_.AssertAcquired(); |
| 533 if (!testing_mode_) | 535 if (!testing_mode_) |
| 534 RaiseError(); | 536 RaiseError(); |
| 535 } | 537 } |
| 536 | 538 |
| 537 } // namespace internal | 539 } // namespace internal |
| 538 } // namespace mojo | 540 } // namespace mojo |
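The change above is part of the Chromium-wide migration from the `.Pass()` method on move-only types (`scoped_ptr`, `ScopedMessagePipeHandle`) to standard C++11 `std::move()`, plus relying on the implicit move when returning a local (the `return group;` change in `CreateAssociatedGroup()`). The sketch below is illustrative only and is not code from this CL: `std::unique_ptr` and a bare `Task` struct stand in for Chromium's `scoped_ptr<Task>` to show the same ownership-transfer pattern used in `ProcessTasks()`.

```cpp
// Illustrative sketch only (std::unique_ptr and this Task struct are
// stand-ins, not the CL's types): transferring ownership with std::move()
// where the old code called .Pass().
#include <deque>
#include <memory>
#include <utility>

struct Task {
  int id = 0;
};

// Pops the front task, moving it out of the queue instead of copying it.
// Old idiom:  scoped_ptr<Task> task(tasks->front().Pass());
// New idiom:  scoped_ptr<Task> task(std::move(tasks->front()));
std::unique_ptr<Task> PopFrontTask(std::deque<std::unique_ptr<Task>>* tasks) {
  std::unique_ptr<Task> task = std::move(tasks->front());
  tasks->pop_front();
  // Returning a local unique_ptr moves implicitly; no .Pass() or std::move()
  // is needed, which mirrors the "return group;" change above.
  return task;
}

int main() {
  std::deque<std::unique_ptr<Task>> tasks;
  tasks.push_back(std::unique_ptr<Task>(new Task));
  std::unique_ptr<Task> task = PopFrontTask(&tasks);
  return task ? task->id : 1;
}
```

When a task cannot be processed, the real code re-queues it with `tasks_.push_front(std::move(task));`, which is the same pattern in reverse: `std::move()` only casts to an rvalue, so ownership actually transfers only when the receiving container or constructor takes the value.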