Index: mojo/system/proxy_message_pipe_endpoint.cc
diff --git a/mojo/system/proxy_message_pipe_endpoint.cc b/mojo/system/proxy_message_pipe_endpoint.cc
index f9b9e56e3da2d6da127a3a0a8d2f5c1a48d7a223..91a77602fe6f1c243895ef9bc55c59d0b1340109 100644
--- a/mojo/system/proxy_message_pipe_endpoint.cc
+++ b/mojo/system/proxy_message_pipe_endpoint.cc
@@ -51,10 +51,14 @@ bool ProxyMessagePipeEndpoint::OnPeerClose() {
   DCHECK(is_peer_open_);
 
   is_peer_open_ = false;
-  if (EnqueueMessage(MessageInTransit::Create(
-          MessageInTransit::kTypeMessagePipe,
-          MessageInTransit::kSubtypeMessagePipePeerClosed,
-          NULL, 0), NULL) != MOJO_RESULT_OK) {
+  MessageInTransit* message =
+      MessageInTransit::Create(MessageInTransit::kTypeMessagePipe,
+                               MessageInTransit::kSubtypeMessagePipePeerClosed,
+                               NULL, 0);
+  if (CanEnqueueMessage(message, NULL) == MOJO_RESULT_OK) {
+    EnqueueMessage(message, NULL);
+  } else {
+    message->Destroy();
     // TODO(vtl): Do something more sensible on error here?
     LOG(WARNING) << "Failed to send peer closed control message";
   }
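Note: the hunk above moves responsibility for destroying the control message to the caller, so exactly one party owns it on every path. The following standalone sketch (illustrative names, not Chromium code) shows the check-then-commit ownership pattern being adopted here:

// Standalone sketch of the two-phase pattern: a check that may fail but
// does not consume the message, then a commit that cannot fail and
// unconditionally takes ownership. Names are illustrative only.
#include <iostream>

struct Message {
  void Destroy() { delete this; }  // stand-in for MessageInTransit::Destroy()
};

// Phase 1: may fail, must not consume or mutate the message.
bool CanEnqueue(const Message* /*message*/) { return true; }

// Phase 2: cannot fail; unconditionally takes ownership of |message|.
void Enqueue(Message* message) {
  std::cout << "enqueued\n";
  message->Destroy();  // a real queue would store the pointer instead
}

int main() {
  Message* message = new Message;
  if (CanEnqueue(message)) {
    Enqueue(message);    // ownership transferred on success
  } else {
    message->Destroy();  // caller cleans up on failure
  }
  return 0;
}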
@@ -65,34 +69,39 @@ bool ProxyMessagePipeEndpoint::OnPeerClose() {
   return !paused_message_queue_.empty();
 }
 
-MojoResult ProxyMessagePipeEndpoint::EnqueueMessage(
-    MessageInTransit* message,
+MojoResult ProxyMessagePipeEndpoint::CanEnqueueMessage(
+    const MessageInTransit* /*message*/,
     const std::vector<Dispatcher*>* dispatchers) {
-  DCHECK(is_open_);
-  // If our (local) peer isn't open, we should only be enqueueing our own
-  // control messages.
-  DCHECK(is_peer_open_ ||
-         (message->type() == MessageInTransit::kTypeMessagePipe));
-
   // TODO(vtl): Support sending handles over OS pipes.
   if (dispatchers) {
-    message->Destroy();
     NOTIMPLEMENTED();
     return MOJO_RESULT_UNIMPLEMENTED;
   }
+  return MOJO_RESULT_OK;
+}
 
-  MojoResult rv = MOJO_RESULT_OK;
+void ProxyMessagePipeEndpoint::EnqueueMessage(
+    MessageInTransit* message,
+    std::vector<scoped_refptr<Dispatcher> >* dispatchers) {
+  DCHECK(is_open_);
+  // If our (local) peer isn't open, we should only be enqueueing our own
+  // control messages.
+  DCHECK(is_peer_open_ ||
+         (message->type() == MessageInTransit::kTypeMessagePipe));
+
+  // TODO(vtl)
+  DCHECK(!dispatchers || dispatchers->empty());
 
   if (is_running()) {
     message->set_source_id(local_id_);
     message->set_destination_id(remote_id_);
+    // TODO(vtl): Figure out error handling here (where it's rather late) --
+    // maybe move whatever checks we can into |CanEnqueueMessage()|.
     if (!channel_->WriteMessage(message))
-      rv = MOJO_RESULT_FAILED_PRECONDITION;
+      LOG(WARNING) << "Failed to write message to channel";
   } else {
     paused_message_queue_.push_back(message);
   }
-
-  return rv;
 }
 
 void ProxyMessagePipeEndpoint::Attach(scoped_refptr<Channel> channel,
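Note: the corresponding header change is not part of this patch. Judging only from the definitions above, the declarations in proxy_message_pipe_endpoint.h presumably now read roughly as follows (an inference from the .cc signatures, not the committed header). Making EnqueueMessage() return void pushes all failure reporting into the const check, so the commit step can take ownership unconditionally:

// Inferred from the .cc signatures above; the actual header may differ.
// CanEnqueueMessage() reports whether a message could be enqueued without
// consuming it; EnqueueMessage() then takes ownership of |message| and
// cannot fail from the caller's point of view.
MojoResult CanEnqueueMessage(const MessageInTransit* message,
                             const std::vector<Dispatcher*>* dispatchers);
void EnqueueMessage(MessageInTransit* message,
                    std::vector<scoped_refptr<Dispatcher> >* dispatchers);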
@@ -120,15 +129,17 @@ bool ProxyMessagePipeEndpoint::Run(MessageInTransit::EndpointId remote_id) {
   remote_id_ = remote_id;
   AssertConsistentState();
 
-  MojoResult result = MOJO_RESULT_OK;
   for (std::deque<MessageInTransit*>::iterator it =
            paused_message_queue_.begin();
        it != paused_message_queue_.end();
        ++it) {
-    result = EnqueueMessage(*it, NULL);
-    if (result != MOJO_RESULT_OK) {
+    if (CanEnqueueMessage(*it, NULL) == MOJO_RESULT_OK) {
+      EnqueueMessage(*it, NULL);
+    } else {
+      (*it)->Destroy();
       // TODO(vtl): Do something more sensible on error here?
       LOG(WARNING) << "Failed to send message";
+      // TODO(vtl): Abort?
     }
   }
   paused_message_queue_.clear();
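Note: Run() applies the same split when flushing paused_message_queue_, destroying any message the check rejects instead of leaking it. A minimal self-contained sketch of that drain pattern (stub types, not Chromium code):

#include <deque>
#include <iostream>

struct Message {
  explicit Message(int id) : id(id) {}
  int id;
};

bool CanEnqueue(const Message* message) { return message->id >= 0; }

void Enqueue(Message* message) {  // takes ownership
  std::cout << "sent " << message->id << "\n";
  delete message;
}

int main() {
  std::deque<Message*> paused;
  paused.push_back(new Message(1));
  paused.push_back(new Message(-1));  // will be rejected by the check
  for (std::deque<Message*>::iterator it = paused.begin();
       it != paused.end(); ++it) {
    if (CanEnqueue(*it))
      Enqueue(*it);  // ownership transferred on success
    else
      delete *it;    // destroy rejected messages rather than leak them
  }
  paused.clear();
  return 0;
}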
|