Chromium Code Reviews

Unified diff: mojo/system/channel.cc

Issue 240133005: Mojo: Make some attempts towards fixing remote message pipe closure. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 8 months ago

 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "mojo/system/channel.h"

 #include "base/basictypes.h"
 #include "base/bind.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/strings/stringprintf.h"
 #include "mojo/system/message_pipe_endpoint.h"

 namespace mojo {
 namespace system {

 COMPILE_ASSERT(Channel::kBootstrapEndpointId !=
                    MessageInTransit::kInvalidEndpointId,
                kBootstrapEndpointId_is_invalid);

 STATIC_CONST_MEMBER_DEFINITION const MessageInTransit::EndpointId
     Channel::kBootstrapEndpointId;

-Channel::EndpointInfo::EndpointInfo() {
+Channel::EndpointInfo::EndpointInfo()
+    : state(STATE_NORMAL),
+      port() {
 }

 Channel::EndpointInfo::EndpointInfo(scoped_refptr<MessagePipe> message_pipe,
                                     unsigned port)
-    : message_pipe(message_pipe),
+    : state(STATE_NORMAL),
+      message_pipe(message_pipe),
       port(port) {
 }

 Channel::EndpointInfo::~EndpointInfo() {
 }

 Channel::Channel()
     : next_local_id_(kBootstrapEndpointId) {
 }

(...skipping 15 matching lines...)
 }

 void Channel::Shutdown() {
   DCHECK(creation_thread_checker_.CalledOnValidThread());

   base::AutoLock locker(lock_);
   DCHECK(raw_channel_.get());
   raw_channel_->Shutdown();
   raw_channel_.reset();

-  // This should not occur, but it probably mostly results in leaking;
-  // (Explicitly clearing the |local_id_to_endpoint_info_map_| would likely put
-  // things in an inconsistent state, which is worse. Note that if the map is
-  // nonempty, we probably won't be destroyed, since the endpoints have a
-  // reference to us.)
-  LOG_IF(ERROR, !local_id_to_endpoint_info_map_.empty())
-      << "Channel shutting down with endpoints still attached";
-  // TODO(vtl): This currently blows up, but the fix will be nontrivial.
-  // crbug.com/360081
-  //DCHECK(local_id_to_endpoint_info_map_.empty());
+  // This shouldn't usually occur, but it should be okay if all the endpoints
+  // are zombies (i.e., waiting to be removed, and not actually having any
+  // references to |MessagePipe|s).
+  // TODO(vtl): To make this actually okay, we need to make sure the other side
+  // channels being killed off properly.
+  LOG_IF(WARNING, !local_id_to_endpoint_info_map_.empty())
+      << "Channel shutting down with endpoints still attached "
+         "(hopefully all zombies)";
darin (slow to review) 2014/04/16 21:33:43  nit: indentation
viettrungluu 2014/04/16 22:25:01  Done.
+
+#ifndef NDEBUG
+  // Check that everything left is a zombie. (Note: We don't explicitly clear
+  // |local_id_to_endpoint_info_map_|, since that would likely put us in an
+  // inconsistent state if we have non-zombies.)
+  for (IdToEndpointInfoMap::const_iterator it =
+           local_id_to_endpoint_info_map_.begin();
+       it != local_id_to_endpoint_info_map_.end();
+       ++it) {
+    DCHECK_NE(it->second.state, EndpointInfo::STATE_NORMAL);
+    DCHECK(!it->second.message_pipe.get());
+  }
+#endif
 }

 MessageInTransit::EndpointId Channel::AttachMessagePipeEndpoint(
-    scoped_refptr<MessagePipe> message_pipe, unsigned port) {
+    scoped_refptr<MessagePipe> message_pipe,
+    unsigned port) {
+  DCHECK(message_pipe);
   DCHECK(port == 0 || port == 1);
-  // Note: This assertion must *not* be done under |lock_|.
-  DCHECK_EQ(message_pipe->GetType(port), MessagePipeEndpoint::kTypeProxy);

   MessageInTransit::EndpointId local_id;
   {
     base::AutoLock locker(lock_);

     while (next_local_id_ == MessageInTransit::kInvalidEndpointId ||
            local_id_to_endpoint_info_map_.find(next_local_id_) !=
                local_id_to_endpoint_info_map_.end())
       next_local_id_++;

     local_id = next_local_id_;
     next_local_id_++;

     // TODO(vtl): Use emplace when we move to C++11 unordered_maps. (It'll avoid
     // some expensive reference count increment/decrements.) Once this is done,
     // we should be able to delete |EndpointInfo|'s default constructor.
     local_id_to_endpoint_info_map_[local_id] = EndpointInfo(message_pipe, port);
   }

-  message_pipe->Attach(port, scoped_refptr<Channel>(this), local_id);
-  return local_id;
+  // This might fail if that port got an |OnPeerClose()| before attaching.
+  if (message_pipe->Attach(port, scoped_refptr<Channel>(this), local_id))
+    return local_id;
+
+  // Note: If it failed, quite possibly the endpoint info was removed from that
+  // map (there's a race between us adding it to the map above and calling
+  // |Attach()|). And even if an entry exists for |local_id|, we need to check
+  // that it's the one we added (and not some other one that was added since).
+  {
+    base::AutoLock locker(lock_);
+    IdToEndpointInfoMap::iterator it =
+        local_id_to_endpoint_info_map_.find(local_id);
+    if (it != local_id_to_endpoint_info_map_.end() &&
+        it->second.message_pipe.get() == message_pipe.get() &&
+        it->second.port == port) {
+      DCHECK_EQ(it->second.state, EndpointInfo::STATE_NORMAL);
+      // TODO(vtl): FIXME -- This is wrong. We need to specify (to
+      // |AttachMessagePipeEndpoint()| who's going to be responsible for calling
+      // |RunMessagePipeEndpoint()| ("us", or the remote by sending us a
+      // |kSubtypeChannelRunMessagePipeEndpoint|). If the remote is going to
+      // run, then we'll get messages to an "invalid" local ID (for running, for
+      // removal).
+      local_id_to_endpoint_info_map_.erase(it);
+    }
+  }
+  return MessageInTransit::kInvalidEndpointId;
 }

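For readers wondering what the emplace TODO above is getting at: operator[] has to default-construct an EndpointInfo and then copy-assign a temporary over it, and each copy of the scoped_refptr member costs a reference-count increment/decrement. The following is a minimal, hypothetical illustration of the difference using std::unordered_map and a dummy value type; the names Info, name, and port below are invented for the example and are not from this patch.

// Hypothetical sketch, not part of this change: operator[] versus emplace().
#include <string>
#include <unordered_map>
#include <utility>

struct Info {
  Info() {}  // Default constructor, needed only for the operator[] pattern.
  Info(std::string name_in, unsigned port_in) : name(name_in), port(port_in) {}
  std::string name;
  unsigned port;
};

int main() {
  std::unordered_map<unsigned, Info> map;

  // Pattern used in the patch: default-construct the mapped value, then
  // assign a temporary over it (extra construction plus assignment).
  map[1] = Info("pipe", 0);

  // C++11 alternative: construct the mapped value directly in the node, so
  // there is no default construction and no assignment of a temporary.
  map.emplace(std::piecewise_construct,
              std::forward_as_tuple(2),
              std::forward_as_tuple("pipe", 1));
  return 0;
}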
 bool Channel::RunMessagePipeEndpoint(MessageInTransit::EndpointId local_id,
                                      MessageInTransit::EndpointId remote_id) {
   EndpointInfo endpoint_info;
   {
     base::AutoLock locker(lock_);

     IdToEndpointInfoMap::const_iterator it =
         local_id_to_endpoint_info_map_.find(local_id);
     if (it == local_id_to_endpoint_info_map_.end())
       return false;
     endpoint_info = it->second;
   }

+  // Assume that this was in response to |kSubtypeChannelRunMessagePipeEndpoint|
+  // and ignore it.
+  if (endpoint_info.state != EndpointInfo::STATE_NORMAL) {
+    DVLOG(2) << "Ignoring run message pipe endpoint for zombie endpoint "
+                "(local ID " << local_id << ", remote ID " << remote_id << ")";
+    return true;
+  }
+
   // TODO(vtl): FIXME -- We need to handle the case that message pipe is already
   // running when we're here due to |kSubtypeChannelRunMessagePipeEndpoint|).
   endpoint_info.message_pipe->Run(endpoint_info.port, remote_id);
   return true;
 }

 void Channel::RunRemoteMessagePipeEndpoint(
     MessageInTransit::EndpointId local_id,
     MessageInTransit::EndpointId remote_id) {
   base::AutoLock locker(lock_);
-
   DCHECK(local_id_to_endpoint_info_map_.find(local_id) !=
          local_id_to_endpoint_info_map_.end());
-
-  scoped_ptr<MessageInTransit> message(new MessageInTransit(
-      MessageInTransit::kTypeChannel,
-      MessageInTransit::kSubtypeChannelRunMessagePipeEndpoint,
-      0, 0, NULL));
-  message->set_source_id(local_id);
-  message->set_destination_id(remote_id);
-  if (!raw_channel_->WriteMessage(message.Pass())) {
-    // TODO(vtl): FIXME -- I guess we should report the error back somehow so
-    // that the dispatcher can be closed?
-    CHECK(false) << "Not yet handled";
+  if (!SendControlMessage(
+          MessageInTransit::kSubtypeChannelRunMessagePipeEndpoint,
+          local_id, remote_id)) {
+    HandleLocalError(base::StringPrintf(
+        "Failed to send message to run remote message pipe endpoint (local ID "
+        "%u, remote ID %u)",
+        static_cast<unsigned>(local_id), static_cast<unsigned>(remote_id)));
   }
 }

 bool Channel::WriteMessage(scoped_ptr<MessageInTransit> message) {
   base::AutoLock locker(lock_);
   if (!raw_channel_.get()) {
     // TODO(vtl): I think this is probably not an error condition, but I should
     // think about it (and the shutdown sequence) more carefully.
     LOG(WARNING) << "WriteMessage() after shutdown";
     return false;
   }

   return raw_channel_->WriteMessage(message.Pass());
 }

 bool Channel::IsWriteBufferEmpty() {
   base::AutoLock locker(lock_);
   DCHECK(raw_channel_.get());
   return raw_channel_->IsWriteBufferEmpty();
 }

-void Channel::DetachMessagePipeEndpoint(MessageInTransit::EndpointId local_id) {
+void Channel::DetachMessagePipeEndpoint(
+    MessageInTransit::EndpointId local_id,
+    MessageInTransit::EndpointId remote_id) {
   DCHECK_NE(local_id, MessageInTransit::kInvalidEndpointId);

-  base::AutoLock locker_(lock_);
-  local_id_to_endpoint_info_map_.erase(local_id);
+  bool should_send_remove_message = false;
+  {
+    base::AutoLock locker_(lock_);
+    IdToEndpointInfoMap::iterator it =
+        local_id_to_endpoint_info_map_.find(local_id);
+    DCHECK(it != local_id_to_endpoint_info_map_.end());
+
+    switch (it->second.state) {
+      case EndpointInfo::STATE_NORMAL:
+        it->second.state = EndpointInfo::STATE_WAIT_REMOTE_REMOVE_ACK;
+        it->second.message_pipe = NULL;
+        should_send_remove_message =
+            (remote_id != MessageInTransit::kInvalidEndpointId);
+        break;
+      case EndpointInfo::STATE_WAIT_LOCAL_DETACH:
+        local_id_to_endpoint_info_map_.erase(it);
+        break;
+      case EndpointInfo::STATE_WAIT_REMOTE_REMOVE_ACK:
+        NOTREACHED();
+        break;
+      case EndpointInfo::STATE_WAIT_LOCAL_DETACH_AND_REMOTE_REMOVE_ACK:
+        it->second.state = EndpointInfo::STATE_WAIT_REMOTE_REMOVE_ACK;
+        break;
+    }
+  }
+  if (!should_send_remove_message)
+    return;
+
+  if (!SendControlMessage(
+          MessageInTransit::kSubtypeChannelRemoveMessagePipeEndpoint,
+          local_id, remote_id)) {
+    HandleLocalError(base::StringPrintf(
+        "Failed to send message to remove remote message pipe endpoint (local "
+        "ID %u, remote ID %u)",
+        static_cast<unsigned>(local_id), static_cast<unsigned>(remote_id)));
+  }
 }

 Channel::~Channel() {
   // The channel should have been shut down first.
   DCHECK(!raw_channel_.get());
-
-  DLOG_IF(WARNING, !local_id_to_endpoint_info_map_.empty())
-      << "Destroying Channel with " << local_id_to_endpoint_info_map_.size()
-      << " endpoints still present";
 }

 void Channel::OnReadMessage(const MessageInTransit::View& message_view) {
   // Note: |ValidateReadMessage()| will call |HandleRemoteError()| if necessary.
   if (!ValidateReadMessage(message_view))
     return;

   switch (message_view.type()) {
     case MessageInTransit::kTypeMessagePipeEndpoint:
     case MessageInTransit::kTypeMessagePipe:
(...skipping 55 matching lines...)
       // This is strongly indicative of some problem. However, it's not a fatal
       // error, since it may indicate a buggy (or hostile) remote process. Don't
       // die even for Debug builds, since handling this properly needs to be
       // tested (TODO(vtl)).
       DLOG(ERROR) << "This should not happen under normal operation.";
       return;
     }
     endpoint_info = it->second;
   }

+  // Ignore messages for zombie endpoints (not an error).
+  if (endpoint_info.state != EndpointInfo::STATE_NORMAL) {
+    DVLOG(2) << "Ignoring downstream message for zombie endpoint (local ID = "
+             << local_id << ", remote ID = " << message_view.source_id() << ")";
+    return;
+  }
+
   // We need to duplicate the message, because |EnqueueMessage()| will take
   // ownership of it.
   scoped_ptr<MessageInTransit> message(new MessageInTransit(message_view));
   message->DeserializeDispatchers(this);
   MojoResult result = endpoint_info.message_pipe->EnqueueMessage(
       MessagePipe::GetPeerPort(endpoint_info.port), message.Pass(), NULL);
   if (result != MOJO_RESULT_OK) {
     // TODO(vtl): This might be a "non-error", e.g., if the destination endpoint
     // has been closed (in an unavoidable race). This might also be a "remote"
     // error, e.g., if the remote side is sending invalid control messages (to
     // the message pipe).
     HandleLocalError(base::StringPrintf(
-        "Failed to enqueue message to local destination ID %u (result %d)",
+        "Failed to enqueue message to local ID %u (result %d)",
         static_cast<unsigned>(local_id), static_cast<int>(result)));
     return;
   }
 }

 void Channel::OnReadMessageForChannel(
     const MessageInTransit::View& message_view) {
   DCHECK_EQ(message_view.type(), MessageInTransit::kTypeChannel);

   switch (message_view.subtype()) {
     case MessageInTransit::kSubtypeChannelRunMessagePipeEndpoint:
-      // TODO(vtl): FIXME -- Error handling (also validation of
-      // source/destination IDs).
-      DVLOG(2) << "Handling channel message to run message pipe (local ID = "
-               << message_view.destination_id() << ", remote ID = "
+      DVLOG(2) << "Handling channel message to run message pipe (local ID "
+               << message_view.destination_id() << ", remote ID "
                << message_view.source_id() << ")";
       if (!RunMessagePipeEndpoint(message_view.destination_id(),
-                                  message_view.source_id()))
-        HandleRemoteError("Received invalid channel run message pipe message");
+                                  message_view.source_id())) {
+        HandleRemoteError(
+            "Received invalid channel message to run message pipe");
+      }
+      break;
+    case MessageInTransit::kSubtypeChannelRemoveMessagePipeEndpoint:
+      DVLOG(2) << "Handling channel message to remove message pipe (local ID "
+               << message_view.destination_id() << ", remote ID "
+               << message_view.source_id() << ")";
+      if (!RemoveMessagePipeEndpoint(message_view.destination_id(),
+                                     message_view.source_id())) {
+        HandleRemoteError(
+            "Received invalid channel message to remove message pipe");
+      }
+      break;
+    case MessageInTransit::kSubtypeChannelRemoveMessagePipeEndpointAck:
+      DVLOG(2) << "Handling channel message to ack remove message pipe (local "
+                  "ID "
+               << message_view.destination_id() << ", remote ID "
+               << message_view.source_id() << ")";
+      if (!RemoveMessagePipeEndpoint(message_view.destination_id(),
+                                     message_view.source_id())) {
+        HandleRemoteError(
+            "Received invalid channel message to ack remove message pipe");
+      }
       break;
     default:
       HandleRemoteError("Received invalid channel message");
       NOTREACHED();
       break;
   }
 }

+bool Channel::RemoveMessagePipeEndpoint(
+    MessageInTransit::EndpointId local_id,
+    MessageInTransit::EndpointId remote_id) {
+  EndpointInfo endpoint_info;
+  {
+    base::AutoLock locker(lock_);
+
+    IdToEndpointInfoMap::iterator it =
+        local_id_to_endpoint_info_map_.find(local_id);
+    if (it == local_id_to_endpoint_info_map_.end()) {
+      DVLOG(2) << "Remove message pipe error: not found";
+      return false;
+    }
+
+    // If it's waiting for the remove ack, just do it and return.
+    if (it->second.state == EndpointInfo::STATE_WAIT_REMOTE_REMOVE_ACK) {
+      local_id_to_endpoint_info_map_.erase(it);
+      return true;
+    }
+
+    if (it->second.state != EndpointInfo::STATE_NORMAL) {
+      DVLOG(2) << "Remove message pipe error: wrong state";
+      return false;
+    }
+
+    it->second.state = EndpointInfo::STATE_WAIT_LOCAL_DETACH;
+    endpoint_info = it->second;
+    it->second.message_pipe = NULL;
+  }
+
+  if (!SendControlMessage(
+          MessageInTransit::kSubtypeChannelRemoveMessagePipeEndpointAck,
+          local_id, remote_id)) {
+    HandleLocalError(base::StringPrintf(
+        "Failed to send message to remove remote message pipe endpoint ack "
+        "(local ID %u, remote ID %u)",
+        static_cast<unsigned>(local_id), static_cast<unsigned>(remote_id)));
+  }
+
+  endpoint_info.message_pipe->OnRemove(endpoint_info.port);
+
+  return true;
+}
+
+bool Channel::SendControlMessage(MessageInTransit::Subtype subtype,
+                                 MessageInTransit::EndpointId local_id,
+                                 MessageInTransit::EndpointId remote_id) {
+  DVLOG(2) << "Sending channel control message: subtype " << subtype
+           << ", local ID " << local_id << ", remote ID " << remote_id;
+  scoped_ptr<MessageInTransit> message(new MessageInTransit(
+      MessageInTransit::kTypeChannel, subtype, 0, 0, NULL));
+  message->set_source_id(local_id);
+  message->set_destination_id(remote_id);
+  return raw_channel_->WriteMessage(message.Pass());
+}
+
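The detach/remove handshake added by this patch is spread across DetachMessagePipeEndpoint(), RemoveMessagePipeEndpoint(), and OnReadMessageForChannel(), so the endpoint lifecycle can be hard to follow from the hunks alone. The following is a compact sketch inferred from this diff, not code in the patch; ERASED is an invented label meaning "entry removed from local_id_to_endpoint_info_map_", and transitions into STATE_WAIT_LOCAL_DETACH_AND_REMOTE_REMOVE_ACK are not visible in this file.

// Sketch inferred from this diff (not part of the patch).
#include <cassert>

enum State {
  STATE_NORMAL,
  STATE_WAIT_LOCAL_DETACH,
  STATE_WAIT_REMOTE_REMOVE_ACK,
  STATE_WAIT_LOCAL_DETACH_AND_REMOTE_REMOVE_ACK,
  ERASED  // Invented label: entry erased from the map.
};

// The local side detaches its endpoint (DetachMessagePipeEndpoint()); leaving
// STATE_NORMAL also sends a "remove message pipe endpoint" control message.
State OnLocalDetach(State s) {
  switch (s) {
    case STATE_NORMAL:
      return STATE_WAIT_REMOTE_REMOVE_ACK;
    case STATE_WAIT_LOCAL_DETACH:
      return ERASED;
    case STATE_WAIT_LOCAL_DETACH_AND_REMOTE_REMOVE_ACK:
      return STATE_WAIT_REMOTE_REMOVE_ACK;
    default:
      assert(false);  // STATE_WAIT_REMOTE_REMOVE_ACK is NOTREACHED() in the patch.
      return s;
  }
}

// A "remove" or "remove ack" arrives from the remote side
// (RemoveMessagePipeEndpoint()); leaving STATE_NORMAL also sends the ack back.
State OnRemoteRemoveOrAck(State s) {
  switch (s) {
    case STATE_WAIT_REMOTE_REMOVE_ACK:
      return ERASED;                   // The ack we were waiting for.
    case STATE_NORMAL:
      return STATE_WAIT_LOCAL_DETACH;  // Remote-initiated removal.
    default:
      return s;  // The patch reports this case via HandleRemoteError().
  }
}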
 void Channel::HandleRemoteError(const base::StringPiece& error_message) {
   // TODO(vtl): Is this how we really want to handle this? Probably we want to
   // terminate the connection, since it's spewing invalid stuff.
   LOG(WARNING) << error_message;
 }

 void Channel::HandleLocalError(const base::StringPiece& error_message) {
   // TODO(vtl): Is this how we really want to handle this?
+  // Sometimes we'll want to propagate the error back to the message pipe
+  // (endpoint), and notify it that the remote is (effectively) closed.
+  // Sometimes we'll want to kill the channel (and notify all the endpoints that
+  // their remotes are dead).
   LOG(WARNING) << error_message;
 }

 }  // namespace system
 }  // namespace mojo