Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1237)

Side by Side Diff: mojo/edk/system/channel.cc

Issue 814543006: Move //mojo/{public, edk} underneath //third_party (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase Created 5 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « mojo/edk/system/channel.h ('k') | mojo/edk/system/channel_endpoint.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "mojo/edk/system/channel.h"
6
7 #include <algorithm>
8
9 #include "base/bind.h"
10 #include "base/logging.h"
11 #include "base/macros.h"
12 #include "base/strings/stringprintf.h"
13 #include "mojo/edk/embedder/platform_handle_vector.h"
14 #include "mojo/edk/system/endpoint_relayer.h"
15 #include "mojo/edk/system/transport_data.h"
16
17 namespace mojo {
18 namespace system {
19
namespace {

// Wire format written by |Channel::SerializeEndpoint*()| and read by
// |Channel::DeserializeEndpoint()|. |Channel::GetSerializedEndpointSize()|
// reports |sizeof(SerializedEndpoint)|.
struct SerializedEndpoint {
  // This is the endpoint ID on the receiving side, and should be a "remote ID".
  // (The receiving side should already have had an endpoint attached and been
  // run via the |Channel|s. This endpoint will have both IDs assigned, so this
  // ID is only needed to associate that endpoint with a particular dispatcher.)
  ChannelEndpointId receiver_endpoint_id;
};

}  // namespace
31
// Note: |platform_support| is stored as a raw pointer (never owned), so it
// must outlive this object.
Channel::Channel(embedder::PlatformSupport* platform_support)
    : platform_support_(platform_support),
      is_running_(false),
      is_shutting_down_(false),
      channel_manager_(nullptr) {
}
38
// Takes ownership of |raw_channel| and starts it, with |this| as its delegate
// (so |OnReadMessage()|/|OnError()| will be called). Must be called on the
// creation thread, before the object is used from other threads.
void Channel::Init(scoped_ptr<RawChannel> raw_channel) {
  DCHECK(creation_thread_checker_.CalledOnValidThread());
  DCHECK(raw_channel);

  // No need to take |lock_|, since this must be called before this object
  // becomes thread-safe.
  DCHECK(!is_running_);
  raw_channel_ = raw_channel.Pass();
  raw_channel_->Init(this);
  is_running_ = true;
}
50
51 void Channel::SetChannelManager(ChannelManager* channel_manager) {
52 DCHECK(channel_manager);
53
54 base::AutoLock locker(lock_);
55 DCHECK(!is_shutting_down_);
56 DCHECK(!channel_manager_);
57 channel_manager_ = channel_manager;
58 }
59
60 void Channel::Shutdown() {
61 DCHECK(creation_thread_checker_.CalledOnValidThread());
62
63 IdToEndpointMap to_destroy;
64 {
65 base::AutoLock locker(lock_);
66 if (!is_running_)
67 return;
68
69 // Note: Don't reset |raw_channel_|, in case we're being called from within
70 // |OnReadMessage()| or |OnError()|.
71 raw_channel_->Shutdown();
72 is_running_ = false;
73
74 // We need to deal with it outside the lock.
75 std::swap(to_destroy, local_id_to_endpoint_map_);
76 }
77
78 size_t num_live = 0;
79 size_t num_zombies = 0;
80 for (IdToEndpointMap::iterator it = to_destroy.begin();
81 it != to_destroy.end(); ++it) {
82 if (it->second) {
83 num_live++;
84 it->second->DetachFromChannel();
85 } else {
86 num_zombies++;
87 }
88 }
89 DVLOG_IF(2, num_live || num_zombies) << "Shut down Channel with " << num_live
90 << " live endpoints and " << num_zombies
91 << " zombies";
92 }
93
94 void Channel::WillShutdownSoon() {
95 base::AutoLock locker(lock_);
96 is_shutting_down_ = true;
97 channel_manager_ = nullptr;
98 }
99
// Attaches and runs |endpoint| as the channel's first endpoint, using the
// well-known bootstrap ID for both the local and the remote side (so no ID
// exchange message is needed for it).
void Channel::SetBootstrapEndpoint(scoped_refptr<ChannelEndpoint> endpoint) {
  DCHECK(endpoint);

  // Used for both local and remote IDs.
  ChannelEndpointId bootstrap_id = ChannelEndpointId::GetBootstrap();

  {
    base::AutoLock locker(lock_);

    DLOG_IF(WARNING, is_shutting_down_)
        << "SetBootstrapEndpoint() while shutting down";

    // Bootstrap endpoint should be the first.
    DCHECK(local_id_to_endpoint_map_.empty());

    local_id_to_endpoint_map_[bootstrap_id] = endpoint;
  }

  // Run the endpoint outside the lock.
  endpoint->AttachAndRun(this, bootstrap_id, bootstrap_id);
}
120
// Hands |message| to the underlying |RawChannel| for writing. Returns false
// (dropping the message) if the channel is no longer running. Thread-safe
// (takes |lock_|).
bool Channel::WriteMessage(scoped_ptr<MessageInTransit> message) {
  base::AutoLock locker(lock_);
  if (!is_running_) {
    // TODO(vtl): I think this is probably not an error condition, but I should
    // think about it (and the shutdown sequence) more carefully.
    LOG(WARNING) << "WriteMessage() after shutdown";
    return false;
  }

  DLOG_IF(WARNING, is_shutting_down_) << "WriteMessage() while shutting down";
  return raw_channel_->WriteMessage(message.Pass());
}
133
134 bool Channel::IsWriteBufferEmpty() {
135 base::AutoLock locker(lock_);
136 if (!is_running_)
137 return true;
138 return raw_channel_->IsWriteBufferEmpty();
139 }
140
141 void Channel::DetachEndpoint(ChannelEndpoint* endpoint,
142 ChannelEndpointId local_id,
143 ChannelEndpointId remote_id) {
144 DCHECK(endpoint);
145 DCHECK(local_id.is_valid());
146
147 if (!remote_id.is_valid())
148 return; // Nothing to do.
149
150 {
151 base::AutoLock locker_(lock_);
152 if (!is_running_)
153 return;
154
155 IdToEndpointMap::iterator it = local_id_to_endpoint_map_.find(local_id);
156 // We detach immediately if we receive a remove message, so it's possible
157 // that the local ID is no longer in |local_id_to_endpoint_map_|, or even
158 // that it's since been reused for another endpoint. In both cases, there's
159 // nothing more to do.
160 if (it == local_id_to_endpoint_map_.end() || it->second.get() != endpoint)
161 return;
162
163 DCHECK(it->second);
164 it->second = nullptr;
165
166 // Send a remove message outside the lock.
167 }
168
169 if (!SendControlMessage(MessageInTransit::kSubtypeChannelRemoveEndpoint,
170 local_id, remote_id)) {
171 HandleLocalError(base::StringPrintf(
172 "Failed to send message to remove remote endpoint (local ID %u, remote "
173 "ID %u)",
174 static_cast<unsigned>(local_id.value()),
175 static_cast<unsigned>(remote_id.value())));
176 }
177 }
178
// Number of bytes a caller must provide to |SerializeEndpoint*()|'s
// |destination| buffer.
size_t Channel::GetSerializedEndpointSize() const {
  return sizeof(SerializedEndpoint);
}
182
// Serializes an endpoint whose peer is already closed. The endpoint created
// by the delegated call has no client; its returned reference is
// intentionally dropped here.
void Channel::SerializeEndpointWithClosedPeer(
    void* destination,
    MessageInTransitQueue* message_queue) {
  // We can actually just pass no client to |SerializeEndpointWithLocalPeer()|.
  SerializeEndpointWithLocalPeer(destination, message_queue, nullptr, 0);
}
189
// Creates a |ChannelEndpoint| for |endpoint_client| (which may be null; see
// |SerializeEndpointWithClosedPeer()|), attaches and runs it on this channel,
// and writes the receiving side's lookup ID into |destination| (which must
// have room for |GetSerializedEndpointSize()| bytes). Returns the new
// endpoint.
scoped_refptr<ChannelEndpoint> Channel::SerializeEndpointWithLocalPeer(
    void* destination,
    MessageInTransitQueue* message_queue,
    ChannelEndpointClient* endpoint_client,
    unsigned endpoint_client_port) {
  DCHECK(destination);
  // Allow |endpoint_client| to be null, for use by
  // |SerializeEndpointWithClosedPeer()|.

  scoped_refptr<ChannelEndpoint> endpoint(new ChannelEndpoint(
      endpoint_client, endpoint_client_port, message_queue));

  SerializedEndpoint* s = static_cast<SerializedEndpoint*>(destination);
  // |AttachAndRunEndpoint()| returns the remote ID -- the ID the receiving
  // side will use to find this endpoint.
  s->receiver_endpoint_id = AttachAndRunEndpoint(endpoint);
  DVLOG(2) << "Serializing endpoint with local or closed peer (remote ID = "
           << s->receiver_endpoint_id << ")";

  return endpoint;
}
209
// Serializes an endpoint whose peer lives on another channel. Since direct
// cross-channel passing isn't implemented, an |EndpointRelayer| is inserted
// to proxy between a new endpoint on this channel (relayer port 0) and
// |peer_endpoint| (relayer port 1).
void Channel::SerializeEndpointWithRemotePeer(
    void* destination,
    MessageInTransitQueue* message_queue,
    scoped_refptr<ChannelEndpoint> peer_endpoint) {
  DCHECK(destination);
  DCHECK(peer_endpoint);

  DLOG(WARNING) << "Direct message pipe passing across multiple channels not "
                   "yet implemented; will proxy";
  // Create and set up an |EndpointRelayer| to proxy.
  // TODO(vtl): If we were to own/track the relayer directly (rather than owning
  // it via its |ChannelEndpoint|s), then we might be able to make
  // |ChannelEndpoint|'s |client_| pointer a raw pointer.
  scoped_refptr<EndpointRelayer> relayer(new EndpointRelayer());
  scoped_refptr<ChannelEndpoint> endpoint(
      new ChannelEndpoint(relayer.get(), 0, message_queue));
  relayer->Init(endpoint.get(), peer_endpoint.get());
  peer_endpoint->ReplaceClient(relayer.get(), 1);

  SerializedEndpoint* s = static_cast<SerializedEndpoint*>(destination);
  s->receiver_endpoint_id = AttachAndRunEndpoint(endpoint);
  DVLOG(2) << "Serializing endpoint with remote peer (remote ID = "
           << s->receiver_endpoint_id << ")";
}
234
// Claims (and removes) the |IncomingEndpoint| registered by
// |OnAttachAndRunEndpoint()| under the ID stored in |source|. Returns null if
// no such endpoint is registered (e.g., invalid or unknown ID).
scoped_refptr<IncomingEndpoint> Channel::DeserializeEndpoint(
    const void* source) {
  const SerializedEndpoint* s = static_cast<const SerializedEndpoint*>(source);
  ChannelEndpointId local_id = s->receiver_endpoint_id;
  // No need to check the validity of |local_id| -- if it's not valid, it simply
  // won't be in |incoming_endpoints_|.
  DVLOG_IF(2, !local_id.is_valid() || !local_id.is_remote())
      << "Attempt to get incoming endpoint for invalid ID " << local_id;

  base::AutoLock locker(lock_);

  auto it = incoming_endpoints_.find(local_id);
  if (it == incoming_endpoints_.end()) {
    LOG(ERROR) << "Failed to deserialize endpoint (ID = " << local_id << ")";
    return nullptr;
  }

  DVLOG(2) << "Deserializing endpoint (new local ID = " << local_id << ")";

  // Swap the reference out of the map (avoids an extra ref-count round trip)
  // before erasing the entry.
  scoped_refptr<IncomingEndpoint> rv;
  rv.swap(it->second);
  incoming_endpoints_.erase(it);
  return rv;
}
259
// Size of a serialized platform handle; delegated to the underlying
// |RawChannel|.
size_t Channel::GetSerializedPlatformHandleSize() const {
  return raw_channel_->GetSerializedPlatformHandleSize();
}
263
// Destructor only sanity-checks state; all teardown happens in |Shutdown()|.
Channel::~Channel() {
  // The channel should have been shut down first.
  DCHECK(!is_running_);
}
268
269 void Channel::OnReadMessage(
270 const MessageInTransit::View& message_view,
271 embedder::ScopedPlatformHandleVectorPtr platform_handles) {
272 DCHECK(creation_thread_checker_.CalledOnValidThread());
273
274 switch (message_view.type()) {
275 case MessageInTransit::kTypeEndpoint:
276 OnReadMessageForEndpoint(message_view, platform_handles.Pass());
277 break;
278 case MessageInTransit::kTypeChannel:
279 OnReadMessageForChannel(message_view, platform_handles.Pass());
280 break;
281 default:
282 HandleRemoteError(
283 base::StringPrintf("Received message of invalid type %u",
284 static_cast<unsigned>(message_view.type())));
285 break;
286 }
287 }
288
// |RawChannel::Delegate| implementation: logs the error at a severity
// appropriate to its kind, then shuts the channel down. Called on the
// creation thread.
void Channel::OnError(Error error) {
  DCHECK(creation_thread_checker_.CalledOnValidThread());

  switch (error) {
    case ERROR_READ_SHUTDOWN:
      // The other side was cleanly closed, so this isn't actually an error.
      DVLOG(1) << "RawChannel read error (shutdown)";
      break;
    case ERROR_READ_BROKEN: {
      // Only noteworthy if we weren't already shutting down (hence the lock,
      // to read |is_shutting_down_|).
      base::AutoLock locker(lock_);
      LOG_IF(ERROR, !is_shutting_down_)
          << "RawChannel read error (connection broken)";
      break;
    }
    case ERROR_READ_BAD_MESSAGE:
      // Receiving a bad message means either a bug, data corruption, or
      // malicious attack (probably due to some other bug).
      LOG(ERROR) << "RawChannel read error (received bad message)";
      break;
    case ERROR_READ_UNKNOWN:
      LOG(ERROR) << "RawChannel read error (unknown)";
      break;
    case ERROR_WRITE:
      // Write errors are slightly notable: they probably shouldn't happen under
      // normal operation (but maybe the other side crashed).
      LOG(WARNING) << "RawChannel write error";
      break;
  }
  // Any error tears the channel down.
  Shutdown();
}
319
320 void Channel::OnReadMessageForEndpoint(
321 const MessageInTransit::View& message_view,
322 embedder::ScopedPlatformHandleVectorPtr platform_handles) {
323 DCHECK(creation_thread_checker_.CalledOnValidThread());
324 DCHECK(message_view.type() == MessageInTransit::kTypeEndpoint);
325
326 ChannelEndpointId local_id = message_view.destination_id();
327 if (!local_id.is_valid()) {
328 HandleRemoteError("Received message with no destination ID");
329 return;
330 }
331
332 scoped_refptr<ChannelEndpoint> endpoint;
333 {
334 base::AutoLock locker(lock_);
335
336 // Since we own |raw_channel_|, and this method and |Shutdown()| should only
337 // be called from the creation thread, |raw_channel_| should never be null
338 // here.
339 DCHECK(is_running_);
340
341 IdToEndpointMap::const_iterator it =
342 local_id_to_endpoint_map_.find(local_id);
343 if (it != local_id_to_endpoint_map_.end()) {
344 // Ignore messages for zombie endpoints (not an error).
345 if (!it->second) {
346 DVLOG(2) << "Ignoring downstream message for zombie endpoint (local ID "
347 "= " << local_id
348 << ", remote ID = " << message_view.source_id() << ")";
349 return;
350 }
351
352 endpoint = it->second;
353 }
354 }
355 if (!endpoint) {
356 HandleRemoteError(base::StringPrintf(
357 "Received a message for nonexistent local destination ID %u",
358 static_cast<unsigned>(local_id.value())));
359 // This is strongly indicative of some problem. However, it's not a fatal
360 // error, since it may indicate a buggy (or hostile) remote process. Don't
361 // die even for Debug builds, since handling this properly needs to be
362 // tested (TODO(vtl)).
363 DLOG(ERROR) << "This should not happen under normal operation.";
364 return;
365 }
366
367 scoped_ptr<MessageInTransit> message(new MessageInTransit(message_view));
368 if (message_view.transport_data_buffer_size() > 0) {
369 DCHECK(message_view.transport_data_buffer());
370 message->SetDispatchers(TransportData::DeserializeDispatchers(
371 message_view.transport_data_buffer(),
372 message_view.transport_data_buffer_size(), platform_handles.Pass(),
373 this));
374 }
375
376 endpoint->OnReadMessage(message.Pass());
377 }
378
// Handles |kTypeChannel| control messages (attach-and-run, remove, remove
// ack), reporting any malformed or unexpected message as a remote error.
// Called on the creation thread.
void Channel::OnReadMessageForChannel(
    const MessageInTransit::View& message_view,
    embedder::ScopedPlatformHandleVectorPtr platform_handles) {
  DCHECK(creation_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(message_view.type(), MessageInTransit::kTypeChannel);

  // Currently, no channel messages take platform handles.
  if (platform_handles) {
    HandleRemoteError(
        "Received invalid channel message (has platform handles)");
    NOTREACHED();
    return;
  }

  switch (message_view.subtype()) {
    case MessageInTransit::kSubtypeChannelAttachAndRunEndpoint:
      DVLOG(2) << "Handling channel message to attach and run endpoint (local "
                  "ID " << message_view.destination_id() << ", remote ID "
               << message_view.source_id() << ")";
      if (!OnAttachAndRunEndpoint(message_view.destination_id(),
                                  message_view.source_id())) {
        HandleRemoteError(
            "Received invalid channel message to attach and run endpoint");
      }
      break;
    case MessageInTransit::kSubtypeChannelRemoveEndpoint:
      DVLOG(2) << "Handling channel message to remove endpoint (local ID "
               << message_view.destination_id() << ", remote ID "
               << message_view.source_id() << ")";
      if (!OnRemoveEndpoint(message_view.destination_id(),
                            message_view.source_id())) {
        HandleRemoteError(
            "Received invalid channel message to remove endpoint");
      }
      break;
    case MessageInTransit::kSubtypeChannelRemoveEndpointAck:
      DVLOG(2) << "Handling channel message to ack remove endpoint (local ID "
               << message_view.destination_id() << ", remote ID "
               << message_view.source_id() << ")";
      // Note: the ack only needs our local ID; the source ID is unused here.
      if (!OnRemoveEndpointAck(message_view.destination_id())) {
        HandleRemoteError(
            "Received invalid channel message to ack remove endpoint");
      }
      break;
    default:
      HandleRemoteError("Received invalid channel message");
      NOTREACHED();
      break;
  }
}
429
// Handles a remote request to attach and run an endpoint under |local_id|.
// Registers both a |ChannelEndpoint| (for message routing) and an
// |IncomingEndpoint| (to be claimed later by |DeserializeEndpoint()|).
// Returns false on invalid or already-used IDs.
bool Channel::OnAttachAndRunEndpoint(ChannelEndpointId local_id,
                                     ChannelEndpointId remote_id) {
  // We should only get this for remotely-created local endpoints, so our local
  // ID should be "remote".
  if (!local_id.is_valid() || !local_id.is_remote()) {
    DVLOG(2) << "Received attach and run endpoint with invalid local ID";
    return false;
  }

  // Conversely, the remote end should be "local".
  if (!remote_id.is_valid() || remote_id.is_remote()) {
    DVLOG(2) << "Received attach and run endpoint with invalid remote ID";
    return false;
  }

  // Create/initialize an |IncomingEndpoint| and thus an endpoint (outside the
  // lock).
  scoped_refptr<IncomingEndpoint> incoming_endpoint(new IncomingEndpoint());
  scoped_refptr<ChannelEndpoint> endpoint = incoming_endpoint->Init();

  bool success = true;
  {
    base::AutoLock locker(lock_);

    if (local_id_to_endpoint_map_.find(local_id) ==
        local_id_to_endpoint_map_.end()) {
      // The two maps are kept in sync for incoming endpoints.
      DCHECK(incoming_endpoints_.find(local_id) == incoming_endpoints_.end());

      // TODO(vtl): Use emplace when we move to C++11 unordered_maps. (It'll
      // avoid some refcount churn.)
      local_id_to_endpoint_map_[local_id] = endpoint;
      incoming_endpoints_[local_id] = incoming_endpoint;
    } else {
      // We need to call |Close()| outside the lock.
      success = false;
    }
  }
  if (!success) {
    DVLOG(2) << "Received attach and run endpoint for existing local ID";
    incoming_endpoint->Close();
    return false;
  }

  endpoint->AttachAndRun(this, local_id, remote_id);
  return true;
}
476
// Handles a remote request to remove the endpoint under |local_id|: detaches
// it and sends a remove ack back. Returns false only if |local_id| is
// unknown; a "crossed" remove (our entry is already a null zombie) is not an
// error. Called on the creation thread.
bool Channel::OnRemoveEndpoint(ChannelEndpointId local_id,
                               ChannelEndpointId remote_id) {
  DCHECK(creation_thread_checker_.CalledOnValidThread());

  scoped_refptr<ChannelEndpoint> endpoint;
  {
    base::AutoLock locker(lock_);

    IdToEndpointMap::iterator it = local_id_to_endpoint_map_.find(local_id);
    if (it == local_id_to_endpoint_map_.end()) {
      DVLOG(2) << "Remove endpoint error: not found";
      return false;
    }

    if (!it->second) {
      // Remove messages "crossed"; we have to wait for the ack.
      return true;
    }

    endpoint = it->second;
    local_id_to_endpoint_map_.erase(it);
    // Detach and send the remove ack message outside the lock.
  }

  endpoint->DetachFromChannel();

  if (!SendControlMessage(MessageInTransit::kSubtypeChannelRemoveEndpointAck,
                          local_id, remote_id)) {
    HandleLocalError(base::StringPrintf(
        "Failed to send message to ack remove remote endpoint (local ID %u, "
        "remote ID %u)",
        static_cast<unsigned>(local_id.value()),
        static_cast<unsigned>(remote_id.value())));
  }

  return true;
}
514
515 bool Channel::OnRemoveEndpointAck(ChannelEndpointId local_id) {
516 DCHECK(creation_thread_checker_.CalledOnValidThread());
517
518 base::AutoLock locker(lock_);
519
520 IdToEndpointMap::iterator it = local_id_to_endpoint_map_.find(local_id);
521 if (it == local_id_to_endpoint_map_.end()) {
522 DVLOG(2) << "Remove endpoint ack error: not found";
523 return false;
524 }
525
526 if (it->second) {
527 DVLOG(2) << "Remove endpoint ack error: wrong state";
528 return false;
529 }
530
531 local_id_to_endpoint_map_.erase(it);
532 return true;
533 }
534
// Reports an error attributable to the remote side (e.g., a malformed or
// unroutable message). Currently only logs.
void Channel::HandleRemoteError(const base::StringPiece& error_message) {
  // TODO(vtl): Is this how we really want to handle this? Probably we want to
  // terminate the connection, since it's spewing invalid stuff.
  LOG(WARNING) << error_message;
}
540
// Reports an error that originated locally (e.g., a failed control-message
// write). Currently only logs.
void Channel::HandleLocalError(const base::StringPiece& error_message) {
  // TODO(vtl): Is this how we really want to handle this?
  // Sometimes we'll want to propagate the error back to the message pipe
  // (endpoint), and notify it that the remote is (effectively) closed.
  // Sometimes we'll want to kill the channel (and notify all the endpoints
  // that their remotes are dead).
  LOG(WARNING) << error_message;
}
549
// Attaches |endpoint| under a freshly generated local ID, asks the remote
// side (via a control message) to attach and run its matching half, runs the
// endpoint, and returns the remote ID (which the receiving side uses as its
// lookup key -- see |SerializeEndpointWithLocalPeer()|).
//
// Note: |endpoint| being a |scoped_refptr| makes this function safe, since it
// keeps the endpoint alive even after the lock is released. Otherwise, there's
// the temptation to simply pass the result of |new ChannelEndpoint(...)|
// directly to this function, which wouldn't be sufficient for safety.
ChannelEndpointId Channel::AttachAndRunEndpoint(
    scoped_refptr<ChannelEndpoint> endpoint) {
  DCHECK(endpoint);

  ChannelEndpointId local_id;
  ChannelEndpointId remote_id;
  {
    base::AutoLock locker(lock_);

    DLOG_IF(WARNING, is_shutting_down_)
        << "AttachAndRunEndpoint() while shutting down";

    // Loop in case the generator yields an ID that's already in use.
    do {
      local_id = local_id_generator_.GetNext();
    } while (local_id_to_endpoint_map_.find(local_id) !=
             local_id_to_endpoint_map_.end());

    // TODO(vtl): We also need to check for collisions of remote IDs here.
    remote_id = remote_id_generator_.GetNext();

    local_id_to_endpoint_map_[local_id] = endpoint;
  }

  // Send the control message and run the endpoint outside the lock.
  if (!SendControlMessage(MessageInTransit::kSubtypeChannelAttachAndRunEndpoint,
                          local_id, remote_id)) {
    HandleLocalError(base::StringPrintf(
        "Failed to send message to run remote endpoint (local ID %u, remote ID "
        "%u)",
        static_cast<unsigned>(local_id.value()),
        static_cast<unsigned>(remote_id.value())));
    // TODO(vtl): Should we continue on to |AttachAndRun()|?
  }

  endpoint->AttachAndRun(this, local_id, remote_id);
  return remote_id;
}
590
591 bool Channel::SendControlMessage(MessageInTransit::Subtype subtype,
592 ChannelEndpointId local_id,
593 ChannelEndpointId remote_id) {
594 DVLOG(2) << "Sending channel control message: subtype " << subtype
595 << ", local ID " << local_id << ", remote ID " << remote_id;
596 scoped_ptr<MessageInTransit> message(new MessageInTransit(
597 MessageInTransit::kTypeChannel, subtype, 0, nullptr));
598 message->set_source_id(local_id);
599 message->set_destination_id(remote_id);
600 return WriteMessage(message.Pass());
601 }
602
603 } // namespace system
604 } // namespace mojo
OLDNEW
« no previous file with comments | « mojo/edk/system/channel.h ('k') | mojo/edk/system/channel_endpoint.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698