Chromium Code Reviews

Unified Diff: mojo/system/channel.cc

Issue 577313002: Mojo: Give ChannelEndpoint the remote ID and ProxyMessagePipeEndpoint the ChannelEndpoint. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 3 months ago
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "mojo/system/channel.h"

 #include <algorithm>

 #include "base/bind.h"
 #include "base/compiler_specific.h"
(...skipping 95 matching lines...)
     DLOG_IF(WARNING, is_shutting_down_)
         << "AttachMessagePipeEndpoint() while shutting down";

     while (next_local_id_ == MessageInTransit::kInvalidEndpointId ||
            local_id_to_endpoint_map_.find(next_local_id_) !=
                local_id_to_endpoint_map_.end())
       next_local_id_++;

     local_id = next_local_id_;
     next_local_id_++;
-    endpoint = new ChannelEndpoint(message_pipe.get(), port, this, local_id);
+    endpoint = new ChannelEndpoint(message_pipe.get(), port);
     local_id_to_endpoint_map_[local_id] = endpoint;
   }

+  endpoint->AttachToChannel(this, local_id);
   // This might fail if that port got an |OnPeerClose()| before attaching.
-  if (message_pipe->Attach(port, scoped_refptr<Channel>(this), local_id))
+  if (message_pipe->Attach(port, endpoint.get(), this, local_id))
     return local_id;

   // Note: If it failed, quite possibly the endpoint info was removed from that
   // map (there's a race between us adding it to the map above and calling
   // |Attach()|). And even if an entry exists for |local_id|, we need to check
   // that it's the one we added (and not some other one that was added since).
   {
     base::AutoLock locker(lock_);
     IdToEndpointMap::iterator it = local_id_to_endpoint_map_.find(local_id);
     if (it != local_id_to_endpoint_map_.end() &&
         it->second->message_pipe_.get() == message_pipe.get() &&
         it->second->port_ == port) {
       DCHECK_EQ(it->second->state_, ChannelEndpoint::STATE_NORMAL);
       // TODO(vtl): FIXME -- This is wrong. We need to specify (to
       // |AttachMessagePipeEndpoint()| who's going to be responsible for calling
       // |RunMessagePipeEndpoint()| ("us", or the remote by sending us a
       // |kSubtypeChannelRunMessagePipeEndpoint|). If the remote is going to
       // run, then we'll get messages to an "invalid" local ID (for running, for
       // removal).
       local_id_to_endpoint_map_.erase(it);
     }
   }
   endpoint->DetachFromChannel();
   return MessageInTransit::kInvalidEndpointId;
 }

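The hunk above is the heart of this CL's attach path: the endpoint is now constructed without the channel pointer and local ID, registered in the ID map under the lock, and attached via AttachToChannel() only after the lock is released; if MessagePipe::Attach() then fails (the peer closed first), the map entry is removed and the endpoint detached. A minimal standalone model of that two-phase flow, using stand-in names and std:: types (shared_ptr, mutex) rather than the real Mojo classes:

// attach_sketch.cc -- illustrative only; all names are stand-ins.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>

using EndpointId = uint32_t;
constexpr EndpointId kInvalidEndpointId = 0;

struct Endpoint {
  bool attached = false;
  void AttachToChannel(EndpointId /*local_id*/) { attached = true; }
  void DetachFromChannel() { attached = false; }
};

struct MessagePipe {
  bool peer_closed = false;
  // Refuses the attachment if the peer already closed.
  bool Attach(const std::shared_ptr<Endpoint>&) { return !peer_closed; }
};

class Channel {
 public:
  EndpointId AttachEndpoint(MessagePipe* pipe) {
    std::shared_ptr<Endpoint> endpoint;
    EndpointId local_id;
    {
      std::lock_guard<std::mutex> locker(lock_);
      // Skip the invalid ID and any ID still in use (handles wraparound).
      while (next_local_id_ == kInvalidEndpointId ||
             endpoints_.count(next_local_id_))
        next_local_id_++;
      local_id = next_local_id_++;
      endpoint = std::make_shared<Endpoint>();
      endpoints_[local_id] = endpoint;
    }
    // Phase two happens outside the lock, as in the CL.
    endpoint->AttachToChannel(local_id);
    if (pipe->Attach(endpoint))
      return local_id;
    // Roll back, erasing the entry only if it is still the one we added
    // (another thread may have raced and reused the slot).
    {
      std::lock_guard<std::mutex> locker(lock_);
      auto it = endpoints_.find(local_id);
      if (it != endpoints_.end() && it->second == endpoint)
        endpoints_.erase(it);
    }
    endpoint->DetachFromChannel();
    return kInvalidEndpointId;
  }

 private:
  std::mutex lock_;
  EndpointId next_local_id_ = 1;
  std::map<EndpointId, std::shared_ptr<Endpoint>> endpoints_;
};

int main() {
  Channel channel;
  MessagePipe open_pipe;
  std::cout << channel.AttachEndpoint(&open_pipe) << "\n";  // 1: attached
  MessagePipe closed_pipe;
  closed_pipe.peer_closed = true;
  std::cout << channel.AttachEndpoint(&closed_pipe) << "\n";  // 0: rolled back
}
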
 bool Channel::RunMessagePipeEndpoint(MessageInTransit::EndpointId local_id,
                                      MessageInTransit::EndpointId remote_id) {
+  scoped_refptr<ChannelEndpoint> endpoint;
   ChannelEndpoint::State state;
   scoped_refptr<MessagePipe> message_pipe;
   unsigned port;
   {
     base::AutoLock locker(lock_);

     DLOG_IF(WARNING, is_shutting_down_)
         << "RunMessagePipeEndpoint() while shutting down";

     IdToEndpointMap::const_iterator it =
         local_id_to_endpoint_map_.find(local_id);
     if (it == local_id_to_endpoint_map_.end())
       return false;
+    endpoint = it->second;
     state = it->second->state_;
     message_pipe = it->second->message_pipe_;
     port = it->second->port_;
   }

   // Assume that this was in response to |kSubtypeChannelRunMessagePipeEndpoint|
   // and ignore it.
   if (state != ChannelEndpoint::STATE_NORMAL) {
     DVLOG(2) << "Ignoring run message pipe endpoint for zombie endpoint "
                 "(local ID " << local_id << ", remote ID " << remote_id << ")";
     return true;
   }

   // TODO(vtl): FIXME -- We need to handle the case that message pipe is already
   // running when we're here due to |kSubtypeChannelRunMessagePipeEndpoint|).
+  endpoint->Run(remote_id);
+  // TODO(vtl): Get rid of this.
   message_pipe->Run(port, remote_id);
   return true;
 }
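Per the CL title, RunMessagePipeEndpoint() now hands the remote ID to the ChannelEndpoint itself (endpoint->Run(remote_id)); the old message_pipe->Run() call is kept only under a removal TODO. The lookup follows the file's usual locking discipline: snapshot the state and refs under lock_, release the lock, then act, and treat a stale ("zombie") endpoint as ignorable rather than as a protocol error. A small self-contained sketch of that pattern, again with stand-in types:

// run_sketch.cc -- illustrative only; all names are stand-ins.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>

using EndpointId = uint32_t;
enum class State { kNormal, kZombie };

struct Endpoint {
  State state = State::kNormal;
  EndpointId remote_id = 0;
  // Stores the remote ID so outgoing messages can be addressed directly.
  void Run(EndpointId id) { remote_id = id; }
};

class Channel {
 public:
  EndpointId Add(std::shared_ptr<Endpoint> endpoint) {
    std::lock_guard<std::mutex> locker(lock_);
    EndpointId id = next_id_++;
    endpoints_[id] = std::move(endpoint);
    return id;
  }

  bool RunEndpoint(EndpointId local_id, EndpointId remote_id) {
    std::shared_ptr<Endpoint> endpoint;
    State state;
    {
      std::lock_guard<std::mutex> locker(lock_);
      auto it = endpoints_.find(local_id);
      if (it == endpoints_.end())
        return false;  // Unknown local ID: a real error.
      endpoint = it->second;
      state = endpoint->state;
    }  // Lock released before doing any real work.
    if (state != State::kNormal)
      return true;  // Zombie endpoint: ignore, don't fail the channel.
    endpoint->Run(remote_id);
    return true;
  }

 private:
  std::mutex lock_;
  EndpointId next_id_ = 1;
  std::map<EndpointId, std::shared_ptr<Endpoint>> endpoints_;
};

int main() {
  Channel channel;
  auto endpoint = std::make_shared<Endpoint>();
  EndpointId local_id = channel.Add(endpoint);
  std::cout << channel.RunEndpoint(local_id, 42) << " "  // 1: ran
            << endpoint->remote_id << "\n";              // 42: stored
  endpoint->state = State::kZombie;
  std::cout << channel.RunEndpoint(local_id, 7) << " "   // 1: ignored
            << channel.RunEndpoint(999, 7) << "\n";      // 0: unknown ID
}
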

 void Channel::RunRemoteMessagePipeEndpoint(
     MessageInTransit::EndpointId local_id,
     MessageInTransit::EndpointId remote_id) {
 #if DCHECK_IS_ON
   {
     base::AutoLock locker(lock_);
(...skipping 358 matching lines...)
   // TODO(vtl): Is this how we really want to handle this?
   // Sometimes we'll want to propagate the error back to the message pipe
   // (endpoint), and notify it that the remote is (effectively) closed.
   // Sometimes we'll want to kill the channel (and notify all the endpoints that
   // their remotes are dead.
   LOG(WARNING) << error_message;
 }

 }  // namespace system
 }  // namespace mojo
