Chromium Code Reviews

Side by Side Diff: mojo/edk/system/broker_state.cc

Issue 1649633002: Remove files that are no longer used in the Port EDK. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 10 months ago
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "mojo/edk/system/broker_state.h"

#include <stddef.h>
#include <stdint.h>

#include "base/bind.h"
#include "base/rand_util.h"
#include "mojo/edk/embedder/embedder_internal.h"
#include "mojo/edk/embedder/platform_channel_pair.h"
#include "mojo/edk/system/child_broker_host.h"
#include "mojo/edk/system/message_pipe_dispatcher.h"
#include "mojo/edk/system/routed_raw_channel.h"

namespace mojo {
namespace edk {

BrokerState* BrokerState::GetInstance() {
  return base::Singleton<
      BrokerState, base::LeakySingletonTraits<BrokerState>>::get();
}

#if defined(OS_WIN)
void BrokerState::CreatePlatformChannelPair(
    ScopedPlatformHandle* server, ScopedPlatformHandle* client) {
  PlatformChannelPair channel_pair;
  *server = channel_pair.PassServerHandle();
  *client = channel_pair.PassClientHandle();
}

void BrokerState::HandleToToken(
    const PlatformHandle* platform_handles,
    size_t count,
    uint64_t* tokens) {
  base::AutoLock auto_locker(token_map_lock_);
  for (size_t i = 0; i < count; ++i) {
    if (platform_handles[i].is_valid()) {
      uint64_t token;
      do {
        token = base::RandUint64();
      } while (!token || token_map_.find(token) != token_map_.end());
      tokens[i] = token;
      token_map_[tokens[i]] = platform_handles[i].handle;
    } else {
      DLOG(WARNING) << "BrokerState got invalid handle.";
      tokens[i] = 0;
    }
  }
}

void BrokerState::TokenToHandle(const uint64_t* tokens,
                                size_t count,
                                PlatformHandle* handles) {
  base::AutoLock auto_locker(token_map_lock_);
  for (size_t i = 0; i < count; ++i) {
    auto it = token_map_.find(tokens[i]);
    if (it == token_map_.end()) {
      DLOG(WARNING) << "TokenToHandle didn't find token.";
    } else {
      handles[i].handle = it->second;
      token_map_.erase(it);
    }
  }
}
#endif

void BrokerState::ConnectMessagePipe(uint64_t pipe_id,
                                     MessagePipeDispatcher* message_pipe) {
  DCHECK(internal::g_io_thread_task_runner->RunsTasksOnCurrentThread());
  base::AutoLock auto_lock(lock_);
  if (pending_connects_.find(pipe_id) != pending_connects_.end()) {
    // Both ends of the message pipe are in this process.
    if (!in_process_pipes_channel1_) {
      PlatformChannelPair channel_pair;
      in_process_pipes_channel1_ = new RoutedRawChannel(
          channel_pair.PassServerHandle(),
          base::Bind(&BrokerState::ChannelDestructed, base::Unretained(this)));
      in_process_pipes_channel2_ = new RoutedRawChannel(
          channel_pair.PassClientHandle(),
          base::Bind(&BrokerState::ChannelDestructed, base::Unretained(this)));
    }

    AttachMessagePipe(pending_connects_[pipe_id], pipe_id,
                      in_process_pipes_channel1_);
    AttachMessagePipe(message_pipe, pipe_id, in_process_pipes_channel2_);
    pending_connects_.erase(pipe_id);
    return;
  }

  if (pending_child_connects_.find(pipe_id) != pending_child_connects_.end()) {
    // A child process has already tried to connect.
    ChildBrokerHost* child_host = pending_child_connects_[pipe_id];
    if (child_host && child_host->channel()) {
      AttachMessagePipe(message_pipe, pipe_id, child_host->channel());
      child_host->ConnectMessagePipe(pipe_id, 0);
    } else {
      message_pipe->OnError(RawChannel::Delegate::ERROR_READ_SHUTDOWN);
    }

    pending_child_connects_.erase(pipe_id);
    return;
  }

  pending_connects_[pipe_id] = message_pipe;
}

void BrokerState::CloseMessagePipe(uint64_t pipe_id,
                                   MessagePipeDispatcher* message_pipe) {
  DCHECK(internal::g_io_thread_task_runner->RunsTasksOnCurrentThread());

  CHECK(connected_pipes_.find(message_pipe) != connected_pipes_.end());
  connected_pipes_[message_pipe]->RemoveRoute(pipe_id);
  connected_pipes_.erase(message_pipe);
}

void BrokerState::ChildBrokerHostCreated(ChildBrokerHost* child_broker_host) {
  base::AutoLock auto_lock(lock_);
  CHECK(child_processes_.find(child_broker_host->GetProcessId()) ==
        child_processes_.end());
  child_processes_[child_broker_host->GetProcessId()] = child_broker_host;
}

void BrokerState::ChildBrokerHostDestructed(
    ChildBrokerHost* child_broker_host) {
  DCHECK(internal::g_io_thread_task_runner->RunsTasksOnCurrentThread());
  base::AutoLock auto_lock(lock_);

  for (auto it = pending_child_connects_.begin();
       it != pending_child_connects_.end(); ++it) {
    if (it->second == child_broker_host) {
      // Signify that the process has died. When another process tries to
      // connect to the message pipe, we will tell it that the peer has died so
      // that it can fire a peer closed notification.
      it->second = nullptr;
    }
  }

  base::ProcessId pid = child_broker_host->GetProcessId();
  for (auto it = connected_processes_.begin();
       it != connected_processes_.end();) {
    if ((*it).first == pid || (*it).second == pid) {
      // We can't write it = connected_processes_.erase(it); until hash_map is
      // replaced by unordered_map on POSIX, so erase through a copy of the
      // iterator instead.
      auto cur = it++;
      connected_processes_.erase(cur);
    } else {
      it++;
    }
  }

  CHECK(child_processes_.find(pid) != child_processes_.end());
  child_processes_.erase(pid);
}

void BrokerState::HandleConnectMessagePipe(ChildBrokerHost* pipe_process,
                                           uint64_t pipe_id) {
  DCHECK(internal::g_io_thread_task_runner->RunsTasksOnCurrentThread());
  base::AutoLock auto_lock(lock_);
  if (pending_child_connects_.find(pipe_id) != pending_child_connects_.end()) {
    // Another child process is waiting to connect to the given pipe.
    ChildBrokerHost* pending_pipe_process = pending_child_connects_[pipe_id];
    if (pending_pipe_process && pending_pipe_process->channel()) {
      EnsureProcessesConnected(pipe_process->GetProcessId(),
                               pending_pipe_process->GetProcessId());
      pending_pipe_process->ConnectMessagePipe(
          pipe_id, pipe_process->GetProcessId());
      pipe_process->ConnectMessagePipe(
          pipe_id, pending_pipe_process->GetProcessId());
    } else {
      pipe_process->PeerDied(pipe_id);
    }
    pending_child_connects_.erase(pipe_id);
    return;
  }

  if (pending_connects_.find(pipe_id) != pending_connects_.end()) {
    // This parent process is the other side of the given pipe.
    MessagePipeDispatcher* pending_pipe = pending_connects_[pipe_id];
    AttachMessagePipe(pending_pipe, pipe_id, pipe_process->channel());
    pipe_process->ConnectMessagePipe(pipe_id, 0);
    pending_connects_.erase(pipe_id);
    return;
  }

  // This is the first connection request for pipe_id to reach the parent.
  pending_child_connects_[pipe_id] = pipe_process;
}

void BrokerState::HandleCancelConnectMessagePipe(uint64_t pipe_id) {
  DCHECK(internal::g_io_thread_task_runner->RunsTasksOnCurrentThread());
  base::AutoLock auto_lock(lock_);
  if (pending_child_connects_.find(pipe_id) == pending_child_connects_.end()) {
    NOTREACHED() << "Can't find entry for pipe_id " << pipe_id;
  } else {
    pending_child_connects_.erase(pipe_id);
  }
}

BrokerState::BrokerState()
    : in_process_pipes_channel1_(nullptr),
      in_process_pipes_channel2_(nullptr) {
  DCHECK(!internal::g_broker);
  internal::g_broker = this;
}

BrokerState::~BrokerState() {
}

void BrokerState::EnsureProcessesConnected(base::ProcessId pid1,
                                           base::ProcessId pid2) {
  DCHECK(internal::g_io_thread_task_runner->RunsTasksOnCurrentThread());
  lock_.AssertAcquired();
  CHECK_NE(pid1, pid2);
  CHECK_NE(pid1, base::GetCurrentProcId());
  CHECK_NE(pid2, base::GetCurrentProcId());
  std::pair<base::ProcessId, base::ProcessId> processes;
  processes.first = std::min(pid1, pid2);
  processes.second = std::max(pid1, pid2);
  if (connected_processes_.find(processes) != connected_processes_.end())
    return;

  connected_processes_.insert(processes);
  PlatformChannelPair channel_pair;
  CHECK(child_processes_.find(pid1) != child_processes_.end());
  CHECK(child_processes_.find(pid2) != child_processes_.end());
  child_processes_[pid1]->ConnectToProcess(pid2,
                                           channel_pair.PassServerHandle());
  child_processes_[pid2]->ConnectToProcess(pid1,
                                           channel_pair.PassClientHandle());
}

void BrokerState::ChannelDestructed(RoutedRawChannel* channel) {
}

void BrokerState::AttachMessagePipe(MessagePipeDispatcher* message_pipe,
                                    uint64_t pipe_id,
                                    RoutedRawChannel* raw_channel) {
  connected_pipes_[message_pipe] = raw_channel;
  // Note: GotNonTransferableChannel must be called before AddRoute. Otherwise
  // messages could be queued for the pipe inside |AddRoute|, yet a read would
  // return nothing because the pipe doesn't have its channel yet.
  message_pipe->GotNonTransferableChannel(raw_channel->channel());
  // The above call could have caused |CloseMessagePipe| to be called.
  if (connected_pipes_.find(message_pipe) != connected_pipes_.end())
    raw_channel->AddRoute(pipe_id, message_pipe);
}

}  // namespace edk
}  // namespace mojo
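
A note on the Windows-only token exchange in the deleted file above: HandleToToken() stores each valid handle under a random, non-zero, collision-free 64-bit token, and TokenToHandle() redeems a token at most once, erasing the entry after use. Below is a minimal standalone sketch of that pattern, for illustration only; it substitutes std::mt19937_64 for base::RandUint64(), std::mutex for base::Lock, and a plain integer for the platform handle type, and the TokenTable name and interface are hypothetical rather than part of the EDK.

#include <cstdint>
#include <mutex>
#include <optional>
#include <random>
#include <unordered_map>

// Illustrative stand-in for the broker's token table; the handle is modeled
// as an opaque integer so the sketch builds on any platform.
class TokenTable {
 public:
  // Registers |handle| under a fresh random token. Zero is reserved as the
  // "invalid" token, matching the behavior of HandleToToken().
  uint64_t HandleToToken(uint64_t handle) {
    std::lock_guard<std::mutex> lock(lock_);
    uint64_t token;
    do {
      token = dist_(rng_);
    } while (token == 0 || map_.count(token) != 0);
    map_[token] = handle;
    return token;
  }

  // Redeems a token at most once; a second lookup fails, just as
  // TokenToHandle() erases the entry after use.
  std::optional<uint64_t> TokenToHandle(uint64_t token) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = map_.find(token);
    if (it == map_.end())
      return std::nullopt;
    uint64_t handle = it->second;
    map_.erase(it);
    return handle;
  }

 private:
  std::mutex lock_;
  std::mt19937_64 rng_{std::random_device{}()};
  std::uniform_int_distribution<uint64_t> dist_;
  std::unordered_map<uint64_t, uint64_t> map_;
};

Under these assumptions, a handle registered once can be redeemed exactly once; repeating the lookup fails, mirroring the erase in TokenToHandle().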
OLDNEW
« no previous file with comments | « mojo/edk/system/broker_state.h ('k') | mojo/edk/system/child_broker.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698
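Similarly, EnsureProcessesConnected() in the deleted file deduplicates broker-created channels by keying connected_processes_ on the (min, max) ordering of the two PIDs, so the pair is order-independent. A small sketch of that normalization, assuming std::set stands in for the EDK's hash set and int for base::ProcessId (MarkConnected is a hypothetical helper, not part of the EDK):

#include <algorithm>
#include <set>
#include <utility>

using ProcessId = int;  // illustrative stand-in for base::ProcessId

// Returns true the first time a given unordered pair of processes is seen,
// false if the pair was already marked as connected.
bool MarkConnected(std::set<std::pair<ProcessId, ProcessId>>* connected,
                   ProcessId pid1, ProcessId pid2) {
  // Normalize to (smaller, larger) so (A, B) and (B, A) share one entry.
  std::pair<ProcessId, ProcessId> key = std::minmax(pid1, pid2);
  return connected->insert(key).second;
}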