Chromium Code Reviews

Side by Side Diff: mojo/edk/system/dispatcher.cc

Issue 1350023003: Add a Mojo EDK for Chrome that uses one OS pipe per message pipe. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: more cleanup Created 5 years, 2 months ago
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "third_party/mojo/src/mojo/edk/system/dispatcher.h" 5 #include "mojo/edk/system/dispatcher.h"
6 6
7 #include "base/logging.h" 7 #include "base/logging.h"
8 #include "third_party/mojo/src/mojo/edk/system/configuration.h" 8 #include "mojo/edk/system/configuration.h"
9 #include "third_party/mojo/src/mojo/edk/system/data_pipe_consumer_dispatcher.h" 9 #include "mojo/edk/system/data_pipe_consumer_dispatcher.h"
10 #include "third_party/mojo/src/mojo/edk/system/data_pipe_producer_dispatcher.h" 10 #include "mojo/edk/system/data_pipe_producer_dispatcher.h"
11 #include "third_party/mojo/src/mojo/edk/system/message_pipe_dispatcher.h" 11 #include "mojo/edk/system/message_pipe_dispatcher.h"
12 #include "third_party/mojo/src/mojo/edk/system/platform_handle_dispatcher.h" 12 #include "mojo/edk/system/platform_handle_dispatcher.h"
13 #include "third_party/mojo/src/mojo/edk/system/shared_buffer_dispatcher.h" 13 #include "mojo/edk/system/shared_buffer_dispatcher.h"
14 14
15 namespace mojo { 15 namespace mojo {
16 namespace system { 16 namespace edk {
17 17
18 namespace test { 18 namespace test {
19 19
20 // TODO(vtl): Maybe this should be defined in a test-only file instead. 20 // TODO(vtl): Maybe this should be defined in a test-only file instead.
21 DispatcherTransport DispatcherTryStartTransport(Dispatcher* dispatcher) { 21 DispatcherTransport DispatcherTryStartTransport(Dispatcher* dispatcher) {
22 return Dispatcher::HandleTableAccess::TryStartTransport(dispatcher); 22 return Dispatcher::HandleTableAccess::TryStartTransport(dispatcher);
23 } 23 }
24 24
25 } // namespace test 25 } // namespace test
26 26
27 // Dispatcher ------------------------------------------------------------------ 27 // Dispatcher ------------------------------------------------------------------
28 28
29 // TODO(vtl): The thread-safety analyzer isn't smart enough to deal with the
30 // fact that we give up if |TryLock()| fails.
31 // static 29 // static
32 DispatcherTransport Dispatcher::HandleTableAccess::TryStartTransport( 30 DispatcherTransport Dispatcher::HandleTableAccess::TryStartTransport(
33 Dispatcher* dispatcher) MOJO_NO_THREAD_SAFETY_ANALYSIS { 31 Dispatcher* dispatcher) {
34 DCHECK(dispatcher); 32 DCHECK(dispatcher);
35 33
36 if (!dispatcher->mutex_.TryLock()) 34 // Our dispatcher implementations hop to the IO thread on initialization, so
37 return DispatcherTransport(); 35 // a dispatcher may validly be sent while its RawChannel is still being
36 // initialized on the IO thread. We handle this by just acquiring the lock.
37
38 // See comment in header for why we need this.
39 dispatcher->TransportStarted();
40
41 dispatcher->lock_.Acquire();
38 42
39 // We shouldn't race with things that close dispatchers, since closing can 43 // We shouldn't race with things that close dispatchers, since closing can
40 // only take place either under |handle_table_mutex_| or when the handle is 44 // only take place either under |handle_table_lock_| or when the handle is
41 // marked as busy. 45 // marked as busy.
42 DCHECK(!dispatcher->is_closed_); 46 DCHECK(!dispatcher->is_closed_);
43 47
44 return DispatcherTransport(dispatcher); 48 return DispatcherTransport(dispatcher);
45 } 49 }
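
For reference, a minimal standalone sketch of the locking change above: the old code gave up when |TryLock()| failed, while the new code announces the transport via |TransportStarted()| and then blocks until the lock is held. std::mutex stands in for base::Lock, and the hook body is a placeholder, not the real implementation.

#include <mutex>

class TransportSketch {
 public:
  // Old behavior: bail out (hand back an invalid transport) if the lock is
  // currently held by someone else.
  bool TryStartTransportOld() { return lock_.try_lock(); }

  // New behavior: announce the transport first, then block on the lock; the
  // only expected contention is the IO-thread initialization described in the
  // comment above.
  void StartTransportNew() {
    TransportStarted();
    lock_.lock();
  }

 private:
  void TransportStarted() {}  // dispatcher-specific; placeholder here
  std::mutex lock_;
};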
46 50
47 // static 51 // static
48 void Dispatcher::TransportDataAccess::StartSerialize( 52 void Dispatcher::TransportDataAccess::StartSerialize(
49 Dispatcher* dispatcher, 53 Dispatcher* dispatcher,
50 Channel* channel,
51 size_t* max_size, 54 size_t* max_size,
52 size_t* max_platform_handles) { 55 size_t* max_platform_handles) {
53 DCHECK(dispatcher); 56 DCHECK(dispatcher);
54 dispatcher->StartSerialize(channel, max_size, max_platform_handles); 57 dispatcher->StartSerialize(max_size, max_platform_handles);
55 } 58 }
56 59
57 // static 60 // static
58 bool Dispatcher::TransportDataAccess::EndSerializeAndClose( 61 bool Dispatcher::TransportDataAccess::EndSerializeAndClose(
59 Dispatcher* dispatcher, 62 Dispatcher* dispatcher,
60 Channel* channel,
61 void* destination, 63 void* destination,
62 size_t* actual_size, 64 size_t* actual_size,
63 embedder::PlatformHandleVector* platform_handles) { 65 PlatformHandleVector* platform_handles) {
64 DCHECK(dispatcher); 66 DCHECK(dispatcher);
65 return dispatcher->EndSerializeAndClose(channel, destination, actual_size, 67 return dispatcher->EndSerializeAndClose(destination, actual_size,
66 platform_handles); 68 platform_handles);
67 } 69 }
68 70
69 // static 71 // static
70 scoped_refptr<Dispatcher> Dispatcher::TransportDataAccess::Deserialize( 72 scoped_refptr<Dispatcher> Dispatcher::TransportDataAccess::Deserialize(
71 Channel* channel,
72 int32_t type, 73 int32_t type,
73 const void* source, 74 const void* source,
74 size_t size, 75 size_t size,
75 embedder::PlatformHandleVector* platform_handles) { 76 PlatformHandleVector* platform_handles) {
76 switch (static_cast<Dispatcher::Type>(type)) { 77 switch (static_cast<Dispatcher::Type>(type)) {
77 case Type::UNKNOWN: 78 case Type::UNKNOWN:
78 DVLOG(2) << "Deserializing invalid handle"; 79 DVLOG(2) << "Deserializing invalid handle";
79 return nullptr; 80 return nullptr;
80 case Type::MESSAGE_PIPE: 81 case Type::MESSAGE_PIPE:
81 return scoped_refptr<Dispatcher>( 82 return scoped_refptr<Dispatcher>(MessagePipeDispatcher::Deserialize(
82 MessagePipeDispatcher::Deserialize(channel, source, size)); 83 source, size, platform_handles));
83 case Type::DATA_PIPE_PRODUCER: 84 case Type::DATA_PIPE_PRODUCER:
84 return scoped_refptr<Dispatcher>( 85 return scoped_refptr<Dispatcher>(
85 DataPipeProducerDispatcher::Deserialize(channel, source, size)); 86 DataPipeProducerDispatcher::Deserialize(
87 source, size, platform_handles));
86 case Type::DATA_PIPE_CONSUMER: 88 case Type::DATA_PIPE_CONSUMER:
87 return scoped_refptr<Dispatcher>( 89 return scoped_refptr<Dispatcher>(
88 DataPipeConsumerDispatcher::Deserialize(channel, source, size)); 90 DataPipeConsumerDispatcher::Deserialize(
91 source, size, platform_handles));
89 case Type::SHARED_BUFFER: 92 case Type::SHARED_BUFFER:
90 return scoped_refptr<Dispatcher>(SharedBufferDispatcher::Deserialize( 93 return scoped_refptr<Dispatcher>(SharedBufferDispatcher::Deserialize(
91 channel, source, size, platform_handles)); 94 source, size, platform_handles));
92 case Type::PLATFORM_HANDLE: 95 case Type::PLATFORM_HANDLE:
93 return scoped_refptr<Dispatcher>(PlatformHandleDispatcher::Deserialize( 96 return scoped_refptr<Dispatcher>(PlatformHandleDispatcher::Deserialize(
94 channel, source, size, platform_handles)); 97 source, size, platform_handles));
95 } 98 }
96 LOG(WARNING) << "Unknown dispatcher type " << type; 99 LOG(WARNING) << "Unknown dispatcher type " << type;
97 return nullptr; 100 return nullptr;
98 } 101 }
99 102
100 MojoResult Dispatcher::Close() { 103 MojoResult Dispatcher::Close() {
101 MutexLocker locker(&mutex_); 104 base::AutoLock locker(lock_);
102 if (is_closed_) 105 if (is_closed_)
103 return MOJO_RESULT_INVALID_ARGUMENT; 106 return MOJO_RESULT_INVALID_ARGUMENT;
104 107
105 CloseNoLock(); 108 CloseNoLock();
106 return MOJO_RESULT_OK; 109 return MOJO_RESULT_OK;
107 } 110 }
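
The same shape, take the dispatcher lock, reject closed handles, then delegate to the subclass's |*ImplNoLock()| hook, repeats through the methods below. A minimal standalone sketch of that pattern, with std::mutex standing in for base::Lock and a placeholder result-code value:

#include <cstdint>
#include <mutex>

using MojoResult = uint32_t;
constexpr MojoResult kInvalidArgumentSketch = 1;  // placeholder for MOJO_RESULT_INVALID_ARGUMENT

class DispatcherSketch {
 public:
  virtual ~DispatcherSketch() = default;

  MojoResult DoOperation() {
    std::lock_guard<std::mutex> locker(lock_);  // analogous to base::AutoLock
    if (is_closed_)
      return kInvalidArgumentSketch;            // closed handles are rejected up front
    return DoOperationImplNoLock();             // lock is already held here
  }

 protected:
  // Default: not supported; only the relevant dispatcher subtype (message
  // pipe, data pipe, buffer, ...) overrides this.
  virtual MojoResult DoOperationImplNoLock() { return kInvalidArgumentSketch; }

 private:
  std::mutex lock_;
  bool is_closed_ = false;
};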
108 111
109 MojoResult Dispatcher::WriteMessage( 112 MojoResult Dispatcher::WriteMessage(
110 UserPointer<const void> bytes, 113 const void* bytes,
111 uint32_t num_bytes, 114 uint32_t num_bytes,
112 std::vector<DispatcherTransport>* transports, 115 std::vector<DispatcherTransport>* transports,
113 MojoWriteMessageFlags flags) { 116 MojoWriteMessageFlags flags) {
114 DCHECK(!transports || 117 DCHECK(!transports ||
115 (transports->size() > 0 && 118 (transports->size() > 0 &&
116 transports->size() < GetConfiguration().max_message_num_handles)); 119 transports->size() < GetConfiguration().max_message_num_handles));
117 120
118 MutexLocker locker(&mutex_); 121 base::AutoLock locker(lock_);
119 if (is_closed_) 122 if (is_closed_)
120 return MOJO_RESULT_INVALID_ARGUMENT; 123 return MOJO_RESULT_INVALID_ARGUMENT;
121 124
122 return WriteMessageImplNoLock(bytes, num_bytes, transports, flags); 125 return WriteMessageImplNoLock(bytes, num_bytes, transports, flags);
123 } 126 }
124 127
125 MojoResult Dispatcher::ReadMessage(UserPointer<void> bytes, 128 MojoResult Dispatcher::ReadMessage(void* bytes,
126 UserPointer<uint32_t> num_bytes, 129 uint32_t* num_bytes,
127 DispatcherVector* dispatchers, 130 DispatcherVector* dispatchers,
128 uint32_t* num_dispatchers, 131 uint32_t* num_dispatchers,
129 MojoReadMessageFlags flags) { 132 MojoReadMessageFlags flags) {
130 DCHECK(!num_dispatchers || *num_dispatchers == 0 || 133 DCHECK(!num_dispatchers || *num_dispatchers == 0 ||
131 (dispatchers && dispatchers->empty())); 134 (dispatchers && dispatchers->empty()));
132 135
133 MutexLocker locker(&mutex_); 136 base::AutoLock locker(lock_);
134 if (is_closed_) 137 if (is_closed_)
135 return MOJO_RESULT_INVALID_ARGUMENT; 138 return MOJO_RESULT_INVALID_ARGUMENT;
136 139
137 return ReadMessageImplNoLock(bytes, num_bytes, dispatchers, num_dispatchers, 140 return ReadMessageImplNoLock(bytes, num_bytes, dispatchers, num_dispatchers,
138 flags); 141 flags);
139 } 142 }
140 143
141 MojoResult Dispatcher::WriteData(UserPointer<const void> elements, 144 MojoResult Dispatcher::WriteData(const void* elements,
142 UserPointer<uint32_t> num_bytes, 145 uint32_t* num_bytes,
143 MojoWriteDataFlags flags) { 146 MojoWriteDataFlags flags) {
144 MutexLocker locker(&mutex_); 147 base::AutoLock locker(lock_);
145 if (is_closed_) 148 if (is_closed_)
146 return MOJO_RESULT_INVALID_ARGUMENT; 149 return MOJO_RESULT_INVALID_ARGUMENT;
147 150
148 return WriteDataImplNoLock(elements, num_bytes, flags); 151 return WriteDataImplNoLock(elements, num_bytes, flags);
149 } 152 }
150 153
151 MojoResult Dispatcher::BeginWriteData(UserPointer<void*> buffer, 154 MojoResult Dispatcher::BeginWriteData(void** buffer,
152 UserPointer<uint32_t> buffer_num_bytes, 155 uint32_t* buffer_num_bytes,
153 MojoWriteDataFlags flags) { 156 MojoWriteDataFlags flags) {
154 MutexLocker locker(&mutex_); 157 base::AutoLock locker(lock_);
155 if (is_closed_) 158 if (is_closed_)
156 return MOJO_RESULT_INVALID_ARGUMENT; 159 return MOJO_RESULT_INVALID_ARGUMENT;
157 160
158 return BeginWriteDataImplNoLock(buffer, buffer_num_bytes, flags); 161 return BeginWriteDataImplNoLock(buffer, buffer_num_bytes, flags);
159 } 162 }
160 163
161 MojoResult Dispatcher::EndWriteData(uint32_t num_bytes_written) { 164 MojoResult Dispatcher::EndWriteData(uint32_t num_bytes_written) {
162 MutexLocker locker(&mutex_); 165 base::AutoLock locker(lock_);
163 if (is_closed_) 166 if (is_closed_)
164 return MOJO_RESULT_INVALID_ARGUMENT; 167 return MOJO_RESULT_INVALID_ARGUMENT;
165 168
166 return EndWriteDataImplNoLock(num_bytes_written); 169 return EndWriteDataImplNoLock(num_bytes_written);
167 } 170 }
168 171
169 MojoResult Dispatcher::ReadData(UserPointer<void> elements, 172 MojoResult Dispatcher::ReadData(void* elements,
170 UserPointer<uint32_t> num_bytes, 173 uint32_t* num_bytes,
171 MojoReadDataFlags flags) { 174 MojoReadDataFlags flags) {
172 MutexLocker locker(&mutex_); 175 base::AutoLock locker(lock_);
173 if (is_closed_) 176 if (is_closed_)
174 return MOJO_RESULT_INVALID_ARGUMENT; 177 return MOJO_RESULT_INVALID_ARGUMENT;
175 178
176 return ReadDataImplNoLock(elements, num_bytes, flags); 179 return ReadDataImplNoLock(elements, num_bytes, flags);
177 } 180 }
178 181
179 MojoResult Dispatcher::BeginReadData(UserPointer<const void*> buffer, 182 MojoResult Dispatcher::BeginReadData(const void** buffer,
180 UserPointer<uint32_t> buffer_num_bytes, 183 uint32_t* buffer_num_bytes,
181 MojoReadDataFlags flags) { 184 MojoReadDataFlags flags) {
182 MutexLocker locker(&mutex_); 185 base::AutoLock locker(lock_);
183 if (is_closed_) 186 if (is_closed_)
184 return MOJO_RESULT_INVALID_ARGUMENT; 187 return MOJO_RESULT_INVALID_ARGUMENT;
185 188
186 return BeginReadDataImplNoLock(buffer, buffer_num_bytes, flags); 189 return BeginReadDataImplNoLock(buffer, buffer_num_bytes, flags);
187 } 190 }
188 191
189 MojoResult Dispatcher::EndReadData(uint32_t num_bytes_read) { 192 MojoResult Dispatcher::EndReadData(uint32_t num_bytes_read) {
190 MutexLocker locker(&mutex_); 193 base::AutoLock locker(lock_);
191 if (is_closed_) 194 if (is_closed_)
192 return MOJO_RESULT_INVALID_ARGUMENT; 195 return MOJO_RESULT_INVALID_ARGUMENT;
193 196
194 return EndReadDataImplNoLock(num_bytes_read); 197 return EndReadDataImplNoLock(num_bytes_read);
195 } 198 }
196 199
197 MojoResult Dispatcher::DuplicateBufferHandle( 200 MojoResult Dispatcher::DuplicateBufferHandle(
198 UserPointer<const MojoDuplicateBufferHandleOptions> options, 201 const MojoDuplicateBufferHandleOptions* options,
199 scoped_refptr<Dispatcher>* new_dispatcher) { 202 scoped_refptr<Dispatcher>* new_dispatcher) {
200 MutexLocker locker(&mutex_); 203 base::AutoLock locker(lock_);
201 if (is_closed_) 204 if (is_closed_)
202 return MOJO_RESULT_INVALID_ARGUMENT; 205 return MOJO_RESULT_INVALID_ARGUMENT;
203 206
204 return DuplicateBufferHandleImplNoLock(options, new_dispatcher); 207 return DuplicateBufferHandleImplNoLock(options, new_dispatcher);
205 } 208 }
206 209
207 MojoResult Dispatcher::MapBuffer( 210 MojoResult Dispatcher::MapBuffer(
208 uint64_t offset, 211 uint64_t offset,
209 uint64_t num_bytes, 212 uint64_t num_bytes,
210 MojoMapBufferFlags flags, 213 MojoMapBufferFlags flags,
211 scoped_ptr<embedder::PlatformSharedBufferMapping>* mapping) { 214 scoped_ptr<PlatformSharedBufferMapping>* mapping) {
212 MutexLocker locker(&mutex_); 215 base::AutoLock locker(lock_);
213 if (is_closed_) 216 if (is_closed_)
214 return MOJO_RESULT_INVALID_ARGUMENT; 217 return MOJO_RESULT_INVALID_ARGUMENT;
215 218
216 return MapBufferImplNoLock(offset, num_bytes, flags, mapping); 219 return MapBufferImplNoLock(offset, num_bytes, flags, mapping);
217 } 220 }
218 221
219 HandleSignalsState Dispatcher::GetHandleSignalsState() const { 222 HandleSignalsState Dispatcher::GetHandleSignalsState() const {
220 MutexLocker locker(&mutex_); 223 base::AutoLock locker(lock_);
221 if (is_closed_) 224 if (is_closed_)
222 return HandleSignalsState(); 225 return HandleSignalsState();
223 226
224 return GetHandleSignalsStateImplNoLock(); 227 return GetHandleSignalsStateImplNoLock();
225 } 228 }
226 229
227 MojoResult Dispatcher::AddAwakable(Awakable* awakable, 230 MojoResult Dispatcher::AddAwakable(Awakable* awakable,
228 MojoHandleSignals signals, 231 MojoHandleSignals signals,
229 uint32_t context, 232 uint32_t context,
230 HandleSignalsState* signals_state) { 233 HandleSignalsState* signals_state) {
231 MutexLocker locker(&mutex_); 234 base::AutoLock locker(lock_);
232 if (is_closed_) { 235 if (is_closed_) {
233 if (signals_state) 236 if (signals_state)
234 *signals_state = HandleSignalsState(); 237 *signals_state = HandleSignalsState();
235 return MOJO_RESULT_INVALID_ARGUMENT; 238 return MOJO_RESULT_INVALID_ARGUMENT;
236 } 239 }
237 240
238 return AddAwakableImplNoLock(awakable, signals, context, signals_state); 241 return AddAwakableImplNoLock(awakable, signals, context, signals_state);
239 } 242 }
240 243
241 void Dispatcher::RemoveAwakable(Awakable* awakable, 244 void Dispatcher::RemoveAwakable(Awakable* awakable,
242 HandleSignalsState* handle_signals_state) { 245 HandleSignalsState* handle_signals_state) {
243 MutexLocker locker(&mutex_); 246 base::AutoLock locker(lock_);
244 if (is_closed_) { 247 if (is_closed_) {
245 if (handle_signals_state) 248 if (handle_signals_state)
246 *handle_signals_state = HandleSignalsState(); 249 *handle_signals_state = HandleSignalsState();
247 return; 250 return;
248 } 251 }
249 252
250 RemoveAwakableImplNoLock(awakable, handle_signals_state); 253 RemoveAwakableImplNoLock(awakable, handle_signals_state);
251 } 254 }
252 255
253 Dispatcher::Dispatcher() : is_closed_(false) { 256 Dispatcher::Dispatcher() : is_closed_(false) {
254 } 257 }
255 258
256 Dispatcher::~Dispatcher() { 259 Dispatcher::~Dispatcher() {
257 // Make sure that |Close()| was called. 260 // Make sure that |Close()| was called.
258 DCHECK(is_closed_); 261 DCHECK(is_closed_);
259 } 262 }
260 263
261 void Dispatcher::CancelAllAwakablesNoLock() { 264 void Dispatcher::CancelAllAwakablesNoLock() {
262 mutex_.AssertHeld(); 265 lock_.AssertAcquired();
263 DCHECK(is_closed_); 266 DCHECK(is_closed_);
264 // By default, waiting isn't supported. Only dispatchers that can be waited on 267 // By default, waiting isn't supported. Only dispatchers that can be waited on
265 // will do something nontrivial. 268 // will do something nontrivial.
266 } 269 }
267 270
268 void Dispatcher::CloseImplNoLock() { 271 void Dispatcher::CloseImplNoLock() {
269 mutex_.AssertHeld(); 272 lock_.AssertAcquired();
270 DCHECK(is_closed_); 273 DCHECK(is_closed_);
271 // This may not need to do anything. Dispatchers should override this to do 274 // This may not need to do anything. Dispatchers should override this to do
272 // any actual close-time cleanup necessary. 275 // any actual close-time cleanup necessary.
273 } 276 }
274 277
275 MojoResult Dispatcher::WriteMessageImplNoLock( 278 MojoResult Dispatcher::WriteMessageImplNoLock(
276 UserPointer<const void> /*bytes*/, 279 const void* /*bytes*/,
277 uint32_t /*num_bytes*/, 280 uint32_t /*num_bytes*/,
278 std::vector<DispatcherTransport>* /*transports*/, 281 std::vector<DispatcherTransport>* /*transports*/,
279 MojoWriteMessageFlags /*flags*/) { 282 MojoWriteMessageFlags /*flags*/) {
280 mutex_.AssertHeld(); 283 lock_.AssertAcquired();
281 DCHECK(!is_closed_); 284 DCHECK(!is_closed_);
282 // By default, not supported. Only needed for message pipe dispatchers. 285 // By default, not supported. Only needed for message pipe dispatchers.
283 return MOJO_RESULT_INVALID_ARGUMENT; 286 return MOJO_RESULT_INVALID_ARGUMENT;
284 } 287 }
285 288
286 MojoResult Dispatcher::ReadMessageImplNoLock( 289 MojoResult Dispatcher::ReadMessageImplNoLock(
287 UserPointer<void> /*bytes*/, 290 void* /*bytes*/,
288 UserPointer<uint32_t> /*num_bytes*/, 291 uint32_t* /*num_bytes*/,
289 DispatcherVector* /*dispatchers*/, 292 DispatcherVector* /*dispatchers*/,
290 uint32_t* /*num_dispatchers*/, 293 uint32_t* /*num_dispatchers*/,
291 MojoReadMessageFlags /*flags*/) { 294 MojoReadMessageFlags /*flags*/) {
292 mutex_.AssertHeld(); 295 lock_.AssertAcquired();
293 DCHECK(!is_closed_); 296 DCHECK(!is_closed_);
294 // By default, not supported. Only needed for message pipe dispatchers. 297 // By default, not supported. Only needed for message pipe dispatchers.
295 return MOJO_RESULT_INVALID_ARGUMENT; 298 return MOJO_RESULT_INVALID_ARGUMENT;
296 } 299 }
297 300
298 MojoResult Dispatcher::WriteDataImplNoLock(UserPointer<const void> /*elements*/, 301 MojoResult Dispatcher::WriteDataImplNoLock(const void* /*elements*/,
299 UserPointer<uint32_t> /*num_bytes*/, 302 uint32_t* /*num_bytes*/,
300 MojoWriteDataFlags /*flags*/) { 303 MojoWriteDataFlags /*flags*/) {
301 mutex_.AssertHeld(); 304 lock_.AssertAcquired();
302 DCHECK(!is_closed_); 305 DCHECK(!is_closed_);
303 // By default, not supported. Only needed for data pipe dispatchers. 306 // By default, not supported. Only needed for data pipe dispatchers.
304 return MOJO_RESULT_INVALID_ARGUMENT; 307 return MOJO_RESULT_INVALID_ARGUMENT;
305 } 308 }
306 309
307 MojoResult Dispatcher::BeginWriteDataImplNoLock( 310 MojoResult Dispatcher::BeginWriteDataImplNoLock(
308 UserPointer<void*> /*buffer*/, 311 void** /*buffer*/,
309 UserPointer<uint32_t> /*buffer_num_bytes*/, 312 uint32_t* /*buffer_num_bytes*/,
310 MojoWriteDataFlags /*flags*/) { 313 MojoWriteDataFlags /*flags*/) {
311 mutex_.AssertHeld(); 314 lock_.AssertAcquired();
312 DCHECK(!is_closed_); 315 DCHECK(!is_closed_);
313 // By default, not supported. Only needed for data pipe dispatchers. 316 // By default, not supported. Only needed for data pipe dispatchers.
314 return MOJO_RESULT_INVALID_ARGUMENT; 317 return MOJO_RESULT_INVALID_ARGUMENT;
315 } 318 }
316 319
317 MojoResult Dispatcher::EndWriteDataImplNoLock(uint32_t /*num_bytes_written*/) { 320 MojoResult Dispatcher::EndWriteDataImplNoLock(uint32_t /*num_bytes_written*/) {
318 mutex_.AssertHeld(); 321 lock_.AssertAcquired();
319 DCHECK(!is_closed_); 322 DCHECK(!is_closed_);
320 // By default, not supported. Only needed for data pipe dispatchers. 323 // By default, not supported. Only needed for data pipe dispatchers.
321 return MOJO_RESULT_INVALID_ARGUMENT; 324 return MOJO_RESULT_INVALID_ARGUMENT;
322 } 325 }
323 326
324 MojoResult Dispatcher::ReadDataImplNoLock(UserPointer<void> /*elements*/, 327 MojoResult Dispatcher::ReadDataImplNoLock(void* /*elements*/,
325 UserPointer<uint32_t> /*num_bytes*/, 328 uint32_t* /*num_bytes*/,
326 MojoReadDataFlags /*flags*/) { 329 MojoReadDataFlags /*flags*/) {
327 mutex_.AssertHeld(); 330 lock_.AssertAcquired();
328 DCHECK(!is_closed_); 331 DCHECK(!is_closed_);
329 // By default, not supported. Only needed for data pipe dispatchers. 332 // By default, not supported. Only needed for data pipe dispatchers.
330 return MOJO_RESULT_INVALID_ARGUMENT; 333 return MOJO_RESULT_INVALID_ARGUMENT;
331 } 334 }
332 335
333 MojoResult Dispatcher::BeginReadDataImplNoLock( 336 MojoResult Dispatcher::BeginReadDataImplNoLock(
334 UserPointer<const void*> /*buffer*/, 337 const void** /*buffer*/,
335 UserPointer<uint32_t> /*buffer_num_bytes*/, 338 uint32_t* /*buffer_num_bytes*/,
336 MojoReadDataFlags /*flags*/) { 339 MojoReadDataFlags /*flags*/) {
337 mutex_.AssertHeld(); 340 lock_.AssertAcquired();
338 DCHECK(!is_closed_); 341 DCHECK(!is_closed_);
339 // By default, not supported. Only needed for data pipe dispatchers. 342 // By default, not supported. Only needed for data pipe dispatchers.
340 return MOJO_RESULT_INVALID_ARGUMENT; 343 return MOJO_RESULT_INVALID_ARGUMENT;
341 } 344 }
342 345
343 MojoResult Dispatcher::EndReadDataImplNoLock(uint32_t /*num_bytes_read*/) { 346 MojoResult Dispatcher::EndReadDataImplNoLock(uint32_t /*num_bytes_read*/) {
344 mutex_.AssertHeld(); 347 lock_.AssertAcquired();
345 DCHECK(!is_closed_); 348 DCHECK(!is_closed_);
346 // By default, not supported. Only needed for data pipe dispatchers. 349 // By default, not supported. Only needed for data pipe dispatchers.
347 return MOJO_RESULT_INVALID_ARGUMENT; 350 return MOJO_RESULT_INVALID_ARGUMENT;
348 } 351 }
349 352
350 MojoResult Dispatcher::DuplicateBufferHandleImplNoLock( 353 MojoResult Dispatcher::DuplicateBufferHandleImplNoLock(
351 UserPointer<const MojoDuplicateBufferHandleOptions> /*options*/, 354 const MojoDuplicateBufferHandleOptions* /*options*/,
352 scoped_refptr<Dispatcher>* /*new_dispatcher*/) { 355 scoped_refptr<Dispatcher>* /*new_dispatcher*/) {
353 mutex_.AssertHeld(); 356 lock_.AssertAcquired();
354 DCHECK(!is_closed_); 357 DCHECK(!is_closed_);
355 // By default, not supported. Only needed for buffer dispatchers. 358 // By default, not supported. Only needed for buffer dispatchers.
356 return MOJO_RESULT_INVALID_ARGUMENT; 359 return MOJO_RESULT_INVALID_ARGUMENT;
357 } 360 }
358 361
359 MojoResult Dispatcher::MapBufferImplNoLock( 362 MojoResult Dispatcher::MapBufferImplNoLock(
360 uint64_t /*offset*/, 363 uint64_t /*offset*/,
361 uint64_t /*num_bytes*/, 364 uint64_t /*num_bytes*/,
362 MojoMapBufferFlags /*flags*/, 365 MojoMapBufferFlags /*flags*/,
363 scoped_ptr<embedder::PlatformSharedBufferMapping>* /*mapping*/) { 366 scoped_ptr<PlatformSharedBufferMapping>* /*mapping*/) {
364 mutex_.AssertHeld(); 367 lock_.AssertAcquired();
365 DCHECK(!is_closed_); 368 DCHECK(!is_closed_);
366 // By default, not supported. Only needed for buffer dispatchers. 369 // By default, not supported. Only needed for buffer dispatchers.
367 return MOJO_RESULT_INVALID_ARGUMENT; 370 return MOJO_RESULT_INVALID_ARGUMENT;
368 } 371 }
369 372
370 HandleSignalsState Dispatcher::GetHandleSignalsStateImplNoLock() const { 373 HandleSignalsState Dispatcher::GetHandleSignalsStateImplNoLock() const {
371 mutex_.AssertHeld(); 374 lock_.AssertAcquired();
372 DCHECK(!is_closed_); 375 DCHECK(!is_closed_);
373 // By default, waiting isn't supported. Only dispatchers that can be waited on 376 // By default, waiting isn't supported. Only dispatchers that can be waited on
374 // will do something nontrivial. 377 // will do something nontrivial.
375 return HandleSignalsState(); 378 return HandleSignalsState();
376 } 379 }
377 380
378 MojoResult Dispatcher::AddAwakableImplNoLock( 381 MojoResult Dispatcher::AddAwakableImplNoLock(
379 Awakable* /*awakable*/, 382 Awakable* /*awakable*/,
380 MojoHandleSignals /*signals*/, 383 MojoHandleSignals /*signals*/,
381 uint32_t /*context*/, 384 uint32_t /*context*/,
382 HandleSignalsState* signals_state) { 385 HandleSignalsState* signals_state) {
383 mutex_.AssertHeld(); 386 lock_.AssertAcquired();
384 DCHECK(!is_closed_); 387 DCHECK(!is_closed_);
385 // By default, waiting isn't supported. Only dispatchers that can be waited on 388 // By default, waiting isn't supported. Only dispatchers that can be waited on
386 // will do something nontrivial. 389 // will do something nontrivial.
387 if (signals_state) 390 if (signals_state)
388 *signals_state = HandleSignalsState(); 391 *signals_state = HandleSignalsState();
389 return MOJO_RESULT_FAILED_PRECONDITION; 392 return MOJO_RESULT_FAILED_PRECONDITION;
390 } 393 }
391 394
392 void Dispatcher::RemoveAwakableImplNoLock(Awakable* /*awakable*/, 395 void Dispatcher::RemoveAwakableImplNoLock(Awakable* /*awakable*/,
393 HandleSignalsState* signals_state) { 396 HandleSignalsState* signals_state) {
394 mutex_.AssertHeld(); 397 lock_.AssertAcquired();
395 DCHECK(!is_closed_); 398 DCHECK(!is_closed_);
396 // By default, waiting isn't supported. Only dispatchers that can be waited on 399 // By default, waiting isn't supported. Only dispatchers that can be waited on
397 // will do something nontrivial. 400 // will do something nontrivial.
398 if (signals_state) 401 if (signals_state)
399 *signals_state = HandleSignalsState(); 402 *signals_state = HandleSignalsState();
400 } 403 }
401 404
402 void Dispatcher::StartSerializeImplNoLock(Channel* /*channel*/, 405 void Dispatcher::StartSerializeImplNoLock(size_t* max_size,
403 size_t* max_size,
404 size_t* max_platform_handles) { 406 size_t* max_platform_handles) {
405 DCHECK(HasOneRef()); // Only one ref => no need to take the lock. 407 DCHECK(HasOneRef()); // Only one ref => no need to take the lock.
406 DCHECK(!is_closed_); 408 DCHECK(!is_closed_);
407 *max_size = 0; 409 *max_size = 0;
408 *max_platform_handles = 0; 410 *max_platform_handles = 0;
409 } 411 }
410 412
411 bool Dispatcher::EndSerializeAndCloseImplNoLock( 413 bool Dispatcher::EndSerializeAndCloseImplNoLock(
412 Channel* /*channel*/,
413 void* /*destination*/, 414 void* /*destination*/,
414 size_t* /*actual_size*/, 415 size_t* /*actual_size*/,
415 embedder::PlatformHandleVector* /*platform_handles*/) { 416 PlatformHandleVector* /*platform_handles*/) {
416 DCHECK(HasOneRef()); // Only one ref => no need to take the lock. 417 DCHECK(HasOneRef()); // Only one ref => no need to take the lock.
417 DCHECK(is_closed_); 418 DCHECK(is_closed_);
418 // By default, serializing isn't supported, so just close. 419 // By default, serializing isn't supported, so just close.
419 CloseImplNoLock(); 420 CloseImplNoLock();
420 return false; 421 return false;
421 } 422 }
422 423
423 bool Dispatcher::IsBusyNoLock() const { 424 bool Dispatcher::IsBusyNoLock() const {
424 mutex_.AssertHeld(); 425 lock_.AssertAcquired();
425 DCHECK(!is_closed_); 426 DCHECK(!is_closed_);
426 // Most dispatchers support only "atomic" operations, so they are never busy 427 // Most dispatchers support only "atomic" operations, so they are never busy
427 // (in this sense). 428 // (in this sense).
428 return false; 429 return false;
429 } 430 }
430 431
431 void Dispatcher::CloseNoLock() { 432 void Dispatcher::CloseNoLock() {
432 mutex_.AssertHeld(); 433 lock_.AssertAcquired();
433 DCHECK(!is_closed_); 434 DCHECK(!is_closed_);
434 435
435 is_closed_ = true; 436 is_closed_ = true;
436 CancelAllAwakablesNoLock(); 437 CancelAllAwakablesNoLock();
437 CloseImplNoLock(); 438 CloseImplNoLock();
438 } 439 }
439 440
440 scoped_refptr<Dispatcher> 441 scoped_refptr<Dispatcher>
441 Dispatcher::CreateEquivalentDispatcherAndCloseNoLock() { 442 Dispatcher::CreateEquivalentDispatcherAndCloseNoLock() {
442 mutex_.AssertHeld(); 443 lock_.AssertAcquired();
443 DCHECK(!is_closed_); 444 DCHECK(!is_closed_);
444 445
445 is_closed_ = true; 446 is_closed_ = true;
446 CancelAllAwakablesNoLock(); 447 CancelAllAwakablesNoLock();
447 return CreateEquivalentDispatcherAndCloseImplNoLock(); 448 return CreateEquivalentDispatcherAndCloseImplNoLock();
448 } 449 }
449 450
450 void Dispatcher::StartSerialize(Channel* channel, 451 void Dispatcher::StartSerialize(size_t* max_size,
451 size_t* max_size,
452 size_t* max_platform_handles) { 452 size_t* max_platform_handles) {
453 DCHECK(channel);
454 DCHECK(max_size); 453 DCHECK(max_size);
455 DCHECK(max_platform_handles); 454 DCHECK(max_platform_handles);
456 DCHECK(HasOneRef()); // Only one ref => no need to take the lock.
457 DCHECK(!is_closed_); 455 DCHECK(!is_closed_);
458 StartSerializeImplNoLock(channel, max_size, max_platform_handles); 456 StartSerializeImplNoLock(max_size, max_platform_handles);
459 } 457 }
460 458
461 bool Dispatcher::EndSerializeAndClose( 459 bool Dispatcher::EndSerializeAndClose(void* destination,
462 Channel* channel, 460 size_t* actual_size,
463 void* destination, 461 PlatformHandleVector* platform_handles) {
464 size_t* actual_size,
465 embedder::PlatformHandleVector* platform_handles) {
466 DCHECK(channel);
467 DCHECK(actual_size); 462 DCHECK(actual_size);
468 DCHECK(HasOneRef()); // Only one ref => no need to take the lock.
469 DCHECK(!is_closed_); 463 DCHECK(!is_closed_);
470 464
471 // Like other |...Close()| methods, we mark ourselves as closed before calling 465 // Like other |...Close()| methods, we mark ourselves as closed before calling
472 // the impl. But there's no need to cancel waiters: we shouldn't have any (and 466 // the impl. But there's no need to cancel waiters: we shouldn't have any (and
473 // shouldn't be in |Core|'s handle table). 467 // shouldn't be in |Core|'s handle table).
474 is_closed_ = true; 468 is_closed_ = true;
475 469
476 #if !defined(NDEBUG) 470 #if !defined(NDEBUG)
477 // See the comment above |EndSerializeAndCloseImplNoLock()|. In brief: Locking 471 // See the comment above |EndSerializeAndCloseImplNoLock()|. In brief: Locking
478 // isn't actually needed, but we need to satisfy assertions (which we don't 472 // isn't actually needed, but we need to satisfy assertions (which we don't
479 // want to remove or weaken). 473 // want to remove or weaken).
480 MutexLocker locker(&mutex_); 474 base::AutoLock locker(lock_);
481 #endif 475 #endif
482 476
483 return EndSerializeAndCloseImplNoLock(channel, destination, actual_size, 477 return EndSerializeAndCloseImplNoLock(destination, actual_size,
484 platform_handles); 478 platform_handles);
485 } 479 }
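
A hypothetical caller sketch of the channel-free serialization flow, using the |TransportDataAccess| signatures from this diff (plus <vector>). Whether a given caller may use these accessors depends on friend access not shown here, and the caller is assumed to hold the only reference to the dispatcher, matching the |HasOneRef()| checks above.

bool SerializeAndCloseSketch(Dispatcher* dispatcher,
                             std::vector<char>* buffer,
                             PlatformHandleVector* platform_handles) {
  size_t max_size = 0;
  size_t max_platform_handles = 0;
  Dispatcher::TransportDataAccess::StartSerialize(dispatcher, &max_size,
                                                  &max_platform_handles);

  buffer->resize(max_size);
  size_t actual_size = 0;
  bool ok = Dispatcher::TransportDataAccess::EndSerializeAndClose(
      dispatcher, buffer->data(), &actual_size, platform_handles);
  buffer->resize(actual_size);
  return ok;  // false: the type doesn't support serialization (but was still closed)
}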
486 480
487 // DispatcherTransport --------------------------------------------------------- 481 // DispatcherTransport ---------------------------------------------------------
488 482
489 void DispatcherTransport::End() { 483 void DispatcherTransport::End() {
490 DCHECK(dispatcher_); 484 DCHECK(dispatcher_);
491 dispatcher_->mutex_.Unlock(); 485 dispatcher_->lock_.Release();
486
487 dispatcher_->TransportEnded();
488
492 dispatcher_ = nullptr; 489 dispatcher_ = nullptr;
493 } 490 }
494 491
495 } // namespace system 492 } // namespace edk
496 } // namespace mojo 493 } // namespace mojo
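
A hypothetical caller-side sketch of the transport lifecycle after this change, using names from the diff (error handling omitted): the transport holds the dispatcher lock between start and |End()|, and the new |TransportStarted()|/|TransportEnded()| hooks fire at either end.

void AttachDispatcherSketch(Dispatcher* dispatcher) {
  // Blocks until the dispatcher lock is held (firing TransportStarted()).
  DispatcherTransport transport =
      test::DispatcherTryStartTransport(dispatcher);
  // ... serialize the dispatcher or attach it to an outgoing message ...
  // Releases the lock, then fires TransportEnded().
  transport.End();
}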