Chromium Code Reviews

Side by Side Diff: trunk/src/base/message_loop/message_pump_win.cc

Issue 16897005: Revert 206507 "Move message_pump to base/message_loop." (Closed) Base URL: svn://svn.chromium.org/chrome/
Patch Set: Created 7 years, 6 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/message_loop/message_pump_win.h"
6
7 #include <math.h>
8
9 #include "base/debug/trace_event.h"
10 #include "base/message_loop/message_loop.h"
11 #include "base/metrics/histogram.h"
12 #include "base/process_util.h"
13 #include "base/stringprintf.h"
14 #include "base/win/wrapped_window_proc.h"
15
16 namespace base {
17
18 namespace {
19
20 enum MessageLoopProblems {
21 MESSAGE_POST_ERROR,
22 COMPLETION_POST_ERROR,
23 SET_TIMER_ERROR,
24 MESSAGE_LOOP_PROBLEM_MAX,
25 };
26
27 } // namespace
28
29 static const wchar_t kWndClassFormat[] = L"Chrome_MessagePumpWindow_%p";
30
31 // Message sent to get an additional time slice for pumping (processing) another
32 // task (a series of such messages creates a continuous task pump).
33 static const int kMsgHaveWork = WM_USER + 1;
34
35 //-----------------------------------------------------------------------------
36 // MessagePumpWin public:
37
38 void MessagePumpWin::AddObserver(MessagePumpObserver* observer) {
39 observers_.AddObserver(observer);
40 }
41
42 void MessagePumpWin::RemoveObserver(MessagePumpObserver* observer) {
43 observers_.RemoveObserver(observer);
44 }
45
46 void MessagePumpWin::WillProcessMessage(const MSG& msg) {
47 FOR_EACH_OBSERVER(MessagePumpObserver, observers_, WillProcessEvent(msg));
48 }
49
50 void MessagePumpWin::DidProcessMessage(const MSG& msg) {
51 FOR_EACH_OBSERVER(MessagePumpObserver, observers_, DidProcessEvent(msg));
52 }
53
54 void MessagePumpWin::RunWithDispatcher(
55 Delegate* delegate, MessagePumpDispatcher* dispatcher) {
56 RunState s;
57 s.delegate = delegate;
58 s.dispatcher = dispatcher;
59 s.should_quit = false;
60 s.run_depth = state_ ? state_->run_depth + 1 : 1;
61
62 RunState* previous_state = state_;
63 state_ = &s;
64
65 DoRunLoop();
66
67 state_ = previous_state;
68 }
69
70 void MessagePumpWin::Quit() {
71 DCHECK(state_);
72 state_->should_quit = true;
73 }
74
75 //-----------------------------------------------------------------------------
76 // MessagePumpWin protected:
77
78 int MessagePumpWin::GetCurrentDelay() const {
79 if (delayed_work_time_.is_null())
80 return -1;
81
82 // Be careful here. TimeDelta has a precision of microseconds, but we want a
83 // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
84 // 6? It should be 6 to avoid executing delayed work too early.
85 double timeout =
86 ceil((delayed_work_time_ - TimeTicks::Now()).InMillisecondsF());
87
88 // If this value is negative, then we need to run delayed work soon.
89 int delay = static_cast<int>(timeout);
90 if (delay < 0)
91 delay = 0;
92
93 return delay;
94 }
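
A minimal standalone sketch of the rounding rule described in the comment above, using hypothetical values (the helper name is illustrative and not part of the file under review): round the remaining delay up to whole milliseconds so delayed work never runs early, and clamp already-due work to zero.

  #include <math.h>

  // 5.5 ms remaining -> wait 6 ms; -2.0 ms (already due) -> wait 0 ms.
  int DelayInMilliseconds(double remaining_ms) {
    int delay = static_cast<int>(ceil(remaining_ms));
    return delay < 0 ? 0 : delay;
  }
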
95
96 //-----------------------------------------------------------------------------
97 // MessagePumpForUI public:
98
99 MessagePumpForUI::MessagePumpForUI()
100 : atom_(0),
101 message_filter_(new MessageFilter) {
102 InitMessageWnd();
103 }
104
105 MessagePumpForUI::~MessagePumpForUI() {
106 DestroyWindow(message_hwnd_);
107 UnregisterClass(MAKEINTATOM(atom_),
108 GetModuleFromAddress(&WndProcThunk));
109 }
110
111 void MessagePumpForUI::ScheduleWork() {
112 if (InterlockedExchange(&have_work_, 1))
113 return; // Someone else continued the pumping.
114
115 // Make sure the MessagePump does some work for us.
116 BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
117 reinterpret_cast<WPARAM>(this), 0);
118 if (ret)
119 return; // There was room in the Window Message queue.
120
121 // We have failed to insert a have-work message, so there is a chance that we
122 // will starve tasks/timers while sitting in a nested message loop. Nested
123 // loops only look at Windows Message queues, and don't look at *our* task
124 // queues, etc., so we might not get a time slice in them. :-(
125 // We could abort here, but the fear is that this failure mode is plausibly
126 // common (the queue is full at about 2000 messages), so we'll do a near-graceful
127 // recovery. Nested loops are pretty transient (we think), so this will
128 // probably be recoverable.
129 InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert.
130 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
131 MESSAGE_LOOP_PROBLEM_MAX);
132 }
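
A reduced sketch of the wake-up coalescing pattern ScheduleWork() relies on, assuming a hypothetical pump class (PumpSketch, PostWakeUp, and the member layout are illustrative, not the actual MessagePumpForUI API): only the first caller after the flag was cleared posts a wake-up, so at most one kMsgHaveWork is ever pending.

  #include <windows.h>

  class PumpSketch {
   public:
    PumpSketch() : have_work_(0) {}

    void ScheduleWork() {
      if (InterlockedExchange(&have_work_, 1))
        return;              // A wake-up is already pending; nothing to do.
      PostWakeUp();          // e.g. PostMessage(hwnd, kMsgHaveWork, ...).
    }

    void OnWakeUp() {
      // Clearing the flag lets the next ScheduleWork() post a fresh wake-up.
      InterlockedExchange(&have_work_, 0);
      // ... run one batch of tasks ...
    }

   private:
    void PostWakeUp() {}     // Stub standing in for the actual post.
    volatile LONG have_work_;
  };
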
133
134 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
135 //
136 // We would *like* to provide high resolution timers. Windows timers using
137 // SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup
138 // mechanism because the application can enter modal windows loops where it
139 // is not running our MessageLoop; the only way to have our timers fire in
140 // these cases is to post messages there.
141 //
142 // To provide sub-10ms timers, we process timers directly from our run loop.
143 // For the common case, timers will be processed there as the run loop does
144 // its normal work. However, we *also* set the system timer so that WM_TIMER
145 // events fire. This mops up the case of timers not being able to work in
146 // modal message loops. It is possible for the SetTimer to pop and have no
147 // pending timers, because they could have already been processed by the
148 // run loop itself.
149 //
150 // We use a single SetTimer corresponding to the timer that will expire
151 // soonest. As new timers are created and destroyed, we update SetTimer.
152 // Getting a spurious SetTimer event firing is benign, as we'll just be
153 // processing an empty timer queue.
154 //
155 delayed_work_time_ = delayed_work_time;
156
157 int delay_msec = GetCurrentDelay();
158 DCHECK_GE(delay_msec, 0);
159 if (delay_msec < USER_TIMER_MINIMUM)
160 delay_msec = USER_TIMER_MINIMUM;
161
162 // Create a WM_TIMER event that will wake us up to check for any pending
163 // timers (in case we are running within a nested, external sub-pump).
164 BOOL ret = SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
165 delay_msec, NULL);
166 if (ret)
167 return;
168 // If we can't set timers, we are in big trouble... but cross our fingers for
169 // now.
170 // TODO(jar): If we don't see this error, use a CHECK() here instead.
171 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", SET_TIMER_ERROR,
172 MESSAGE_LOOP_PROBLEM_MAX);
173 }
174
175 void MessagePumpForUI::PumpOutPendingPaintMessages() {
176 // If we are being called outside of the context of Run, then don't try to do
177 // any work.
178 if (!state_)
179 return;
180
181 // Create a mini-message-pump to force immediate processing of only Windows
182 // WM_PAINT messages. Don't provide an infinite loop, but do enough peeking
183 // to get the job done. Actual common max is 4 peeks, but we'll be a little
184 // safe here.
185 const int kMaxPeekCount = 20;
186 int peek_count;
187 for (peek_count = 0; peek_count < kMaxPeekCount; ++peek_count) {
188 MSG msg;
189 if (!PeekMessage(&msg, NULL, 0, 0, PM_REMOVE | PM_QS_PAINT))
190 break;
191 ProcessMessageHelper(msg);
192 if (state_->should_quit) // Handle WM_QUIT.
193 break;
194 }
195 // Histogram what was really being used, to help adjust kMaxPeekCount.
196 DHISTOGRAM_COUNTS("Loop.PumpOutPendingPaintMessages Peeks", peek_count);
197 }
198
199 //-----------------------------------------------------------------------------
200 // MessagePumpForUI private:
201
202 // static
203 LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
204 HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
205 switch (message) {
206 case kMsgHaveWork:
207 reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
208 break;
209 case WM_TIMER:
210 reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
211 break;
212 }
213 return DefWindowProc(hwnd, message, wparam, lparam);
214 }
215
216 void MessagePumpForUI::DoRunLoop() {
217 // If this were just a simple PeekMessage() loop (servicing all possible work
218 // queues), then Windows would try to achieve the following order (according
219 // to MSDN documentation about PeekMessage with no filter):
220 // * Sent messages
221 // * Posted messages
222 // * Sent messages (again)
223 // * WM_PAINT messages
224 // * WM_TIMER messages
225 //
226 // Summary: none of the above classes is starved, and sent messages have twice
227 // the chance of being processed (i.e., reduced service time).
228
229 for (;;) {
230 // If we do any work, we may create more messages etc., and more work may
231 // possibly be waiting in another task group. When we (for example)
232 // ProcessNextWindowsMessage(), there is a good chance there are still more
233 // messages waiting. On the other hand, when any of these methods return
234 // having done no work, then it is pretty unlikely that calling them again
235 // quickly will find any work to do. Finally, if they all say they had no
236 // work, then it is a good time to consider sleeping (waiting) for more
237 // work.
238
239 bool more_work_is_plausible = ProcessNextWindowsMessage();
240 if (state_->should_quit)
241 break;
242
243 more_work_is_plausible |= state_->delegate->DoWork();
244 if (state_->should_quit)
245 break;
246
247 more_work_is_plausible |=
248 state_->delegate->DoDelayedWork(&delayed_work_time_);
249 // If we did not process any delayed work, then we can assume that our
250 // existing WM_TIMER (if any) will fire when delayed work should run. We
251 // don't want to disturb that timer if it is already in flight. However,
252 // if we did do all remaining delayed work, then let's kill the WM_TIMER.
253 if (more_work_is_plausible && delayed_work_time_.is_null())
254 KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
255 if (state_->should_quit)
256 break;
257
258 if (more_work_is_plausible)
259 continue;
260
261 more_work_is_plausible = state_->delegate->DoIdleWork();
262 if (state_->should_quit)
263 break;
264
265 if (more_work_is_plausible)
266 continue;
267
268 WaitForWork(); // Wait (sleep) until we have work to do again.
269 }
270 }
271
272 void MessagePumpForUI::InitMessageWnd() {
273 // Generate a unique window class name.
274 string16 class_name = StringPrintf(kWndClassFormat, this);
275
276 HINSTANCE instance = GetModuleFromAddress(&WndProcThunk);
277 WNDCLASSEX wc = {0};
278 wc.cbSize = sizeof(wc);
279 wc.lpfnWndProc = base::win::WrappedWindowProc<WndProcThunk>;
280 wc.hInstance = instance;
281 wc.lpszClassName = class_name.c_str();
282 atom_ = RegisterClassEx(&wc);
283 DCHECK(atom_);
284
285 message_hwnd_ = CreateWindow(MAKEINTATOM(atom_), 0, 0, 0, 0, 0, 0,
286 HWND_MESSAGE, 0, instance, 0);
287 DCHECK(message_hwnd_);
288 }
289
290 void MessagePumpForUI::WaitForWork() {
291 // Wait until a message is available, up to the time needed by the timer
292 // manager to fire the next set of timers.
293 int delay = GetCurrentDelay();
294 if (delay < 0) // Negative value means no timers waiting.
295 delay = INFINITE;
296
297 DWORD result;
298 result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT,
299 MWMO_INPUTAVAILABLE);
300
301 if (WAIT_OBJECT_0 == result) {
302 // A WM_* message is available.
303 // If a parent-child relationship exists between windows across threads
304 // then their thread inputs are implicitly attached.
305 // This causes the MsgWaitForMultipleObjectsEx API to return indicating
306 // that messages are ready for processing (specifically, mouse messages
307 // intended for the child window may appear if the child window has
308 // capture).
309 // The subsequent PeekMessage call may fail to return any messages, thus
310 // causing us to enter a tight loop at times.
311 // The WaitMessage call below is a workaround to give the child window
312 // some time to process its input messages.
313 MSG msg = {0};
314 DWORD queue_status = GetQueueStatus(QS_MOUSE);
315 if (HIWORD(queue_status) & QS_MOUSE &&
316 !PeekMessage(&msg, NULL, WM_MOUSEFIRST, WM_MOUSELAST, PM_NOREMOVE)) {
317 WaitMessage();
318 }
319 return;
320 }
321
322 DCHECK_NE(WAIT_FAILED, result) << GetLastError();
323 }
324
325 void MessagePumpForUI::HandleWorkMessage() {
326 // If we are being called outside of the context of Run, then don't try to do
327 // any work. This could correspond to a MessageBox call or something of that
328 // sort.
329 if (!state_) {
330 // Since we handled a kMsgHaveWork message, we must still update this flag.
331 InterlockedExchange(&have_work_, 0);
332 return;
333 }
334
335 // Let whatever would have run had we not been putting messages in the queue
336 // run now. This is an attempt to make our dummy message not starve other
337 // messages that may be in the Windows message queue.
338 ProcessPumpReplacementMessage();
339
340 // Now give the delegate a chance to do some work. It'll let us know if it
341 // needs to do more work.
342 if (state_->delegate->DoWork())
343 ScheduleWork();
344 }
345
346 void MessagePumpForUI::HandleTimerMessage() {
347 KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
348
349 // If we are being called outside of the context of Run, then don't do
350 // anything. This could correspond to a MessageBox call or something of
351 // that sort.
352 if (!state_)
353 return;
354
355 state_->delegate->DoDelayedWork(&delayed_work_time_);
356 if (!delayed_work_time_.is_null()) {
357 // A bit gratuitous to set delayed_work_time_ again, but oh well.
358 ScheduleDelayedWork(delayed_work_time_);
359 }
360 }
361
362 bool MessagePumpForUI::ProcessNextWindowsMessage() {
363 // If there are sent messages in the queue then PeekMessage internally
364 // dispatches the message and returns false. We return true in this
365 // case to ensure that the message loop peeks again instead of calling
366 // MsgWaitForMultipleObjectsEx again.
367 bool sent_messages_in_queue = false;
368 DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
369 if (HIWORD(queue_status) & QS_SENDMESSAGE)
370 sent_messages_in_queue = true;
371
372 MSG msg;
373 if (message_filter_->DoPeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
374 return ProcessMessageHelper(msg);
375
376 return sent_messages_in_queue;
377 }
378
379 bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
380 TRACE_EVENT1("base", "MessagePumpForUI::ProcessMessageHelper",
381 "message", msg.message);
382 if (WM_QUIT == msg.message) {
383 // Repost the QUIT message so that it will be retrieved by the primary
384 // GetMessage() loop.
385 state_->should_quit = true;
386 PostQuitMessage(static_cast<int>(msg.wParam));
387 return false;
388 }
389
390 // While running our main message pump, we discard kMsgHaveWork messages.
391 if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
392 return ProcessPumpReplacementMessage();
393
394 if (CallMsgFilter(const_cast<MSG*>(&msg), kMessageFilterCode))
395 return true;
396
397 WillProcessMessage(msg);
398
399 if (!message_filter_->ProcessMessage(msg)) {
400 if (state_->dispatcher) {
401 if (!state_->dispatcher->Dispatch(msg))
402 state_->should_quit = true;
403 } else {
404 TranslateMessage(&msg);
405 DispatchMessage(&msg);
406 }
407 }
408
409 DidProcessMessage(msg);
410 return true;
411 }
412
413 bool MessagePumpForUI::ProcessPumpReplacementMessage() {
414 // When we encounter a kMsgHaveWork message, this method is called to peek
415 // and process a replacement message, such as a WM_PAINT or WM_TIMER. The
416 // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
417 // a continuous stream of such messages is posted. This method carefully
418 // peeks a message while there is no chance for a kMsgHaveWork to be pending,
419 // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
420 // possibly be posted), and finally dispatches that peeked replacement. Note
421 // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
422
423 bool have_message = false;
424 MSG msg;
425 // We should not process all window messages if we are in the context of an
426 // OS modal loop, i.e. in the context of a windows API call like MessageBox.
427 // This is to ensure that these messages are peeked out by the OS modal loop.
428 if (MessageLoop::current()->os_modal_loop()) {
429 // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
430 have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
431 PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
432 } else {
433 have_message = !!message_filter_->DoPeekMessage(&msg, NULL, 0, 0,
434 PM_REMOVE);
435 }
436
437 DCHECK(!have_message || kMsgHaveWork != msg.message ||
438 msg.hwnd != message_hwnd_);
439
440 // Since we discarded a kMsgHaveWork message, we must update the flag.
441 int old_have_work = InterlockedExchange(&have_work_, 0);
442 DCHECK(old_have_work);
443
444 // We don't need a special time slice if there was no message to process.
445 if (!have_message)
446 return false;
447
448 // Guarantee we'll get another time slice in the case where we go into native
449 // windows code. This ScheduleWork() may hurt performance a tiny bit when
450 // tasks appear very infrequently, but when the event queue is busy, the
451 // kMsgHaveWork events get (percentage wise) rarer and rarer.
452 ScheduleWork();
453 return ProcessMessageHelper(msg);
454 }
455
456 void MessagePumpForUI::SetMessageFilter(
457 scoped_ptr<MessageFilter> message_filter) {
458 message_filter_ = message_filter.Pass();
459 }
460
461 //-----------------------------------------------------------------------------
462 // MessagePumpForIO public:
463
464 MessagePumpForIO::MessagePumpForIO() {
465 port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1));
466 DCHECK(port_.IsValid());
467 }
468
469 void MessagePumpForIO::ScheduleWork() {
470 if (InterlockedExchange(&have_work_, 1))
471 return; // Someone else continued the pumping.
472
473 // Make sure the MessagePump does some work for us.
474 BOOL ret = PostQueuedCompletionStatus(port_, 0,
475 reinterpret_cast<ULONG_PTR>(this),
476 reinterpret_cast<OVERLAPPED*>(this));
477 if (ret)
478 return; // Post worked perfectly.
479
480 // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
481 InterlockedExchange(&have_work_, 0); // Clarify that we didn't succeed.
482 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
483 MESSAGE_LOOP_PROBLEM_MAX);
484 }
485
486 void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
487 // We know that we can't be blocked right now since this method can only be
488 // called on the same thread as Run, so we only need to update our record of
489 // how long to sleep when we do sleep.
490 delayed_work_time_ = delayed_work_time;
491 }
492
493 void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
494 IOHandler* handler) {
495 ULONG_PTR key = HandlerToKey(handler, true);
496 HANDLE port = CreateIoCompletionPort(file_handle, port_, key, 1);
497 DPCHECK(port);
498 }
499
500 bool MessagePumpForIO::RegisterJobObject(HANDLE job_handle,
501 IOHandler* handler) {
502 // Job object notifications use the OVERLAPPED pointer to carry the message
503 // data. Mark the completion key correspondingly, so we will not try to
504 // convert OVERLAPPED* to IOContext*.
505 ULONG_PTR key = HandlerToKey(handler, false);
506 JOBOBJECT_ASSOCIATE_COMPLETION_PORT info;
507 info.CompletionKey = reinterpret_cast<void*>(key);
508 info.CompletionPort = port_;
509 return SetInformationJobObject(job_handle,
510 JobObjectAssociateCompletionPortInformation,
511 &info,
512 sizeof(info)) != FALSE;
513 }
514
515 //-----------------------------------------------------------------------------
516 // MessagePumpForIO private:
517
518 void MessagePumpForIO::DoRunLoop() {
519 for (;;) {
520 // If we do any work, we may create more messages etc., and more work may
521 // possibly be waiting in another task group. When we (for example)
522 // WaitForIOCompletion(), there is a good chance there are still more
523 // messages waiting. On the other hand, when any of these methods return
524 // having done no work, then it is pretty unlikely that calling them
525 // again quickly will find any work to do. Finally, if they all say they
526 // had no work, then it is a good time to consider sleeping (waiting) for
527 // more work.
528
529 bool more_work_is_plausible = state_->delegate->DoWork();
530 if (state_->should_quit)
531 break;
532
533 more_work_is_plausible |= WaitForIOCompletion(0, NULL);
534 if (state_->should_quit)
535 break;
536
537 more_work_is_plausible |=
538 state_->delegate->DoDelayedWork(&delayed_work_time_);
539 if (state_->should_quit)
540 break;
541
542 if (more_work_is_plausible)
543 continue;
544
545 more_work_is_plausible = state_->delegate->DoIdleWork();
546 if (state_->should_quit)
547 break;
548
549 if (more_work_is_plausible)
550 continue;
551
552 WaitForWork(); // Wait (sleep) until we have work to do again.
553 }
554 }
555
556 // Wait until IO completes, up to the time needed by the timer manager to fire
557 // the next set of timers.
558 void MessagePumpForIO::WaitForWork() {
559 // We do not support nested IO message loops. This is to avoid messy
560 // recursion problems.
561 DCHECK_EQ(1, state_->run_depth) << "Cannot nest an IO message loop!";
562
563 int timeout = GetCurrentDelay();
564 if (timeout < 0) // Negative value means no timers waiting.
565 timeout = INFINITE;
566
567 WaitForIOCompletion(timeout, NULL);
568 }
569
570 bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
571 IOItem item;
572 if (completed_io_.empty() || !MatchCompletedIOItem(filter, &item)) {
573 // We have to ask the system for another IO completion.
574 if (!GetIOItem(timeout, &item))
575 return false;
576
577 if (ProcessInternalIOItem(item))
578 return true;
579 }
580
581 // If |item.has_valid_io_context| is false then |item.context| does not point
582 // to a context structure, and so should not be dereferenced, although it may
583 // still hold valid non-pointer data.
584 if (!item.has_valid_io_context || item.context->handler) {
585 if (filter && item.handler != filter) {
586 // Save this item for later
587 completed_io_.push_back(item);
588 } else {
589 DCHECK(!item.has_valid_io_context ||
590 (item.context->handler == item.handler));
591 WillProcessIOEvent();
592 item.handler->OnIOCompleted(item.context, item.bytes_transfered,
593 item.error);
594 DidProcessIOEvent();
595 }
596 } else {
597 // The handler must be gone by now; just clean up the mess.
598 delete item.context;
599 }
600 return true;
601 }
602
603 // Asks the OS for another IO completion result.
604 bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
605 memset(item, 0, sizeof(*item));
606 ULONG_PTR key = NULL;
607 OVERLAPPED* overlapped = NULL;
608 if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
609 &overlapped, timeout)) {
610 if (!overlapped)
611 return false; // Nothing in the queue.
612 item->error = GetLastError();
613 item->bytes_transfered = 0;
614 }
615
616 item->handler = KeyToHandler(key, &item->has_valid_io_context);
617 item->context = reinterpret_cast<IOContext*>(overlapped);
618 return true;
619 }
620
621 bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
622 if (this == reinterpret_cast<MessagePumpForIO*>(item.context) &&
623 this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
624 // This is our internal completion.
625 DCHECK(!item.bytes_transfered);
626 InterlockedExchange(&have_work_, 0);
627 return true;
628 }
629 return false;
630 }
631
632 // Returns a completion item that was previously received.
633 bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
634 DCHECK(!completed_io_.empty());
635 for (std::list<IOItem>::iterator it = completed_io_.begin();
636 it != completed_io_.end(); ++it) {
637 if (!filter || it->handler == filter) {
638 *item = *it;
639 completed_io_.erase(it);
640 return true;
641 }
642 }
643 return false;
644 }
645
646 void MessagePumpForIO::AddIOObserver(IOObserver *obs) {
647 io_observers_.AddObserver(obs);
648 }
649
650 void MessagePumpForIO::RemoveIOObserver(IOObserver *obs) {
651 io_observers_.RemoveObserver(obs);
652 }
653
654 void MessagePumpForIO::WillProcessIOEvent() {
655 FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent());
656 }
657
658 void MessagePumpForIO::DidProcessIOEvent() {
659 FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent());
660 }
661
662 // static
663 ULONG_PTR MessagePumpForIO::HandlerToKey(IOHandler* handler,
664 bool has_valid_io_context) {
665 ULONG_PTR key = reinterpret_cast<ULONG_PTR>(handler);
666
667 // |IOHandler| is at least pointer-size aligned, so the lowest two bits are
668 // always cleared. We use the lowest bit to distinguish completion keys with
669 // and without the associated |IOContext|.
670 DCHECK((key & 1) == 0);
671
672 // Mark the completion key as context-less.
673 if (!has_valid_io_context)
674 key = key | 1;
675 return key;
676 }
677
678 // static
679 MessagePumpForIO::IOHandler* MessagePumpForIO::KeyToHandler(
680 ULONG_PTR key,
681 bool* has_valid_io_context) {
682 *has_valid_io_context = ((key & 1) == 0);
683 return reinterpret_cast<IOHandler*>(key & ~static_cast<ULONG_PTR>(1));
684 }
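
A self-contained sketch of the low-bit pointer-tagging scheme that HandlerToKey()/KeyToHandler() implement above, with a hypothetical Handler type standing in for IOHandler (names are illustrative): because handler objects are at least pointer-size aligned, the lowest address bit is free to carry the "no IO context" marker.

  #include <assert.h>
  #include <stdint.h>

  struct Handler { int dummy; };  // Hypothetical stand-in for IOHandler.

  uintptr_t ToKey(Handler* handler, bool has_valid_io_context) {
    uintptr_t key = reinterpret_cast<uintptr_t>(handler);
    assert((key & 1) == 0);       // Relies on at least 2-byte alignment.
    return has_valid_io_context ? key : (key | 1);  // Tag context-less keys.
  }

  Handler* FromKey(uintptr_t key, bool* has_valid_io_context) {
    *has_valid_io_context = ((key & 1) == 0);
    return reinterpret_cast<Handler*>(key & ~static_cast<uintptr_t>(1));
  }
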
685
686 } // namespace base