Chromium Code Reviews

Index: base/message_pump_win.cc
diff --git a/base/message_pump_win.cc b/base/message_pump_win.cc
index 4658573f5b9ef75cdc380da5bc0eae7e6820714c..b61d0d225aba7d83b05ae418351e33e26dce1e2b 100644
--- a/base/message_pump_win.cc
+++ b/base/message_pump_win.cc
@@ -22,6 +22,18 @@ enum MessageLoopProblems {
   MESSAGE_LOOP_PROBLEM_MAX,
 };
 
+// Possible states of the message pump:
+// - kPumpIdle: the thread is sleeping, waiting for work to be posted.
+// - kPumpHaveWork: a window message or completion packet has been posted.
+// - kPumpDisabled: the pump is disabled, no more work can be posted.
+static const LONG kPumpIdle = 0;
rvargas (doing something else) 2013/06/12 00:22:22:
  nit: don't need static
alexeypa (please no reviews) 2013/06/12 18:05:22:
  Done.
+static const LONG kPumpHaveWork = 1;
+static const LONG kPumpDisabled = 2;
+
+// Used to wake up the message loop thread in the case if it is waiting for
rvargas (doing something else) 2013/06/12 00:22:22:
  nit: remove "in the case"
alexeypa (please no reviews) 2013/06/12 18:05:22:
  Done.
+// another thread to exit MessagePumpForUI::ScheduleWork().
+VOID CALLBACK DummyApc(ULONG_PTR) {}
+
 }  // namespace
 
 namespace base {
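
The three kPump* constants above define a small lock-free state machine that the rest of this patch drives with interlocked operations. A minimal sketch of the intended transitions; the helper names are illustrative, not part of the CL:

    #include <windows.h>

    static const LONG kPumpIdle = 0;
    static const LONG kPumpHaveWork = 1;
    static const LONG kPumpDisabled = 2;

    volatile LONG pump_state = kPumpIdle;

    // Producer side (any thread): kPumpIdle -> kPumpHaveWork. Fails if work
    // is already signaled or the pump has been disabled for shutdown.
    bool TrySignalWork() {
      return InterlockedCompareExchange(&pump_state, kPumpHaveWork,
                                        kPumpIdle) == kPumpIdle;
    }

    // Pump side (message loop thread): kPumpHaveWork -> kPumpIdle, once the
    // posted kMsgHaveWork message or completion packet has been consumed.
    void AckWork() {
      InterlockedExchange(&pump_state, kPumpIdle);
    }

    // Shutdown (message loop thread): kPumpIdle -> kPumpDisabled. Afterwards
    // TrySignalWork() always fails, so no new wake-ups can be posted.
    bool TryDisable() {
      return InterlockedCompareExchange(&pump_state, kPumpDisabled,
                                        kPumpIdle) == kPumpIdle;
    }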
@@ -35,6 +47,8 @@ static const int kMsgHaveWork = WM_USER + 1;
 //-----------------------------------------------------------------------------
 // MessagePumpWin public:
 
+MessagePumpWin::MessagePumpWin() : pump_state_(kPumpIdle), state_(NULL) {}
+
 void MessagePumpWin::AddObserver(MessagePumpObserver* observer) {
   observers_.AddObserver(observer);
 }
@@ -99,24 +113,33 @@ int MessagePumpWin::GetCurrentDelay() const {
 MessagePumpForUI::MessagePumpForUI()
     : atom_(0),
       message_filter_(new MessageFilter) {
+  CHECK(DuplicateHandle(GetCurrentProcess(),
+                        GetCurrentThread(),
+                        GetCurrentProcess(),
+                        thread_.Receive(),
+                        THREAD_SET_CONTEXT,
+                        FALSE,  // no not inherit this handle
rvargas (doing something else) 2013/06/12 00:22:22:
  fix comment
alexeypa (please no reviews) 2013/06/12 18:05:22:
  Done.
+                        0));
   InitMessageWnd();
 }
 
 MessagePumpForUI::~MessagePumpForUI() {
-  DestroyWindow(message_hwnd_);
-  UnregisterClass(MAKEINTATOM(atom_),
-                  base::GetModuleFromAddress(&WndProcThunk));
+  DCHECK(!atom_);
+  DCHECK(!message_hwnd_);
 }
 
 void MessagePumpForUI::ScheduleWork() {
-  if (InterlockedExchange(&have_work_, 1))
-    return;  // Someone else continued the pumping.
+  if (InterlockedCompareExchange(&pump_state_, kPumpHaveWork,
+                                 kPumpIdle) != kPumpIdle) {
+    // Either someone else continued the pumping or the pump is disabled.
+    return;
+  }
 
   // Make sure the MessagePump does some work for us.
-  BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
-                         reinterpret_cast<WPARAM>(this), 0);
-  if (ret)
-    return;  // There was room in the Window Message queue.
+  if (PostMessage(message_hwnd_, kMsgHaveWork, reinterpret_cast<WPARAM>(this),
+                  0)) {
+    return;
+  }
 
   // We have failed to insert a have-work message, so there is a chance that we
   // will starve tasks/timers while sitting in a nested message loop.  Nested
@@ -126,9 +149,24 @@ void MessagePumpForUI::ScheduleWork() {
   // common (queue is full, of about 2000 messages), so we'll do a near-graceful
   // recovery.  Nested loops are pretty transient (we think), so this will
   // probably be recoverable.
-  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't really insert.
   UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
                             MESSAGE_LOOP_PROBLEM_MAX);
+
+  // Clarify that we didn't really insert.
+  InterlockedExchange(&pump_state_, kPumpIdle);
+
+  // Try to wake up the message loop thread by posting an APC. This might not
+  // work while a nested loop is still running (see the comment above) but it
+  // will unblock WillDestroyCurrentMessageLoop() if it is waiting for
+  // ScheduleWork() to complete.
+  //
+  // According to the UMA metrics, posting an I/O completion packet has a very
+  // low error rate. Queuing an APC hits roughly the same path in the kernel,
+  // so the error rate should be low as well. Given that we do it only when
+  // PostMessage() fails, it should be safe to CHECK() here.
+  CHECK(QueueUserAPC(&DummyApc, thread_, 0));
 }
 
 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
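
The DuplicateHandle() call added to the constructor exists to make this QueueUserAPC() call possible: GetCurrentThread() returns a pseudo-handle that means "the calling thread" only on the thread that calls it, so the pump duplicates it into a real handle with THREAD_SET_CONTEXT access, the access right QueueUserAPC() requires. A self-contained sketch of the wake-up mechanism (the Waiter thread and all names here are illustrative, not part of the CL):

    #include <windows.h>
    #include <stdio.h>

    VOID CALLBACK DummyApc(ULONG_PTR) {}

    DWORD WINAPI Waiter(LPVOID) {
      // An alertable wait; it returns WAIT_IO_COMPLETION after an APC runs.
      DWORD result = SleepEx(INFINITE, TRUE /* alertable */);
      printf("woken by %s\n",
             result == WAIT_IO_COMPLETION ? "an APC" : "something else");
      return 0;
    }

    int main() {
      HANDLE thread = CreateThread(NULL, 0, Waiter, NULL, 0, NULL);
      Sleep(100);  // crude way to let the waiter block; demo only

      // The handle returned by CreateThread() already carries full access.
      // The pump must duplicate its own pseudo-handle instead, because the
      // handle is used from arbitrary other threads.
      QueueUserAPC(&DummyApc, thread, 0);

      WaitForSingleObject(thread, INFINITE);
      CloseHandle(thread);
      return 0;
    }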
@@ -172,6 +210,35 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
                             MESSAGE_LOOP_PROBLEM_MAX);
 }
 
+void MessagePumpForUI::WillDestroyCurrentMessageLoop() {
+  // Disable the message pump. If |pump_state_ == kPumpHaveWork| then
+  // ScheduleWork() might still be running on a different thread. Wait until
+  // |kMsgHaveWork| is received or |pump_state_| is reset back to |kPumpIdle|.
+  while (InterlockedCompareExchange(&pump_state_, kPumpDisabled,
+                                    kPumpIdle) == kPumpHaveWork) {
+    MSG msg;
+    if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
+      if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_) {
+        // Now that we received |kMsgHaveWork| the pump can be safely disabled.
+        InterlockedExchange(&pump_state_, kPumpDisabled);
+        break;
+      }
+    }
+
+    // Wait until |kMsgHaveWork| is posted or an APC is received.
+    WaitForWork();
+  }
+
+  // At this point the pump is disabled and other threads have exited
+  // ScheduleWork().
+  DestroyWindow(message_hwnd_);
+  UnregisterClass(MAKEINTATOM(atom_),
+                  base::GetModuleFromAddress(&WndProcThunk));
+
+  // Let the destructor know that the resources have been freed.
+  message_hwnd_ = NULL;
+  atom_ = 0;
+}
+
 void MessagePumpForUI::PumpOutPendingPaintMessages() {
   // If we are being called outside of the context of Run, then don't try to do
   // any work.
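
The loop above closes a teardown race: a successful kPumpIdle -> kPumpHaveWork exchange in ScheduleWork() means some thread may still be between that exchange and its PostMessage() call, still holding |message_hwnd_|. A two-thread timeline of the race being closed (a sketch, not code from the CL):

    // Thread A (any thread),              Thread B (message loop thread),
    // in ScheduleWork():                  in WillDestroyCurrentMessageLoop():
    //
    // CAS pump_state_:
    //   kPumpIdle -> kPumpHaveWork
    //                                     CAS returns kPumpHaveWork, so
    //                                     thread A may be about to touch
    //                                     |message_hwnd_|; destroying the
    //                                     window now would be a race.
    // PostMessage(message_hwnd_, ...);
    //   on failure: reset to kPumpIdle
    //   and QueueUserAPC() instead
    //                                     PeekMessage()/WaitForWork() until
    //                                     kMsgHaveWork is consumed or the
    //                                     APC ends the alertable wait; only
    //                                     then DestroyWindow(message_hwnd_).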
@@ -297,7 +364,7 @@ void MessagePumpForUI::WaitForWork() {
   DWORD result;
   result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT,
-                                       MWMO_INPUTAVAILABLE);
+                                       MWMO_ALERTABLE | MWMO_INPUTAVAILABLE);
 
   if (WAIT_OBJECT_0 == result) {
     // A WM_* message is available.
@@ -318,6 +385,11 @@ void MessagePumpForUI::WaitForWork() {
       WaitMessage();
     }
     return;
+  } else if (WAIT_IO_COMPLETION == result) {
+    // The wait was ended by one or more APCs. It could be because
+    // MessagePumpForUI::ScheduleWork() is trying to wake up the message loop
+    // thread.
+    return;
   }
 
   DCHECK_NE(WAIT_FAILED, result) << GetLastError();
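
MWMO_ALERTABLE is what connects the two halves of the patch: an alertable wait lets queued user APCs run and then returns WAIT_IO_COMPLETION. A runnable sketch of that behavior (a self-contained demo, not Chromium code):

    #include <windows.h>
    #include <stdio.h>

    VOID CALLBACK Wake(ULONG_PTR) { printf("APC ran\n"); }

    int main() {
      // Queue an APC to the current thread; the pseudo-handle is fine here
      // because the target is the calling thread itself.
      QueueUserAPC(&Wake, GetCurrentThread(), 0);

      // Without MWMO_ALERTABLE this wait would sleep through the pending APC.
      DWORD result = MsgWaitForMultipleObjectsEx(
          0, NULL, INFINITE, QS_ALLINPUT,
          MWMO_ALERTABLE | MWMO_INPUTAVAILABLE);

      if (result == WAIT_IO_COMPLETION)
        printf("wait returned WAIT_IO_COMPLETION\n");  // Wake() already ran
      return 0;
    }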
@@ -329,7 +401,7 @@ void MessagePumpForUI::HandleWorkMessage() {
   // sort.
   if (!state_) {
     // Since we handled a kMsgHaveWork message, we must still update this flag.
-    InterlockedExchange(&have_work_, 0);
+    InterlockedExchange(&pump_state_, kPumpIdle);
     return;
   }
 
@@ -417,7 +489,7 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
   // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
   // a continuous stream of such messages are posted.  This method carefully
   // peeks a message while there is no chance for a kMsgHaveWork to be pending,
-  // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
+  // then resets the pump_state_ flag (allowing a replacement kMsgHaveWork to
rvargas (doing something else) 2013/06/12 00:22:22:
  nit: not a flag to be reset anymore.
alexeypa (please no reviews) 2013/06/12 18:05:22:
  Done.
   // possibly be posted), and finally dispatches that peeked replacement.  Note
   // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
 
@@ -439,7 +511,7 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
           msg.hwnd != message_hwnd_);
 
   // Since we discarded a kMsgHaveWork message, we must update the flag.
-  int old_have_work = InterlockedExchange(&have_work_, 0);
+  int old_have_work = InterlockedExchange(&pump_state_, kPumpIdle);
   DCHECK(old_have_work);
 
   // We don't need a special time slice if we didn't have_message to process.
 
@@ -468,7 +540,7 @@ MessagePumpForIO::MessagePumpForIO() {
 }
 
 void MessagePumpForIO::ScheduleWork() {
-  if (InterlockedExchange(&have_work_, 1))
+  if (InterlockedExchange(&pump_state_, kPumpHaveWork))
     return;  // Someone else continued the pumping.
 
   // Make sure the MessagePump does some work for us.
@@ -479,7 +551,7 @@ void MessagePumpForIO::ScheduleWork() {
     return;  // Post worked perfectly.
 
   // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
-  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't succeed.
+  InterlockedExchange(&pump_state_, kPumpIdle);
   UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
                             MESSAGE_LOOP_PROBLEM_MAX);
 }
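
For the IO pump the wake-up channel is the completion port itself: ScheduleWork() posts an empty completion packet, and ProcessInternalIOItem() below recognizes it by its completion key (the real code uses |this| as the key). A minimal sketch of that mechanism, with an illustrative key value in place of |this|:

    #include <windows.h>
    #include <stdio.h>

    int main() {
      // A fresh port, similar to what MessagePumpForIO's constructor creates.
      HANDLE port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);

      // Producer side: an empty packet whose key tags it as an internal
      // wake-up rather than a real I/O completion.
      const ULONG_PTR kWakeKey = 1;  // illustrative; the pump uses |this|
      PostQueuedCompletionStatus(port, 0, kWakeKey, NULL);

      // Consumer side: the wait returns immediately with our packet.
      DWORD bytes = 0;
      ULONG_PTR key = 0;
      LPOVERLAPPED overlapped = NULL;
      if (GetQueuedCompletionStatus(port, &bytes, &key, &overlapped, INFINITE)) {
        printf("woken: internal packet = %d\n",
               key == kWakeKey && bytes == 0 && overlapped == NULL);
      }

      CloseHandle(port);
      return 0;
    }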
@@ -624,7 +696,7 @@ bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
       this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
     // This is our internal completion.
     DCHECK(!item.bytes_transfered);
-    InterlockedExchange(&have_work_, 0);
+    InterlockedExchange(&pump_state_, kPumpIdle);
     return true;
   }
   return false;