Index: base/message_loop/message_pump_win.cc |
diff --git a/base/message_loop/message_pump_win.cc b/base/message_loop/message_pump_win.cc |
index fd4a2e8966cba20ddac61d71ffa5ff5c629f9eaf..9d81eccc787b43ca0c6a9104eebfdfa87edbf045 100644 |
--- a/base/message_loop/message_pump_win.cc |
+++ b/base/message_loop/message_pump_win.cc |
@@ -94,7 +94,7 @@ MessagePumpForUI::~MessagePumpForUI() { |
} |
void MessagePumpForUI::ScheduleWork() { |
- if (InterlockedExchange(&have_work_, 1)) |
+ if (READY != InterlockedExchange(&work_state_, HAVE_WORK)) |
Lei Zhang
2016/04/07 21:32:26
nit: Just write it normally with "READY" on the right-hand side. |
stanisc
2016/04/08 01:07:12
Done.
|
return; // Someone else continued the pumping. |
// Make sure the MessagePump does some work for us. |
@@ -111,7 +111,9 @@ void MessagePumpForUI::ScheduleWork() { |
// common (queue is full, of about 2000 messages), so we'll do a near-graceful |
// recovery. Nested loops are pretty transient (we think), so this will |
// probably be recoverable. |
- InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert. |
+ |
+ // Clarify that we didn't really insert. |
+ InterlockedExchange(&work_state_, READY); |
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR, |
MESSAGE_LOOP_PROBLEM_MAX); |
} |
@@ -254,7 +256,7 @@ void MessagePumpForUI::HandleWorkMessage() { |
// sort. |
if (!state_) { |
// Since we handled a kMsgHaveWork message, we must still update this flag. |
- InterlockedExchange(&have_work_, 0); |
+ InterlockedExchange(&work_state_, READY); |
return; |
} |
@@ -397,8 +399,8 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() { |
msg.hwnd != message_hwnd_); |
// Since we discarded a kMsgHaveWork message, we must update the flag. |
- int old_have_work = InterlockedExchange(&have_work_, 0); |
- DCHECK(old_have_work); |
+ int old_work_state_ = InterlockedExchange(&work_state_, READY); |
+ DCHECK_EQ(HAVE_WORK, old_work_state_); |
// We don't need a special time slice if we didn't have_message to process. |
if (!have_message) |
@@ -413,6 +415,151 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() { |
} |
//----------------------------------------------------------------------------- |
+// MessagePumpForGpu public: |
+ |
+MessagePumpForGpu::MessagePumpForGpu() |
+ : thread_id_(GetCurrentThreadId()) { |
+ // Init the message queue. |
+ MSG msg; |
+ PeekMessage(&msg, NULL, 0, 0, PM_NOREMOVE); |
Lei Zhang
2016/04/07 21:32:26
nullptr in new code please.
stanisc
2016/04/08 01:07:12
Done.
|
+} |
+ |
+MessagePumpForGpu::~MessagePumpForGpu() { |
+} |
+ |
+// static |
+void MessagePumpForGpu::InitFactory() { |
+ MessageLoop::InitMessagePumpForUIFactory( |
Lei Zhang
2016/04/07 21:32:26
DCHECK() the return result?
stanisc
2016/04/08 01:07:12
Done.
|
+ &MessagePumpForGpu::CreateMessagePumpForGpu); |
+} |
+ |
+// static |
+scoped_ptr<MessagePump> MessagePumpForGpu::CreateMessagePumpForGpu() { |
+ return scoped_ptr<MessagePump>(new MessagePumpForGpu); |
Lei Zhang
2016/04/07 21:32:26
Use base::WrapUnique
stanisc
2016/04/08 01:07:12
Done.
|
+} |
+ |
+void MessagePumpForGpu::ScheduleWork() { |
+ if (READY != InterlockedExchange(&work_state_, HAVE_WORK)) |
+ return; // Someone else continued the pumping. |
+ |
+ // Make sure the MessagePump does some work for us. |
+ BOOL ret = PostThreadMessage(thread_id_, kMsgHaveWork, 0, 0); |
+ if (ret) |
+ return; // There was room in the Window Message queue. |
+ |
+ // We have failed to insert a have-work message, so there is a chance that we |
Lei Zhang
2016/04/07 21:32:26
Do you really want to repeat this paragraph from MessagePumpForUI::ScheduleWork()? |
stanisc
2016/04/08 01:07:12
Replaced the comment with a short one that references the original comment in MessagePumpForUI::ScheduleWork(). |
|
+ // will starve tasks/timers while sitting in a nested message loop. Nested |
+ // loops only look at Windows Message queues, and don't look at *our* task |
+ // queues, etc., so we might not get a time slice in such. :-( |
+ // We could abort here, but the fear is that this failure mode is plausibly |
+ // common (queue is full, of about 2000 messages), so we'll do a near-graceful |
+ // recovery. Nested loops are pretty transient (we think), so this will |
+ // probably be recoverable. |
+ |
+ // Clarify that we didn't really insert. |
+ InterlockedExchange(&work_state_, READY); |
+ UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR, |
Lei Zhang
2016/04/07 21:32:26
I'm not familiar with this histogram, but do you care that this bucket is shared with the other pumps? |
stanisc
2016/04/08 01:07:12
Good question. I think it should be OK to reuse the same histogram here. |
|
+ MESSAGE_LOOP_PROBLEM_MAX); |
+} |
+ |
+void MessagePumpForGpu::ScheduleDelayedWork( |
+ const TimeTicks& delayed_work_time) { |
+ // We know that we can't be blocked right now since this method can only be |
+ // called on the same thread as Run, so we only need to update our record of |
+ // how long to sleep when we do sleep. |
+ delayed_work_time_ = delayed_work_time; |
+} |
+ |
+//----------------------------------------------------------------------------- |
+// MessagePumpForGpu private: |
+ |
+void MessagePumpForGpu::DoRunLoop() { |
+ while (!state_->should_quit) { |
+ // Indicate that the loop is handling the work. |
+ // If there is a race condition between switching to WORKING state here and |
+ // the producer thread setting the HAVE_WORK state after exiting the wait, |
+ // the event might remain in the signalled state. That might be less than |
+ // optimal but wouldn't result in failing to handle the work. |
+ InterlockedExchange(&work_state_, WORKING); |
+ |
+ bool more_work_is_plausible = state_->delegate->DoWork(); |
+ if (state_->should_quit) |
+ break; |
+ |
+ more_work_is_plausible |= |
+ state_->delegate->DoDelayedWork(&delayed_work_time_); |
+ if (state_->should_quit) |
+ break; |
+ |
+ if (more_work_is_plausible) |
+ continue; |
+ |
+ more_work_is_plausible = state_->delegate->DoIdleWork(); |
+ if (state_->should_quit) |
+ break; |
+ |
+ if (more_work_is_plausible) |
+ continue; |
+ |
+ // Switch that working state to READY to indicate that the loop is |
+ // waiting for accepting new work if it is still in WORKING state and hasn't |
+ // been signalled. Otherwise if it is in HAVE_WORK state skip the wait |
+ // and proceed to handling the work. |
+ if (HAVE_WORK == InterlockedCompareExchange(&work_state_, READY, WORKING)) |
+ continue; // Skip wait, more work was requested. |
+ |
+ WaitForWork(); // Wait (sleep) until we have work to do again. |
+ } |
+} |
+ |
+void MessagePumpForGpu::WaitForWork() { |
+ // Wait until a message is available, up to the time needed by the timer |
+ // manager to fire the next set of timers. |
+ int delay; |
+ |
+ // The while loop handles the situation where on Windows 7 and later versions |
+ // MsgWaitForMultipleObjectsEx might time out slightly earlier (less than one |
+ // ms) than the specified |delay|. In that situation it is more optimal to |
+ // just wait again rather than waste a DoRunLoop cycle. |
+ while((delay = GetCurrentDelay()) != 0) { |
Lei Zhang
2016/04/07 21:32:26
nit: space after "while" - remember to run: git cl format |
stanisc
2016/04/08 01:07:12
Done.
|
+ if (delay < 0) // Negative value means no timers waiting. |
+ delay = INFINITE; |
+ |
+ DWORD result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT, 0); |
+ if (WAIT_OBJECT_0 == result) { |
+ // A WM_* message is available. |
+ if (ProcessMessages()) |
+ return; |
+ } |
+ |
+ DCHECK_NE(WAIT_FAILED, result) << GetLastError(); |
+ } |
+} |
+ |
+bool MessagePumpForGpu::ProcessMessages() { |
+ MSG msg; |
+ bool have_work = false; |
+ while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE) { |
+ if (WM_QUIT == msg.message) { |
+ // Repost the QUIT message so that it will be retrieved by the primary |
+ // GetMessage() loop. |
+ state_->should_quit = true; |
+ PostQuitMessage(static_cast<int>(msg.wParam)); |
+ return true; |
+ } |
+ |
+ if (msg.hwnd == NULL && msg.message == kMsgHaveWork) { |
+ have_work = true; |
+ } else { |
+ TranslateMessage(&msg); |
+ DispatchMessage(&msg); |
+ } |
+ } |
+ |
+ return have_work; |
+} |
+ |
+//----------------------------------------------------------------------------- |
// MessagePumpForIO public: |
MessagePumpForIO::MessagePumpForIO() { |
@@ -424,7 +571,7 @@ MessagePumpForIO::~MessagePumpForIO() { |
} |
void MessagePumpForIO::ScheduleWork() { |
- if (InterlockedExchange(&have_work_, 1)) |
+ if (READY != InterlockedExchange(&work_state_, HAVE_WORK)) |
return; // Someone else continued the pumping. |
// Make sure the MessagePump does some work for us. |
@@ -435,7 +582,7 @@ void MessagePumpForIO::ScheduleWork() { |
return; // Post worked perfectly. |
// See comment in MessagePumpForUI::ScheduleWork() for this error recovery. |
- InterlockedExchange(&have_work_, 0); // Clarify that we didn't succeed. |
+ InterlockedExchange(&work_state_, READY); // Clarify that we didn't succeed. |
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR, |
MESSAGE_LOOP_PROBLEM_MAX); |
} |
@@ -580,7 +727,7 @@ bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) { |
reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) { |
// This is our internal completion. |
DCHECK(!item.bytes_transfered); |
- InterlockedExchange(&have_work_, 0); |
+ InterlockedExchange(&work_state_, READY); |
return true; |
} |
return false; |