Index: base/message_loop/message_pump_win.cc
diff --git a/base/message_loop/message_pump_win.cc b/base/message_loop/message_pump_win.cc
index cdbf0c260a9a49b773699e38b58193c00cba72e8..a84e04e93a5ef90d1b03fce2afd0545f897333b7 100644
--- a/base/message_loop/message_pump_win.cc
+++ b/base/message_loop/message_pump_win.cc
@@ -12,6 +12,7 @@
 #include "base/process/memory.h"
 #include "base/profiler/scoped_tracker.h"
 #include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
 #include "base/trace_event/trace_event.h"
 #include "base/win/wrapped_window_proc.h"
@@ -34,6 +35,10 @@ static const wchar_t kWndClassFormat[] = L"Chrome_MessagePumpWindow_%p";
 // task (a series of such messages creates a continuous task pump).
 static const int kMsgHaveWork = WM_USER + 1;
+// The default delay for the waitable timer used to wake up the UI worker
+// thread.
+static const int64 kDefaultUIWorkerThreadWakeupTimerMs = 3;
+
 //-----------------------------------------------------------------------------
 // MessagePumpWin public:
@@ -90,35 +95,39 @@ int MessagePumpWin::GetCurrentDelay() const {
 MessagePumpForUI::MessagePumpForUI()
     : atom_(0) {
   InitMessageWnd();
+
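+  // The timer is auto-reset (bManualReset == FALSE), so a satisfied wait in
+  // DoWorkerThreadRunLoop returns it to the non-signaled state automatically.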
+  ui_worker_thread_timer_.Set(::CreateWaitableTimer(NULL, FALSE, NULL));
+  ui_worker_thread_.reset(new base::Thread("UI Pump Worker thread"));
+  ui_worker_thread_->Start();
+  ui_worker_thread_->WaitUntilThreadStarted();
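+  // The worker thread waits on the timer and posts kMsgHaveWork back to this
+  // thread whenever it fires.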
+  ui_worker_thread_->task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&MessagePumpForUI::DoWorkerThreadRunLoop,
+                 base::Unretained(this)));
 }
 MessagePumpForUI::~MessagePumpForUI() {
   DestroyWindow(message_hwnd_);
   UnregisterClass(MAKEINTATOM(atom_),
                   GetModuleFromAddress(&WndProcThunk));
+
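+  // Queue an APC to interrupt the worker thread's alertable wait so that it
+  // can exit before Stop() joins it.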
+  ::QueueUserAPC(
+      reinterpret_cast<PAPCFUNC>(&MessagePumpForUI::ShutdownWorkerThread),
+      ui_worker_thread_->thread_handle().platform_handle(), NULL);
+  ui_worker_thread_->Stop();
 }
 void MessagePumpForUI::ScheduleWork() {
-  if (InterlockedExchange(&have_work_, 1))
-    return;  // Someone else continued the pumping.
-
-  // Make sure the MessagePump does some work for us.
-  BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
-                         reinterpret_cast<WPARAM>(this), 0);
-  if (ret)
-    return;  // There was room in the Window Message queue.
-
-  // We have failed to insert a have-work message, so there is a chance that we
-  // will starve tasks/timers while sitting in a nested message loop. Nested
-  // loops only look at Windows Message queues, and don't look at *our* task
-  // queues, etc., so we might not get a time slice in such. :-(
-  // We could abort here, but the fear is that this failure mode is plausibly
-  // common (queue is full, of about 2000 messages), so we'll do a near-graceful
-  // recovery. Nested loops are pretty transient (we think), so this will
-  // probably be recoverable.
-  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't really insert.
-  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
-                            MESSAGE_LOOP_PROBLEM_MAX);
+  // If we have a regular posted task at the head of the queue, then we need
+  // to process it quickly.
+  if (state_ && state_->delegate->GetNewlyAddedTaskDelay().is_null()) {
+    // Make sure the MessagePump does some work for us.
+    PostWorkMessage();
+    return;
+  }
+  // Set a one-shot timer to fire after kDefaultUIWorkerThreadWakeupTimerMs
+  // milliseconds. The actual resolution of the timer depends on whether
+  // timeBeginPeriod has been called.
+  SetWakeupTimer(kDefaultUIWorkerThreadWakeupTimerMs);
 }
 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
@@ -409,45 +418,65 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
 }
 bool MessagePumpForUI::ProcessPumpReplacementMessage() {
-  // When we encounter a kMsgHaveWork message, this method is called to peek
-  // and process a replacement message, such as a WM_PAINT or WM_TIMER. The
-  // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
-  // a continuous stream of such messages are posted. This method carefully
-  // peeks a message while there is no chance for a kMsgHaveWork to be pending,
-  // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
-  // possibly be posted), and finally dispatches that peeked replacement. Note
-  // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
-
-  bool have_message = false;
-  MSG msg;
-  // We should not process all window messages if we are in the context of an
-  // OS modal loop, i.e. in the context of a windows API call like MessageBox.
-  // This is to ensure that these messages are peeked out by the OS modal loop.
-  if (MessageLoop::current()->os_modal_loop()) {
-    // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
-    have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
-                   PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
-  } else {
-    have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE;
-  }
+  // Since we discarded a kMsgHaveWork message, we must update the flag.
+  InterlockedExchange(&have_work_, 0);
+  return true;
+}
-  DCHECK(!have_message || kMsgHaveWork != msg.message ||
-         msg.hwnd != message_hwnd_);
+void MessagePumpForUI::DoWorkerThreadRunLoop() {
+  DCHECK(ui_worker_thread_timer_.Get());
+  while (true) {
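+    // Block until the wakeup timer fires. The wait is alertable so that the
+    // shutdown APC queued by the destructor can interrupt it.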
+    DWORD ret = WaitForSingleObjectEx(
+        ui_worker_thread_timer_.Get(), INFINITE, TRUE);
+    // The only APC this thread can receive is the ShutdownWorkerThread APC.
+    if (ret == WAIT_IO_COMPLETION)
+      return;
-  // Since we discarded a kMsgHaveWork message, we must update the flag.
-  int old_have_work = InterlockedExchange(&have_work_, 0);
-  DCHECK(old_have_work);
+    // Make sure the MessagePump does some work for us.
+    PostWorkMessage();
-  // We don't need a special time slice if we didn't have_message to process.
-  if (!have_message)
-    return false;
+    // Set a one-shot timer to process any pending delayed tasks in the
+    // queue. The actual resolution of the timer depends on whether the
+    // timeBeginPeriod API has been called.
+    SetWakeupTimer(kDefaultUIWorkerThreadWakeupTimerMs);
+  }
+}
+
+// static
+void CALLBACK MessagePumpForUI::ShutdownWorkerThread(ULONG_PTR param) {
+  // This function is intentionally a no-op; queueing the APC merely
+  // interrupts the worker thread's alertable wait so that it can exit.
+  return;
+}
+
+void MessagePumpForUI::PostWorkMessage() {
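+  // Post kMsgHaveWork to the message window; the WPARAM carries |this| so the
+  // window procedure can route the message back to this pump.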
+  BOOL posted = PostMessage(message_hwnd_, kMsgHaveWork,
+                            reinterpret_cast<WPARAM>(this), 0);
+  if (!posted) {
+    // We have failed to insert a have-work message, so there is a chance
+    // that we will starve tasks/timers while sitting in a nested message
+    // loop. Nested loops only look at Windows Message queues, and don't
+    // look at *our* task queues, etc., so we might not get a time slice in
+    // such. :-(
+    // We could abort here, but the fear is that this failure mode is
+    // plausibly common (queue is full, of about 2000 messages), so we'll
+    // do a near-graceful recovery. Nested loops are pretty transient
+    // (we think), so this will probably be recoverable.
+    UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
+                              MESSAGE_POST_ERROR,
+                              MESSAGE_LOOP_PROBLEM_MAX);
+  }
+}
-  // Guarantee we'll get another time slice in the case where we go into native
-  // windows code. This ScheduleWork() may hurt performance a tiny bit when
-  // tasks appear very infrequently, but when the event queue is busy, the
-  // kMsgHaveWork events get (percentage wise) rarer and rarer.
-  ScheduleWork();
-  return ProcessMessageHelper(msg);
+void MessagePumpForUI::SetWakeupTimer(int64 delay_ms) {
+  // Set the timer to fire after |delay_ms| milliseconds. The actual
+  // resolution of the timer depends on whether timeBeginPeriod has been
+  // called.
+  LARGE_INTEGER due_time = {0};
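+  // A negative due time means a relative delay, expressed in 100-nanosecond
+  // intervals (1 ms == 10,000 intervals).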
+  due_time.QuadPart = -delay_ms * 10000;
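+  // One-shot timer: a period of 0, no completion routine, and
+  // fResume == FALSE so a suspended system is not woken.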
+  BOOL timer_set = ::SetWaitableTimer(ui_worker_thread_timer_.Get(),
+                                      &due_time, 0, NULL, NULL, FALSE);
+  CHECK(timer_set);
 }
 //-----------------------------------------------------------------------------