Chromium Code Reviews

Index: base/message_loop/message_pump_win.cc
diff --git a/base/message_loop/message_pump_win.cc b/base/message_loop/message_pump_win.cc
index 226747e5ab855bd64c8ea36b01f9cdb905e3c7dd..c35cbc28cda08ed3348fdc5485ff3ad0a55b4d53 100644
--- a/base/message_loop/message_pump_win.cc
+++ b/base/message_loop/message_pump_win.cc
@@ -12,6 +12,7 @@
 #include "base/process/memory.h"
 #include "base/profiler/scoped_tracker.h"
 #include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
 #include "base/trace_event/trace_event.h"
 #include "base/win/wrapped_window_proc.h"
@@ -88,37 +89,31 @@ int MessagePumpWin::GetCurrentDelay() const {
 // MessagePumpForUI public:
 MessagePumpForUI::MessagePumpForUI()
-    : atom_(0) {
+    : atom_(0),
+      exit_ui_worker_thread_(false) {
   InitMessageWnd();
+  ui_worker_thread_.reset(new base::Thread("UI Pump Worker thread"));
cpu_(ooo_6.6-7.5) 2015/05/28 22:45:51
can we do this only when we are nested?

ananta 2015/05/29 00:51:28
We only do this now when we have tasks. Difficult
+  ui_worker_thread_->Start();
+  ui_worker_thread_->task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&MessagePumpForUI::DoWorkerThreadRunLoop,
+                 base::Unretained(this)));
 }
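The pattern above — a base::Thread owned by the pump, started in the constructor, and handed a long-running member function via base::Unretained — is only safe because the destructor joins the worker before |this| dies. A minimal sketch of that lifetime contract (PumpLike and its members are illustrative, not part of the patch):

```cpp
#include "base/bind.h"
#include "base/location.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"

class PumpLike {
 public:
  PumpLike() : worker_(new base::Thread("Worker")) {
    worker_->Start();
    // base::Unretained(this) is safe only because ~PumpLike() joins the
    // worker thread before |this| is destroyed.
    worker_->task_runner()->PostTask(
        FROM_HERE, base::Bind(&PumpLike::Run, base::Unretained(this)));
  }
  // A genuinely long-running Run() must observe a stop signal before Stop()
  // is called, or the join below will hang; the patch uses a user-mode APC
  // for exactly that.
  ~PumpLike() { worker_->Stop(); }

 private:
  void Run() { /* periodic work until signaled to stop */ }
  scoped_ptr<base::Thread> worker_;
};
```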
 MessagePumpForUI::~MessagePumpForUI() {
   DestroyWindow(message_hwnd_);
   UnregisterClass(MAKEINTATOM(atom_),
                   GetModuleFromAddress(&WndProcThunk));
+
+  ::QueueUserAPC(
+      reinterpret_cast<PAPCFUNC>(&MessagePumpForUI::ShutdownWorkerThread),
+      ui_worker_thread_->thread_handle().platform_handle(),
+      reinterpret_cast<ULONG_PTR>(this));
+  ui_worker_thread_->Stop();
 }
 void MessagePumpForUI::ScheduleWork() {
-  if (InterlockedExchange(&have_work_, 1))
-    return;  // Someone else continued the pumping.
-
-  // Make sure the MessagePump does some work for us.
-  BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
-                         reinterpret_cast<WPARAM>(this), 0);
-  if (ret)
-    return;  // There was room in the Window Message queue.
-
-  // We have failed to insert a have-work message, so there is a chance that we
-  // will starve tasks/timers while sitting in a nested message loop.  Nested
-  // loops only look at Windows Message queues, and don't look at *our* task
-  // queues, etc., so we might not get a time slice in such.  :-(
-  // We could abort here, but the fear is that this failure mode is plausibly
-  // common (queue is full, of about 2000 messages), so we'll do a near-graceful
-  // recovery.  Nested loops are pretty transient (we think), so this will
-  // probably be recoverable.
-  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't really insert.
-  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
-                            MESSAGE_LOOP_PROBLEM_MAX);
+  return;
 }
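The deleted body above is the classic coalescing-flag idiom: InterlockedExchange guarantees at most one kMsgHaveWork message is in flight, and the flag is cleared again if PostMessage fails so a later wake-up can retry. A standalone sketch of the idiom, with illustrative names:

```cpp
#include <windows.h>

LONG g_have_work = 0;  // 0 = no wake-up pending, 1 = one already posted.

// Posts at most one wake-up message at a time. Returns true if this call
// posted the message, false if it was coalesced away or the post failed.
bool PostWakeUp(HWND hwnd, UINT msg) {
  if (InterlockedExchange(&g_have_work, 1))
    return false;  // A wake-up is already pending; coalesce.
  if (PostMessage(hwnd, msg, 0, 0))
    return true;
  // The queue was full; clear the flag so the next attempt can retry.
  InterlockedExchange(&g_have_work, 0);
  return false;
}
```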
 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
@@ -280,8 +275,7 @@ void MessagePumpForUI::HandleWorkMessage() {
   // Now give the delegate a chance to do some work.  He'll let us know if he
   // needs to do more work.
-  if (state_->delegate->DoWork())
-    ScheduleWork();
+  state_->delegate->DoWork();
   state_->delegate->DoDelayedWork(&delayed_work_time_);
   RescheduleTimer();
 }
@@ -435,47 +429,55 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
 }
 bool MessagePumpForUI::ProcessPumpReplacementMessage() {
-  // When we encounter a kMsgHaveWork message, this method is called to peek
-  // and process a replacement message, such as a WM_PAINT or WM_TIMER.  The
-  // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
-  // a continuous stream of such messages are posted.  This method carefully
-  // peeks a message while there is no chance for a kMsgHaveWork to be pending,
-  // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
-  // possibly be posted), and finally dispatches that peeked replacement.  Note
-  // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
-
-  bool have_message = false;
-  MSG msg;
-  // We should not process all window messages if we are in the context of an
-  // OS modal loop, i.e. in the context of a windows API call like MessageBox.
-  // This is to ensure that these messages are peeked out by the OS modal loop.
-  if (MessageLoop::current()->os_modal_loop()) {
-    // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
-    have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
-                   PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
-  } else {
-    have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE;
-  }
-
-  DCHECK(!have_message || kMsgHaveWork != msg.message ||
-         msg.hwnd != message_hwnd_);
-
   // Since we discarded a kMsgHaveWork message, we must update the flag.
   int old_have_work = InterlockedExchange(&have_work_, 0);
   DCHECK(old_have_work);
+  return true;
+}
-  // We don't need a special time slice if we didn't have_message to process.
-  if (!have_message)
-    return false;
-
-  // Guarantee we'll get another time slice in the case where we go into native
-  // windows code.  This ScheduleWork() may hurt performance a tiny bit when
-  // tasks appear very infrequently, but when the event queue is busy, the
-  // kMsgHaveWork events get (percentage wise) rarer and rarer.
-  ScheduleWork();
-  return ProcessMessageHelper(msg);
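The removed peek logic above restricted itself to WM_PAINT and WM_TIMER while an OS-modal loop (a MessageBox, say) owned the queue, so the modal loop still saw every other message. A self-contained sketch of that range-filtered PeekMessage pattern (function name is illustrative):

```cpp
#include <windows.h>

// Pulls at most one pending message. During an OS-modal loop, only paint
// and timer messages are taken, leaving everything else to the modal loop.
bool PeekOneMessage(MSG* msg, bool os_modal_loop) {
  if (os_modal_loop) {
    return PeekMessage(msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
           PeekMessage(msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
  }
  return PeekMessage(msg, NULL, 0, 0, PM_REMOVE) != FALSE;
}
```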
+void MessagePumpForUI::DoWorkerThreadRunLoop() {
+  base::win::ScopedHandle timer(::CreateWaitableTimer(NULL, FALSE, NULL));
+  LARGE_INTEGER due_time = {0};
+  // 3 milliseconds: the due time is in 100-nanosecond units, negative for
+  // relative time.
+  due_time.QuadPart = -30000;
+  BOOL ret = ::SetWaitableTimer(timer.Get(), &due_time, 3, NULL, NULL, FALSE);
+  CHECK(ret);
+
+  while (!exit_ui_worker_thread_) {
+    DWORD ret = WaitForSingleObjectEx(timer.Get(), INFINITE, TRUE);
+    if (ret == WAIT_OBJECT_0) {
cpu_(ooo_6.6-7.5) 2015/05/28 22:45:12
instead of using exit_ui_worker_thread_ rather use

ananta 2015/05/29 00:51:28
Done.
+      if (InterlockedExchange(&have_work_, 1))
+        continue;  // Someone else continued the pumping.
+
+      // Make sure the MessagePump does some work for us.
+      BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
+                             reinterpret_cast<WPARAM>(this),
+                             0);
+      if (!ret) {
+        // We have failed to insert a have-work message, so there is a chance
+        // that we will starve tasks/timers while sitting in a nested message
+        // loop.  Nested loops only look at Windows Message queues, and don't
+        // look at *our* task queues, etc., so we might not get a time slice in
+        // such.  :-(
+        // We could abort here, but the fear is that this failure mode is
+        // plausibly common (queue is full, of about 2000 messages), so we'll
+        // do a near-graceful recovery.  Nested loops are pretty transient
+        // (we think), so this will probably be recoverable.
+        UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
+                                  MESSAGE_POST_ERROR,
+                                  MESSAGE_LOOP_PROBLEM_MAX);
+      }
+    }
+  }
+}
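SetWaitableTimer takes a due time in 100-nanosecond intervals (negative means relative to now) plus a period in milliseconds, and the alertable wait is what lets the shutdown APC below run on this thread. A compilable sketch of the same timer setup outside of Chromium:

```cpp
#include <stdio.h>
#include <windows.h>

int main() {
  // Auto-reset timer (second argument FALSE), as in the patch.
  HANDLE timer = CreateWaitableTimer(NULL, FALSE, NULL);
  LARGE_INTEGER due = {};
  due.QuadPart = -30000;  // 30000 * 100 ns = 3 ms, relative to now.
  if (!SetWaitableTimer(timer, &due, 3 /* period, ms */, NULL, NULL, FALSE))
    return 1;
  for (int i = 0; i < 5; ++i) {
    // TRUE makes the wait alertable: queued APCs run here, and the wait
    // then returns WAIT_IO_COMPLETION instead of WAIT_OBJECT_0.
    if (WaitForSingleObjectEx(timer, INFINITE, TRUE) == WAIT_OBJECT_0)
      printf("tick %d\n", i);
  }
  CloseHandle(timer);
  return 0;
}
```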
+// static
+void CALLBACK MessagePumpForUI::ShutdownWorkerThread(ULONG_PTR param) {
+  MessagePumpForUI* instance = reinterpret_cast<MessagePumpForUI*>(param);
+  CHECK(instance);
cpu_(ooo_6.6-7.5) 2015/05/28 22:45:12
you can remove the 477 check

ananta 2015/05/29 00:51:28
Done.
+  instance->exit_ui_worker_thread_ = true;
+}
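An APC queued with QueueUserAPC runs only once the target thread enters an alertable wait, which is why the worker loop above waits with the bAlertable flag set. A minimal self-contained sketch of this shutdown handshake (names are illustrative):

```cpp
#include <stdio.h>
#include <windows.h>

volatile LONG g_exit = 0;

// Runs on the worker thread, inside its alertable wait.
void CALLBACK ExitApc(ULONG_PTR /* param */) {
  InterlockedExchange(&g_exit, 1);
}

DWORD WINAPI Worker(LPVOID) {
  while (!g_exit) {
    // Alertable sleep: queued APCs execute here and the call returns
    // WAIT_IO_COMPLETION, letting the loop re-check the exit flag.
    SleepEx(INFINITE, TRUE);
  }
  printf("worker exiting\n");
  return 0;
}

int main() {
  HANDLE thread = CreateThread(NULL, 0, Worker, NULL, 0, NULL);
  QueueUserAPC(ExitApc, thread, 0);  // Wakes the alertable wait.
  WaitForSingleObject(thread, INFINITE);  // Join the worker.
  CloseHandle(thread);
  return 0;
}
```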
+
 //-----------------------------------------------------------------------------
 // MessagePumpForIO public: