Index: base/message_loop/message_pump_win.cc
diff --git a/base/message_loop/message_pump_win.cc b/base/message_loop/message_pump_win.cc
index 226747e5ab855bd64c8ea36b01f9cdb905e3c7dd..1764b4cb39a6d7c3e8d8dc17cf7ddf6e93b4d47e 100644
--- a/base/message_loop/message_pump_win.cc
+++ b/base/message_loop/message_pump_win.cc
@@ -12,6 +12,7 @@
 #include "base/process/memory.h"
 #include "base/profiler/scoped_tracker.h"
 #include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
 #include "base/trace_event/trace_event.h"
 #include "base/win/wrapped_window_proc.h"
@@ -88,37 +89,31 @@ int MessagePumpWin::GetCurrentDelay() const {
 // MessagePumpForUI public:
 
 MessagePumpForUI::MessagePumpForUI()
-    : atom_(0) {
+    : atom_(0),
+      ui_worker_thread_signal_(false, false) {
   InitMessageWnd();
+  ui_worker_thread_.reset(new base::Thread("UI Pump Worker thread"));
+  ui_worker_thread_->Start();
+  ui_worker_thread_->task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&MessagePumpForUI::DoWorkerThreadRunLoop,
+                 base::Unretained(this)));
 }
 
 MessagePumpForUI::~MessagePumpForUI() {
   DestroyWindow(message_hwnd_);
   UnregisterClass(MAKEINTATOM(atom_),
                   GetModuleFromAddress(&WndProcThunk));
+
+  ::QueueUserAPC(
+      reinterpret_cast<PAPCFUNC>(&MessagePumpForUI::ShutdownWorkerThread),
+      ui_worker_thread_->thread_handle().platform_handle(), NULL);
+  ui_worker_thread_->Stop();
}
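The ::QueueUserAPC call above is the destructor's only way of stopping the worker: delivering any user APC to a thread that is blocked in an alertable wait makes that wait return WAIT_IO_COMPLETION, which the run loop below treats as its exit signal. A minimal standalone sketch of that mechanism (not part of the patch; the names are illustrative):

```cpp
#include <windows.h>
#include <cstdio>

// The APC body can be empty; its delivery alone interrupts an alertable wait.
VOID CALLBACK WakeApc(ULONG_PTR /*param*/) {}

DWORD WINAPI WorkerMain(LPVOID /*unused*/) {
  // A manual-reset event that is never signaled: only an APC ends this wait.
  HANDLE never_signaled = ::CreateEvent(NULL, TRUE, FALSE, NULL);
  // The final TRUE makes the wait alertable, so queued APCs run here.
  DWORD rv = ::WaitForSingleObjectEx(never_signaled, INFINITE, TRUE);
  if (rv == WAIT_IO_COMPLETION)
    std::printf("Woken by APC; worker exiting.\n");
  ::CloseHandle(never_signaled);
  return 0;
}

int main() {
  HANDLE worker = ::CreateThread(NULL, 0, WorkerMain, NULL, 0, NULL);
  // An APC queued before the thread reaches an alertable wait is simply
  // delivered at the next one, so no extra synchronization is needed.
  ::QueueUserAPC(&WakeApc, worker, 0);
  ::WaitForSingleObject(worker, INFINITE);
  ::CloseHandle(worker);
  return 0;
}
```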
 
 void MessagePumpForUI::ScheduleWork() {
-  if (InterlockedExchange(&have_work_, 1))
-    return;  // Someone else continued the pumping.
-
-  // Make sure the MessagePump does some work for us.
-  BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
-                         reinterpret_cast<WPARAM>(this), 0);
-  if (ret)
-    return;  // There was room in the Window Message queue.
-
-  // We have failed to insert a have-work message, so there is a chance that we
-  // will starve tasks/timers while sitting in a nested message loop.  Nested
-  // loops only look at Windows Message queues, and don't look at *our* task
-  // queues, etc., so we might not get a time slice in such.  :-(
-  // We could abort here, but the fear is that this failure mode is plausibly
-  // common (queue is full, of about 2000 messages), so we'll do a near-graceful
-  // recovery.  Nested loops are pretty transient (we think), so this will
-  // probably be recoverable.
-  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't really insert.
-  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
-                            MESSAGE_LOOP_PROBLEM_MAX);
+  ui_worker_thread_signal_.Signal();
 }
 
 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
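After this change, ScheduleWork() is cheap and safe to call from any thread: it only signals base::WaitableEvent(false, false), i.e. an auto-reset, initially-unsignaled Win32 event, and the worker thread takes over the coalescing and the PostMessage. The key property is that repeated signals on an auto-reset event collapse into a single wake-up, as this standalone sketch (illustrative names, not part of the patch) demonstrates:

```cpp
#include <windows.h>
#include <cstdio>

// Auto-reset, initially-unsignaled event: the Win32 shape of the
// base::WaitableEvent(false, false) member added by this patch.
HANDLE g_work_event = NULL;

DWORD WINAPI WorkerMain(LPVOID /*unused*/) {
  DWORD wakeups = 0;
  // Drain the event; a short timeout stands in for "no more work".
  while (::WaitForSingleObject(g_work_event, 100) == WAIT_OBJECT_0)
    ++wakeups;
  std::printf("%lu wake-up(s) for 3 signals.\n", wakeups);  // Prints 1.
  return 0;
}

int main() {
  g_work_event = ::CreateEvent(NULL, /*bManualReset=*/FALSE,
                               /*bInitialState=*/FALSE, NULL);
  ::SetEvent(g_work_event);  // Three ScheduleWork()-style signals...
  ::SetEvent(g_work_event);
  ::SetEvent(g_work_event);  // ...coalesce into one signaled state.
  HANDLE worker = ::CreateThread(NULL, 0, WorkerMain, NULL, 0, NULL);
  ::WaitForSingleObject(worker, INFINITE);
  ::CloseHandle(worker);
  ::CloseHandle(g_work_event);
  return 0;
}
```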
@@ -435,47 +430,57 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
 }
 
 bool MessagePumpForUI::ProcessPumpReplacementMessage() {
-  // When we encounter a kMsgHaveWork message, this method is called to peek
-  // and process a replacement message, such as a WM_PAINT or WM_TIMER.  The
-  // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
-  // a continuous stream of such messages are posted.  This method carefully
-  // peeks a message while there is no chance for a kMsgHaveWork to be pending,
-  // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
-  // possibly be posted), and finally dispatches that peeked replacement.  Note
-  // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
-
-  bool have_message = false;
-  MSG msg;
-  // We should not process all window messages if we are in the context of an
-  // OS modal loop, i.e. in the context of a windows API call like MessageBox.
-  // This is to ensure that these messages are peeked out by the OS modal loop.
-  if (MessageLoop::current()->os_modal_loop()) {
-    // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
-    have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
-                   PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
-  } else {
-    have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE;
-  }
-
-  DCHECK(!have_message || kMsgHaveWork != msg.message ||
-         msg.hwnd != message_hwnd_);
-
   // Since we discarded a kMsgHaveWork message, we must update the flag.
   int old_have_work = InterlockedExchange(&have_work_, 0);
   DCHECK(old_have_work);
+  return true;
+}
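ProcessPumpReplacementMessage() is thereby reduced to the consumer half of the have_work_ handshake: dequeuing a kMsgHaveWork resets the flag, which re-arms the worker's InterlockedExchange gate (seen further down in DoWorkerThreadRunLoop) so the next wake-up may post again. A compilable sketch of the two halves, with illustrative names:

```cpp
#include <windows.h>

volatile LONG g_have_work = 0;

// Producer half (worker thread): post only on the 0 -> 1 transition of the
// flag, so at most one marker message is ever pending in the window queue.
bool TryPostHaveWork(HWND hwnd, UINT have_work_msg) {
  if (::InterlockedExchange(&g_have_work, 1))
    return false;  // A marker is already in flight; coalesce this request.
  return ::PostMessage(hwnd, have_work_msg, 0, 0) != FALSE;
}

// Consumer half (UI thread, on pulling the marker out of the queue): reset
// the flag to re-arm the producer.  The previous value should always be 1.
void OnHaveWorkDequeued() {
  LONG old_value = ::InterlockedExchange(&g_have_work, 0);
  (void)old_value;  // The real code DCHECKs that this was nonzero.
}
```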
-  // We don't need a special time slice if we didn't have_message to process.
-  if (!have_message)
-    return false;
+
+void MessagePumpForUI::DoWorkerThreadRunLoop() {
+  base::win::ScopedHandle timer(::CreateWaitableTimer(NULL, FALSE, NULL));
+  LARGE_INTEGER due_time = {0};
+  // Negative due times are relative and in 100-nanosecond units, so -30000
+  // is 3 milliseconds from now; the timer then repeats every 3 ms.
+  due_time.QuadPart = -30000;
+  BOOL ret = ::SetWaitableTimer(timer.Get(), &due_time, 3, NULL, NULL, FALSE);
+  CHECK(ret);
+
+  while (TRUE) {
+    HANDLE objects[] = {ui_worker_thread_signal_.handle(), timer.Get()};
cpu_ (ooo_6.6-7.5) on 2015/06/02 00:17:27:
> I don't think you need the ui_worker_thread_signal_ …

ananta on 2015/06/03 01:27:30:
> We are using the ui_worker_thread_signal_ to avoid …
+    DWORD wait_result = WaitForMultipleObjectsEx(arraysize(objects), objects,
+                                                 TRUE, INFINITE, TRUE);
+    // The only APC this thread can receive is the shutdown APC, so a
+    // WAIT_IO_COMPLETION return value means it is time to exit.
+    if (wait_result == WAIT_IO_COMPLETION)
+      return;
-  // Guarantee we'll get another time slice in the case where we go into native
-  // windows code.  This ScheduleWork() may hurt performance a tiny bit when
-  // tasks appear very infrequently, but when the event queue is busy, the
-  // kMsgHaveWork events get (percentage wise) rarer and rarer.
-  ScheduleWork();
-  return ProcessMessageHelper(msg);
+
+    if (InterlockedExchange(&have_work_, 1))
+      continue;  // Someone else continued the pumping.
+
+    // Make sure the MessagePump does some work for us.
+    BOOL posted = PostMessage(message_hwnd_, kMsgHaveWork,
+                              reinterpret_cast<WPARAM>(this), 0);
+    if (!posted) {
+      // We have failed to insert a have-work message, so there is a chance
+      // that we will starve tasks/timers while sitting in a nested message
+      // loop.  Nested loops only look at Windows Message queues, and don't
+      // look at *our* task queues, etc., so we might not get a time slice
+      // in such.  :-(
+      // We could abort here, but the fear is that this failure mode is
+      // plausibly common (queue is full, of about 2000 messages), so we'll
+      // do a near-graceful recovery.  Nested loops are pretty transient
+      // (we think), so this will probably be recoverable.
+      UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
+                                MESSAGE_POST_ERROR,
+                                MESSAGE_LOOP_PROBLEM_MAX);
+    }
cpu_ (ooo_6.6-7.5) on 2015/06/02 00:17:27:
> the original code seems to reset have_work_ here, …

ananta on 2015/06/03 01:27:29:
> Removed the check for have_work in this function.
+  }
 }
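Two Win32 details in this loop are easy to misread. First, WaitForMultipleObjectsEx is called with bWaitAll = TRUE, so an iteration proceeds only once both the work signal and the timer are signaled, which has the effect of throttling kMsgHaveWork posts to at most one per timer period. Second, SetWaitableTimer's due time is expressed in 100-nanosecond units with negative values meaning "relative to now", while its period argument is plain milliseconds. A standalone sketch of the timer half (illustrative, not part of the patch):

```cpp
#include <windows.h>
#include <cstdio>

int main() {
  // Auto-reset waitable timer (bManualReset = FALSE), as in the patch.
  HANDLE timer = ::CreateWaitableTimer(NULL, FALSE, NULL);
  LARGE_INTEGER due_time = {0};
  // Negative => relative to now, in 100-ns units: -30000 * 100 ns = 3 ms.
  due_time.QuadPart = -30000;
  // The third argument is the repeat period, in milliseconds.
  ::SetWaitableTimer(timer, &due_time, 3, NULL, NULL, FALSE);
  for (int i = 0; i < 5; ++i) {
    ::WaitForSingleObject(timer, INFINITE);
    // Fires roughly every 3 ms, subject to the system timer resolution
    // (often ~15.6 ms unless timeBeginPeriod has lowered it).
    std::printf("tick %d\n", i);
  }
  ::CancelWaitableTimer(timer);
  ::CloseHandle(timer);
  return 0;
}
```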
+
+// static
+void CALLBACK MessagePumpForUI::ShutdownWorkerThread(ULONG_PTR param) {
+  // Deliberately empty: merely queueing this APC wakes the worker thread's
+  // alertable wait with WAIT_IO_COMPLETION, which DoWorkerThreadRunLoop
+  // treats as the signal to shut down.
+  return;
cpu_ (ooo_6.6-7.5) on 2015/06/02 00:17:27:
> have a comment in 481 explaining why the empty function …

ananta on 2015/06/03 01:27:29:
> Done.
+}
+
 //-----------------------------------------------------------------------------
 // MessagePumpForIO public: