Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(632)

Side by Side Diff: base/message_loop/message_pump_win.cc

Issue 1156503005: Don't peek messages in the MessagePumpForUI class when we receive our kMsgHaveWork message. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Provide a way to process posted tasks quicker Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/message_loop/message_pump_win.h ('k') | content/gpu/gpu_main.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/message_loop/message_pump_win.h" 5 #include "base/message_loop/message_pump_win.h"
6 6
7 #include <limits> 7 #include <limits>
8 #include <math.h> 8 #include <math.h>
9 9
10 #include "base/message_loop/message_loop.h" 10 #include "base/message_loop/message_loop.h"
11 #include "base/metrics/histogram.h" 11 #include "base/metrics/histogram.h"
12 #include "base/process/memory.h" 12 #include "base/process/memory.h"
13 #include "base/profiler/scoped_tracker.h" 13 #include "base/profiler/scoped_tracker.h"
14 #include "base/strings/stringprintf.h" 14 #include "base/strings/stringprintf.h"
15 #include "base/threading/thread.h"
15 #include "base/trace_event/trace_event.h" 16 #include "base/trace_event/trace_event.h"
16 #include "base/win/wrapped_window_proc.h" 17 #include "base/win/wrapped_window_proc.h"
17 18
18 namespace base { 19 namespace base {
19 20
20 namespace { 21 namespace {
21 22
22 enum MessageLoopProblems { 23 enum MessageLoopProblems {
23 MESSAGE_POST_ERROR, 24 MESSAGE_POST_ERROR,
24 COMPLETION_POST_ERROR, 25 COMPLETION_POST_ERROR,
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
83 (timeout > std::numeric_limits<int>::max() ? 84 (timeout > std::numeric_limits<int>::max() ?
84 std::numeric_limits<int>::max() : static_cast<int>(timeout)); 85 std::numeric_limits<int>::max() : static_cast<int>(timeout));
85 } 86 }
86 87
87 //----------------------------------------------------------------------------- 88 //-----------------------------------------------------------------------------
88 // MessagePumpForUI public: 89 // MessagePumpForUI public:
89 90
90 MessagePumpForUI::MessagePumpForUI() 91 MessagePumpForUI::MessagePumpForUI()
91 : atom_(0) { 92 : atom_(0) {
92 InitMessageWnd(); 93 InitMessageWnd();
94
95 ui_worker_thread_timer_.Set(::CreateWaitableTimer(NULL, FALSE, NULL));
96 ui_worker_thread_.reset(new base::Thread("UI Pump Worker thread"));
97 ui_worker_thread_->Start();
98 ui_worker_thread_->task_runner()->PostTask(
99 FROM_HERE,
100 base::Bind(&MessagePumpForUI::DoWorkerThreadRunLoop,
101 base::Unretained(this)));
93 } 102 }
94 103
95 MessagePumpForUI::~MessagePumpForUI() { 104 MessagePumpForUI::~MessagePumpForUI() {
96 DestroyWindow(message_hwnd_); 105 DestroyWindow(message_hwnd_);
97 UnregisterClass(MAKEINTATOM(atom_), 106 UnregisterClass(MAKEINTATOM(atom_),
98 GetModuleFromAddress(&WndProcThunk)); 107 GetModuleFromAddress(&WndProcThunk));
108
109 ::QueueUserAPC(
110 reinterpret_cast<PAPCFUNC>(&MessagePumpForUI::ShutdownWorkerThread),
111 ui_worker_thread_->thread_handle().platform_handle(), NULL);
112 ui_worker_thread_->Stop();
99 } 113 }
100 114
101 void MessagePumpForUI::ScheduleWork() { 115 void MessagePumpForUI::ScheduleWork() {
102 if (InterlockedExchange(&have_work_, 1)) 116 // If we have a regular posted task at the head of queue then we need to
103 return; // Someone else continued the pumping. 117 // process it quickly.
118 if (state_ && state_->delegate->GetNewlyAddedTaskDelay().is_null()) {
119 // Make sure the MessagePump does some work for us.
120 PostWorkMessage();
121 return;
122 }
104 123
105 // Make sure the MessagePump does some work for us. 124 LARGE_INTEGER due_time = {0};
106 BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork, 125 // Set the timer to fire every 3 milliseconds. The actual resolution of the
107 reinterpret_cast<WPARAM>(this), 0); 126 // timer is dependent on timeBeginPeriod being called.
108 if (ret) 127 due_time.QuadPart = -30000;
scottmg 2015/06/08 21:43:31 How did we pick 3?
ananta 2015/06/08 21:46:54 That was a consensus number based on the fact that
109 return; // There was room in the Window Message queue. 128 BOOL ret = ::SetWaitableTimer(ui_worker_thread_timer_.Get(), &due_time, 3,
110 129 NULL, NULL, FALSE);
111 // We have failed to insert a have-work message, so there is a chance that we 130 CHECK(ret);
112 // will starve tasks/timers while sitting in a nested message loop. Nested
113 // loops only look at Windows Message queues, and don't look at *our* task
114 // queues, etc., so we might not get a time slice in such. :-(
115 // We could abort here, but the fear is that this failure mode is plausibly
116 // common (queue is full, of about 2000 messages), so we'll do a near-graceful
117 // recovery. Nested loops are pretty transient (we think), so this will
118 // probably be recoverable.
119 InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert.
120 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
121 MESSAGE_LOOP_PROBLEM_MAX);
122 } 131 }
123 132
124 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) { 133 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
125 delayed_work_time_ = delayed_work_time; 134 delayed_work_time_ = delayed_work_time;
126 RescheduleTimer(); 135 RescheduleTimer();
127 } 136 }
128 137
129 //----------------------------------------------------------------------------- 138 //-----------------------------------------------------------------------------
130 // MessagePumpForUI private: 139 // MessagePumpForUI private:
131 140
(...skipping 296 matching lines...) Expand 10 before | Expand all | Expand 10 after
428 FROM_HERE_WITH_EXPLICIT_FUNCTION( 437 FROM_HERE_WITH_EXPLICIT_FUNCTION(
429 "440919 MessagePumpForUI::ProcessMessageHelper6")); 438 "440919 MessagePumpForUI::ProcessMessageHelper6"));
430 439
431 DispatchMessage(&msg); 440 DispatchMessage(&msg);
432 } 441 }
433 442
434 return true; 443 return true;
435 } 444 }
436 445
437 bool MessagePumpForUI::ProcessPumpReplacementMessage() { 446 bool MessagePumpForUI::ProcessPumpReplacementMessage() {
438 // When we encounter a kMsgHaveWork message, this method is called to peek 447 // Since we discarded a kMsgHaveWork message, we must update the flag.
439 // and process a replacement message, such as a WM_PAINT or WM_TIMER. The 448 InterlockedExchange(&have_work_, 0);
440 // goal is to make the kMsgHaveWork as non-intrusive as possible, even though 449 return true;
441 // a continuous stream of such messages are posted. This method carefully 450 }
442 // peeks a message while there is no chance for a kMsgHaveWork to be pending,
443 // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
444 // possibly be posted), and finally dispatches that peeked replacement. Note
445 // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
446 451
447 bool have_message = false; 452 void MessagePumpForUI::DoWorkerThreadRunLoop() {
448 MSG msg; 453 DCHECK(ui_worker_thread_timer_.Get());
449 // We should not process all window messages if we are in the context of an 454 while (TRUE) {
450 // OS modal loop, i.e. in the context of a windows API call like MessageBox. 455 DWORD ret = WaitForSingleObjectEx(
451 // This is to ensure that these messages are peeked out by the OS modal loop. 456 ui_worker_thread_timer_.Get(), INFINITE, TRUE);
452 if (MessageLoop::current()->os_modal_loop()) { 457 // The only APC this thread could receive is the Shutdown APC.
453 // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above. 458 if (ret == WAIT_IO_COMPLETION)
454 have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) || 459 return;
455 PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE); 460
456 } else { 461 // Make sure the MessagePump does some work for us.
457 have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE; 462 PostWorkMessage();
scottmg 2015/06/08 21:43:31 Is it possible for this only to run when SetNestabl
ananta 2015/06/08 21:46:54 It does not do that. Tricky to detect modal loops
458 } 463 }
464 }
459 465
460 DCHECK(!have_message || kMsgHaveWork != msg.message || 466 // static
461 msg.hwnd != message_hwnd_); 467 void CALLBACK MessagePumpForUI::ShutdownWorkerThread(ULONG_PTR param) {
468 // This function is empty because we only use the fact that an APC was posted
469 // to the worker thread to shut it down.
470 return;
471 }
462 472
463 // Since we discarded a kMsgHaveWork message, we must update the flag. 473 void MessagePumpForUI::PostWorkMessage() {
464 int old_have_work = InterlockedExchange(&have_work_, 0); 474 BOOL posted = PostMessage(message_hwnd_, kMsgHaveWork,
465 DCHECK(old_have_work); 475 reinterpret_cast<WPARAM>(this),
466 476 0);
467 // We don't need a special time slice if we didn't have_message to process. 477 if (!posted) {
468 if (!have_message) 478 // We have failed to insert a have-work message, so there is a chance
469 return false; 479 // that we will starve tasks/timers while sitting in a nested message
470 480 // loop. Nested loops only look at Windows Message queues, and don't
471 // Guarantee we'll get another time slice in the case where we go into native 481 // look at *our* task queues, etc., so we might not get a time slice in
472 // windows code. This ScheduleWork() may hurt performance a tiny bit when 482 // such. :-(
473 // tasks appear very infrequently, but when the event queue is busy, the 483 // We could abort here, but the fear is that this failure mode is
474 // kMsgHaveWork events get (percentage wise) rarer and rarer. 484 // plausibly common (queue is full, of about 2000 messages), so we'll
475 ScheduleWork(); 485 // do a near-graceful recovery. Nested loops are pretty transient
476 return ProcessMessageHelper(msg); 486 // (we think), so this will probably be recoverable.
487 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
488 MESSAGE_POST_ERROR,
489 MESSAGE_LOOP_PROBLEM_MAX);
490 }
477 } 491 }
478 492
479 //----------------------------------------------------------------------------- 493 //-----------------------------------------------------------------------------
480 // MessagePumpForIO public: 494 // MessagePumpForIO public:
481 495
482 MessagePumpForIO::MessagePumpForIO() { 496 MessagePumpForIO::MessagePumpForIO() {
483 port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1)); 497 port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1));
484 DCHECK(port_.IsValid()); 498 DCHECK(port_.IsValid());
485 } 499 }
486 500
(...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after
698 712
699 // static 713 // static
700 MessagePumpForIO::IOHandler* MessagePumpForIO::KeyToHandler( 714 MessagePumpForIO::IOHandler* MessagePumpForIO::KeyToHandler(
701 ULONG_PTR key, 715 ULONG_PTR key,
702 bool* has_valid_io_context) { 716 bool* has_valid_io_context) {
703 *has_valid_io_context = ((key & 1) == 0); 717 *has_valid_io_context = ((key & 1) == 0);
704 return reinterpret_cast<IOHandler*>(key & ~static_cast<ULONG_PTR>(1)); 718 return reinterpret_cast<IOHandler*>(key & ~static_cast<ULONG_PTR>(1));
705 } 719 }
706 720
707 } // namespace base 721 } // namespace base
OLDNEW
« no previous file with comments | « base/message_loop/message_pump_win.h ('k') | content/gpu/gpu_main.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698