Chromium Code Reviews

Side by Side Diff: base/message_loop/message_pump_win.cc

Issue 1714263002: Version of MessagePumpForUI optimized for GPU process (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Addressed CR feedback Created 4 years, 8 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/message_loop/message_pump_win.h" 5 #include "base/message_loop/message_pump_win.h"
6 6
7 #include <math.h> 7 #include <math.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <limits> 10 #include <limits>
11 11
12 #include "base/memory/ptr_util.h"
12 #include "base/message_loop/message_loop.h" 13 #include "base/message_loop/message_loop.h"
13 #include "base/metrics/histogram.h" 14 #include "base/metrics/histogram.h"
14 #include "base/strings/stringprintf.h" 15 #include "base/strings/stringprintf.h"
15 #include "base/trace_event/trace_event.h" 16 #include "base/trace_event/trace_event.h"
16 #include "base/win/current_module.h" 17 #include "base/win/current_module.h"
17 #include "base/win/wrapped_window_proc.h" 18 #include "base/win/wrapped_window_proc.h"
18 19
19 namespace base { 20 namespace base {
20 21
21 namespace { 22 namespace {
(...skipping 64 matching lines...)
86 : atom_(0) { 87 : atom_(0) {
87 InitMessageWnd(); 88 InitMessageWnd();
88 } 89 }
89 90
90 MessagePumpForUI::~MessagePumpForUI() { 91 MessagePumpForUI::~MessagePumpForUI() {
91 DestroyWindow(message_hwnd_); 92 DestroyWindow(message_hwnd_);
92 UnregisterClass(MAKEINTATOM(atom_), CURRENT_MODULE()); 93 UnregisterClass(MAKEINTATOM(atom_), CURRENT_MODULE());
93 } 94 }
94 95
95 void MessagePumpForUI::ScheduleWork() { 96 void MessagePumpForUI::ScheduleWork() {
96 if (InterlockedExchange(&have_work_, 1)) 97 if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
97 return; // Someone else continued the pumping. 98 return; // Someone else continued the pumping.
98 99
99 // Make sure the MessagePump does some work for us. 100 // Make sure the MessagePump does some work for us.
100 BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork, 101 BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
101 reinterpret_cast<WPARAM>(this), 0); 102 reinterpret_cast<WPARAM>(this), 0);
102 if (ret) 103 if (ret)
103 return; // There was room in the Window Message queue. 104 return; // There was room in the Window Message queue.
104 105
105 // We have failed to insert a have-work message, so there is a chance that we 106 // We have failed to insert a have-work message, so there is a chance that we
106 // will starve tasks/timers while sitting in a nested message loop. Nested 107 // will starve tasks/timers while sitting in a nested message loop. Nested
107 // loops only look at Windows Message queues, and don't look at *our* task 108 // loops only look at Windows Message queues, and don't look at *our* task
108 // queues, etc., so we might not get a time slice in such. :-( 109 // queues, etc., so we might not get a time slice in such. :-(
109 // We could abort here, but the fear is that this failure mode is plausibly 110 // We could abort here, but the fear is that this failure mode is plausibly
110 // common (queue is full, of about 2000 messages), so we'll do a near-graceful 111 // common (queue is full, of about 2000 messages), so we'll do a near-graceful
111 // recovery. Nested loops are pretty transient (we think), so this will 112 // recovery. Nested loops are pretty transient (we think), so this will
112 // probably be recoverable. 113 // probably be recoverable.
113 InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert. 114
115 // Clarify that we didn't really insert.
116 InterlockedExchange(&work_state_, READY);
114 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR, 117 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
115 MESSAGE_LOOP_PROBLEM_MAX); 118 MESSAGE_LOOP_PROBLEM_MAX);
116 } 119 }
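
The key change in ScheduleWork() is the move from the have_work_ boolean to a multi-valued work_state_. Below is a minimal sketch of the handoff this relies on, assuming state names and a LONG underlying type as implied by the diff; the real declarations live in base/message_loop/message_pump_win.h, which is reviewed separately.

  #include <windows.h>

  // Sketch only: the actual enum is declared in message_pump_win.h.
  enum WorkState : LONG {
    READY = 0,      // Pump is idle; the next ScheduleWork() must wake it.
    HAVE_WORK = 1,  // A wake-up (kMsgHaveWork / IO completion) is already queued.
    WORKING = 2     // Used by MessagePumpForGpu while the loop runs delegates.
  };

  // Producer-side handoff used by every ScheduleWork() in this file: only the
  // caller that performs the READY -> HAVE_WORK transition posts the wake-up;
  // any other previous value means a wake-up is already in flight.
  bool ShouldPostWakeup(volatile LONG* work_state) {
    return InterlockedExchange(work_state, HAVE_WORK) == READY;
  }
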
117 120
118 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) { 121 void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
119 delayed_work_time_ = delayed_work_time; 122 delayed_work_time_ = delayed_work_time;
120 RescheduleTimer(); 123 RescheduleTimer();
121 } 124 }
122 125
123 //----------------------------------------------------------------------------- 126 //-----------------------------------------------------------------------------
(...skipping 122 matching lines...)
246 249
247 DCHECK_NE(WAIT_FAILED, result) << GetLastError(); 250 DCHECK_NE(WAIT_FAILED, result) << GetLastError();
248 } 251 }
249 252
250 void MessagePumpForUI::HandleWorkMessage() { 253 void MessagePumpForUI::HandleWorkMessage() {
251 // If we are being called outside of the context of Run, then don't try to do 254 // If we are being called outside of the context of Run, then don't try to do
252 // any work. This could correspond to a MessageBox call or something of that 255 // any work. This could correspond to a MessageBox call or something of that
253 // sort. 256 // sort.
254 if (!state_) { 257 if (!state_) {
255 // Since we handled a kMsgHaveWork message, we must still update this flag. 258 // Since we handled a kMsgHaveWork message, we must still update this flag.
256 InterlockedExchange(&have_work_, 0); 259 InterlockedExchange(&work_state_, READY);
257 return; 260 return;
258 } 261 }
259 262
260 // Let whatever would have run had we not been putting messages in the queue 263 // Let whatever would have run had we not been putting messages in the queue
261 // run now. This is an attempt to make our dummy message not starve other 264 // run now. This is an attempt to make our dummy message not starve other
262 // messages that may be in the Windows message queue. 265 // messages that may be in the Windows message queue.
263 ProcessPumpReplacementMessage(); 266 ProcessPumpReplacementMessage();
264 267
265 // Now give the delegate a chance to do some work. He'll let us know if he 268 // Now give the delegate a chance to do some work. He'll let us know if he
266 // needs to do more work. 269 // needs to do more work.
(...skipping 122 matching lines...)
389 have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) || 392 have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
390 PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE); 393 PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
391 } else { 394 } else {
392 have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE; 395 have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE;
393 } 396 }
394 397
395 DCHECK(!have_message || kMsgHaveWork != msg.message || 398 DCHECK(!have_message || kMsgHaveWork != msg.message ||
396 msg.hwnd != message_hwnd_); 399 msg.hwnd != message_hwnd_);
397 400
398 // Since we discarded a kMsgHaveWork message, we must update the flag. 401 // Since we discarded a kMsgHaveWork message, we must update the flag.
399 int old_have_work = InterlockedExchange(&have_work_, 0); 402 int old_work_state_ = InterlockedExchange(&work_state_, READY);
400 DCHECK(old_have_work); 403 DCHECK_EQ(HAVE_WORK, old_work_state_);
401 404
402 // We don't need a special time slice if we didn't have_message to process. 405 // We don't need a special time slice if we didn't have_message to process.
403 if (!have_message) 406 if (!have_message)
404 return false; 407 return false;
405 408
406 // Guarantee we'll get another time slice in the case where we go into native 409 // Guarantee we'll get another time slice in the case where we go into native
407 // windows code. This ScheduleWork() may hurt performance a tiny bit when 410 // windows code. This ScheduleWork() may hurt performance a tiny bit when
408 // tasks appear very infrequently, but when the event queue is busy, the 411 // tasks appear very infrequently, but when the event queue is busy, the
409 // kMsgHaveWork events get (percentage wise) rarer and rarer. 412 // kMsgHaveWork events get (percentage wise) rarer and rarer.
410 ScheduleWork(); 413 ScheduleWork();
411 return ProcessMessageHelper(msg); 414 return ProcessMessageHelper(msg);
412 } 415 }
413 416
414 //----------------------------------------------------------------------------- 417 //-----------------------------------------------------------------------------
418 // MessagePumpForGpu public:
419
420 MessagePumpForGpu::MessagePumpForGpu() : thread_id_(GetCurrentThreadId()) {
421 // Init the message queue.
422 MSG msg;
423 PeekMessage(&msg, nullptr, 0, 0, PM_NOREMOVE);
424 }
425
426 MessagePumpForGpu::~MessagePumpForGpu() {}
427
428 // static
429 void MessagePumpForGpu::InitFactory() {
430 DCHECK(MessageLoop::InitMessagePumpForUIFactory(
Lei Zhang 2016/04/08 19:10:21 You can't put this in a DCHECK. It won't run in release builds.
stanisc 2016/04/11 21:52:59 What was I thinking! Fixed.
431 &MessagePumpForGpu::CreateMessagePumpForGpu));
432 }
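
Per the exchange above, the registration call must not be wrapped in the DCHECK, since the DCHECK body is compiled out in release builds and the factory would never be installed. One way to write the fix (a sketch with a hypothetical local name; the later patch set may differ in detail):

  // static
  void MessagePumpForGpu::InitFactory() {
    bool success = MessageLoop::InitMessagePumpForUIFactory(
        &MessagePumpForGpu::CreateMessagePumpForGpu);
    DCHECK(success);  // Registration always runs; only the check disappears.
  }
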
433
434 // static
435 std::unique_ptr<MessagePump> MessagePumpForGpu::CreateMessagePumpForGpu() {
436 return WrapUnique<MessagePump>(new MessagePumpForGpu);
437 }
438
439 void MessagePumpForGpu::ScheduleWork() {
440 if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
441 return; // Someone else continued the pumping.
442
443 // Make sure the MessagePump does some work for us.
444 BOOL ret = PostThreadMessage(thread_id_, kMsgHaveWork, 0, 0);
445 if (ret)
446 return; // There was room in the Window Message queue.
447
448 // See comment in MessagePumpForUI::ScheduleWork.
449 InterlockedExchange(&work_state_, READY);
450 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
451 MESSAGE_LOOP_PROBLEM_MAX);
452 }
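
Unlike MessagePumpForUI, this pump has no message-only window: ScheduleWork() wakes the loop with PostThreadMessage(). That is also why the constructor calls PeekMessage(..., PM_NOREMOVE) first — Windows creates a thread's message queue lazily, and posting to a thread that has never touched its queue fails. A small standalone illustration (hypothetical helper, not part of this patch):

  #include <windows.h>

  // Hypothetical helper illustrating the failure mode the PM_NOREMOVE peek in
  // the MessagePumpForGpu constructor guards against.
  bool WakeThread(DWORD thread_id, UINT msg) {
    if (PostThreadMessage(thread_id, msg, 0, 0))
      return true;
    // Commonly ERROR_INVALID_THREAD_ID when the target thread has not yet
    // created its message queue; can also fail when the queue is full.
    DWORD error = GetLastError();
    (void)error;  // The real pump records a UMA histogram on this path instead.
    return false;
  }
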
453
454 void MessagePumpForGpu::ScheduleDelayedWork(
455 const TimeTicks& delayed_work_time) {
456 // We know that we can't be blocked right now since this method can only be
457 // called on the same thread as Run, so we only need to update our record of
458 // how long to sleep when we do sleep.
459 delayed_work_time_ = delayed_work_time;
460 }
461
462 //-----------------------------------------------------------------------------
463 // MessagePumpForGpu private:
464
465 void MessagePumpForGpu::DoRunLoop() {
466 while (!state_->should_quit) {
467 // Indicate that the loop is handling the work.
468 // If switching to the WORKING state here races with the producer thread
469 // setting the HAVE_WORK state after it exits the wait, the state might
470 // remain signalled. That is less than optimal but would not result in
471 // failing to handle the work.
472 InterlockedExchange(&work_state_, WORKING);
473
474 bool more_work_is_plausible = state_->delegate->DoWork();
475 if (state_->should_quit)
476 break;
477
478 more_work_is_plausible |=
479 state_->delegate->DoDelayedWork(&delayed_work_time_);
480 if (state_->should_quit)
481 break;
482
483 if (more_work_is_plausible)
484 continue;
485
486 more_work_is_plausible = state_->delegate->DoIdleWork();
487 if (state_->should_quit)
488 break;
489
490 if (more_work_is_plausible)
491 continue;
492
493 // If the state is still WORKING and hasn't been signalled, switch it to
494 // READY to indicate that the loop is waiting to accept new work. Otherwise,
495 // if it has already been set to HAVE_WORK, skip the wait and proceed to
496 // handling the work.
497 if (HAVE_WORK == InterlockedCompareExchange(&work_state_, READY, WORKING))
Lei Zhang 2016/04/08 19:10:21 More comparisons to flip. Also line 518, 532.
stanisc 2016/04/11 21:52:59 Sorry, overlooked this one. Done.
498 continue; // Skip wait, more work was requested.
499
500 WaitForWork(); // Wait (sleep) until we have work to do again.
501 }
502 }
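
A condensed sketch of the state handoff in DoRunLoop() above (same names as the patch; should_quit checks omitted, and the comparison written variable-first in line with the review feedback). RunDelegates() is a hypothetical stand-in for the DoWork/DoDelayedWork/DoIdleWork calls:

  for (;;) {
    InterlockedExchange(&work_state_, WORKING);  // Loop owns the state while running.
    if (RunDelegates())                          // Hypothetical: the three Do*Work calls.
      continue;
    // Try WORKING -> READY before sleeping; if a producer already moved the
    // state to HAVE_WORK, skip the wait and handle the new work instead.
    if (InterlockedCompareExchange(&work_state_, READY, WORKING) == HAVE_WORK)
      continue;
    WaitForWork();
  }
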
503
504 void MessagePumpForGpu::WaitForWork() {
505 // Wait until a message is available, up to the time needed by the timer
506 // manager to fire the next set of timers.
507 int delay;
508
509 // The while loop handles the situation where, on Windows 7 and later,
510 // MsgWaitForMultipleObjectsEx might time out slightly earlier (by less than
511 // one ms) than the specified |delay|. In that situation it is better to
512 // simply wait again rather than waste a DoRunLoop cycle.
513 while ((delay = GetCurrentDelay()) != 0) {
514 if (delay < 0) // Negative value means no timers waiting.
515 delay = INFINITE;
516
517 DWORD result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT, 0);
518 if (WAIT_OBJECT_0 == result) {
519 // A WM_* message is available.
520 if (ProcessMessages())
521 return;
522 }
523
524 DCHECK_NE(WAIT_FAILED, result) << GetLastError();
525 }
526 }
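
GetCurrentDelay() is not part of this diff (it is defined earlier in message_pump_win.cc on the shared MessagePumpWin base). A sketch consistent with how it is used here, where -1 means no pending timers and 0 means the next timer is already due; the real implementation may differ in detail:

  #include <math.h>
  #include "base/time/time.h"

  // Sketch of the delay computation WaitForWork() relies on.
  int GetCurrentDelaySketch(const base::TimeTicks& delayed_work_time) {
    if (delayed_work_time.is_null())
      return -1;  // No timers waiting.
    double ms =
        (delayed_work_time - base::TimeTicks::Now()).InMillisecondsF();
    return ms <= 0 ? 0 : static_cast<int>(ceil(ms));
  }
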
527
528 bool MessagePumpForGpu::ProcessMessages() {
529 MSG msg;
530 bool have_work = false;
531 while (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
Lei Zhang 2016/04/08 19:10:21 You can just omit " != FALSE"
stanisc 2016/04/11 21:52:59 Done.
532 if (WM_QUIT == msg.message) {
533 // Repost the QUIT message so that it will be retrieved by the primary
534 // GetMessage() loop.
535 state_->should_quit = true;
536 PostQuitMessage(static_cast<int>(msg.wParam));
537 return true;
538 }
539
540 if (msg.hwnd == NULL && msg.message == kMsgHaveWork) {
Lei Zhang 2016/04/08 19:10:21 nullptr or !msg.hwnd.
stanisc 2016/04/11 21:52:59 Done.
541 have_work = true;
542 } else {
543 TranslateMessage(&msg);
544 DispatchMessage(&msg);
545 }
546 }
547
548 return have_work;
549 }
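
With both of the notes above applied (drop the explicit != FALSE comparison and test !msg.hwnd), and the WM_QUIT comparison flipped per the earlier note about line 532, the loop would read roughly as follows; the landed patch set may differ slightly:

  bool MessagePumpForGpu::ProcessMessages() {
    MSG msg;
    bool have_work = false;
    while (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE)) {
      if (msg.message == WM_QUIT) {
        // Repost the quit so the primary GetMessage() loop still sees it.
        state_->should_quit = true;
        PostQuitMessage(static_cast<int>(msg.wParam));
        return true;
      }
      if (!msg.hwnd && msg.message == kMsgHaveWork) {
        have_work = true;  // Our own wake-up; nothing to dispatch.
      } else {
        TranslateMessage(&msg);
        DispatchMessage(&msg);
      }
    }
    return have_work;
  }
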
550
551 //-----------------------------------------------------------------------------
415 // MessagePumpForIO public: 552 // MessagePumpForIO public:
416 553
417 MessagePumpForIO::MessagePumpForIO() { 554 MessagePumpForIO::MessagePumpForIO() {
418 port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1)); 555 port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1));
419 DCHECK(port_.IsValid()); 556 DCHECK(port_.IsValid());
420 } 557 }
421 558
422 MessagePumpForIO::~MessagePumpForIO() { 559 MessagePumpForIO::~MessagePumpForIO() {
423 } 560 }
424 561
425 void MessagePumpForIO::ScheduleWork() { 562 void MessagePumpForIO::ScheduleWork() {
426 if (InterlockedExchange(&have_work_, 1)) 563 if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
427 return; // Someone else continued the pumping. 564 return; // Someone else continued the pumping.
428 565
429 // Make sure the MessagePump does some work for us. 566 // Make sure the MessagePump does some work for us.
430 BOOL ret = PostQueuedCompletionStatus(port_.Get(), 0, 567 BOOL ret = PostQueuedCompletionStatus(port_.Get(), 0,
431 reinterpret_cast<ULONG_PTR>(this), 568 reinterpret_cast<ULONG_PTR>(this),
432 reinterpret_cast<OVERLAPPED*>(this)); 569 reinterpret_cast<OVERLAPPED*>(this));
433 if (ret) 570 if (ret)
434 return; // Post worked perfectly. 571 return; // Post worked perfectly.
435 572
436 // See comment in MessagePumpForUI::ScheduleWork() for this error recovery. 573 // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
437 InterlockedExchange(&have_work_, 0); // Clarify that we didn't succeed. 574 InterlockedExchange(&work_state_, READY); // Clarify that we didn't succeed.
438 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR, 575 UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
439 MESSAGE_LOOP_PROBLEM_MAX); 576 MESSAGE_LOOP_PROBLEM_MAX);
440 } 577 }
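
The IO pump's wake-up is a completion packet whose key and OVERLAPPED pointer both encode |this|; ProcessInternalIOItem() further down recognizes that sentinel and resets work_state_ to READY. A standalone illustration of the sentinel test (hypothetical helper, not part of the patch):

  #include <windows.h>

  // A dequeued completion whose key and OVERLAPPED both decode back to the
  // sentinel pointer is the pump's own wake-up rather than real IO.
  bool IsInternalWakeup(const void* sentinel,
                        ULONG_PTR key,
                        const OVERLAPPED* overlapped) {
    return reinterpret_cast<const void*>(key) == sentinel &&
           static_cast<const void*>(overlapped) == sentinel;
  }
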
441 578
442 void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) { 579 void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
443 // We know that we can't be blocked right now since this method can only be 580 // We know that we can't be blocked right now since this method can only be
444 // called on the same thread as Run, so we only need to update our record of 581 // called on the same thread as Run, so we only need to update our record of
445 // how long to sleep when we do sleep. 582 // how long to sleep when we do sleep.
446 delayed_work_time_ = delayed_work_time; 583 delayed_work_time_ = delayed_work_time;
447 } 584 }
(...skipping 124 matching lines...)
572 item->handler = KeyToHandler(key, &item->has_valid_io_context); 709 item->handler = KeyToHandler(key, &item->has_valid_io_context);
573 item->context = reinterpret_cast<IOContext*>(overlapped); 710 item->context = reinterpret_cast<IOContext*>(overlapped);
574 return true; 711 return true;
575 } 712 }
576 713
577 bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) { 714 bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
578 if (reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.context) && 715 if (reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.context) &&
579 reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) { 716 reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) {
580 // This is our internal completion. 717 // This is our internal completion.
581 DCHECK(!item.bytes_transfered); 718 DCHECK(!item.bytes_transfered);
582 InterlockedExchange(&have_work_, 0); 719 InterlockedExchange(&work_state_, READY);
583 return true; 720 return true;
584 } 721 }
585 return false; 722 return false;
586 } 723 }
587 724
588 // Returns a completion item that was previously received. 725 // Returns a completion item that was previously received.
589 bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) { 726 bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
590 DCHECK(!completed_io_.empty()); 727 DCHECK(!completed_io_.empty());
591 for (std::list<IOItem>::iterator it = completed_io_.begin(); 728 for (std::list<IOItem>::iterator it = completed_io_.begin();
592 it != completed_io_.end(); ++it) { 729 it != completed_io_.end(); ++it) {
(...skipping 40 matching lines...)
633 770
634 // static 771 // static
635 MessagePumpForIO::IOHandler* MessagePumpForIO::KeyToHandler( 772 MessagePumpForIO::IOHandler* MessagePumpForIO::KeyToHandler(
636 ULONG_PTR key, 773 ULONG_PTR key,
637 bool* has_valid_io_context) { 774 bool* has_valid_io_context) {
638 *has_valid_io_context = ((key & 1) == 0); 775 *has_valid_io_context = ((key & 1) == 0);
639 return reinterpret_cast<IOHandler*>(key & ~static_cast<ULONG_PTR>(1)); 776 return reinterpret_cast<IOHandler*>(key & ~static_cast<ULONG_PTR>(1));
640 } 777 }
641 778
642 } // namespace base 779 } // namespace base