Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_channel.cc

Issue 619453002: gpu: Remove Echo and SwapCompletion GL interfaces (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@tits
Patch Set: rebase Created 6 years, 2 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

(...skipping 644 matching lines...)

     IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                         OnDevToolsStartEventsRecording)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                         OnDevToolsStopEventsRecording)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   DCHECK(handled) << msg.type();
   return handled;
 }
-size_t GpuChannel::MatchSwapBufferMessagesPattern(
-    IPC::Message* current_message) {
+size_t GpuChannel::MatchRetireSyncPointPattern(IPC::Message* current_message) {
   DCHECK(current_message);
   if (deferred_messages_.empty() || !current_message)
     return 0;
   // Only care about AsyncFlush message.
   if (current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
     return 0;

   size_t index = 0;
   int32 routing_id = current_message->routing_id();

   // Fetch the first message and move index to point to the second message.
   IPC::Message* first_message = deferred_messages_[index++];

   // If the current message is AsyncFlush, the expected message sequence for
   // SwapBuffer should be AsyncFlush->Echo. We only try to match Echo message.
jbauman 2014/10/01 02:15:38 Change this comment.
jbauman 2014/10/01 02:20:59 Although, to keep the logic working in a similar w[…]
-  if (current_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
-      first_message->type() == GpuCommandBufferMsg_Echo::ID &&
+  if (first_message->type() == GpuCommandBufferMsg_RetireSyncPoint::ID &&
       first_message->routing_id() == routing_id) {
     return 1;
   }

   // No matched message is found.
   return 0;
 }
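To make the matching rule above concrete, here is a minimal standalone sketch of the same check. Msg, MsgType, and MatchPattern() are hypothetical stand-ins (not Chromium APIs) for IPC::Message, the GPU message-type IDs, and MatchRetireSyncPointPattern(); the deque stands in for deferred_messages_.

// Minimal sketch of the matching rule above. "Msg" and "MsgType" are
// hypothetical stand-ins for IPC::Message and the GPU message-type IDs;
// the deque stands in for deferred_messages_.
#include <cstddef>
#include <cstdint>
#include <deque>

enum MsgType { kAsyncFlush, kRetireSyncPoint, kOther };

struct Msg {
  MsgType type;
  int32_t routing_id;
};

// Returns 1 when the first deferred message is a RetireSyncPoint on the
// same route as the AsyncFlush currently being handled, 0 otherwise.
std::size_t MatchPattern(const Msg& current, const std::deque<Msg>& deferred) {
  if (deferred.empty() || current.type != kAsyncFlush)
    return 0;
  const Msg& first = deferred.front();
  if (first.type == kRetireSyncPoint && first.routing_id == current.routing_id)
    return 1;
  return 0;
}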

 void GpuChannel::HandleMessage() {
   handle_messages_scheduled_ = false;
   if (deferred_messages_.empty())
     return;

   size_t matched_messages_num = 0;
   bool should_handle_swapbuffer_msgs_immediate = false;
   IPC::Message* m = NULL;
   GpuCommandBufferStub* stub = NULL;

   do {
     m = deferred_messages_.front();
     stub = stubs_.Lookup(m->routing_id());
     if (stub) {
       if (!stub->IsScheduled())
         return;
+      // TODO: We might want to still retire sync points, and mostly
+      // care about preempting flushes here.
       if (stub->IsPreempted()) {
         OnScheduled();
         return;
       }
     }

     scoped_ptr<IPC::Message> message(m);
     deferred_messages_.pop_front();
     bool message_processed = true;

(...skipping 24 matching lines...)
         }
       }
     }
     if (message_processed)
       MessageProcessed();

     if (deferred_messages_.empty())
       break;

     // We process the pending messages immediately if these messages matches
-    // the pattern of SwapBuffers, for example, GLRenderer always issues
-    // SwapBuffers calls with a specific IPC message patterns, for example,
-    // it should be AsyncFlush->Echo sequence.
+    // the pattern of Flush followed by Insert/RetireSyncPoint since
+    // sync points imply a Flush.
     //
     // Instead of posting a task to message loop, it could avoid the possibility
-    // of being blocked by other channels, and make SwapBuffers executed as soon
-    // as possible.
+    // of being blocked by other channels, and signal clients or unblock waiting
+    // contexts as soon as possible.
     if (!should_handle_swapbuffer_msgs_immediate) {
-      // Start from the current processing message to match SwapBuffer pattern.
-      matched_messages_num = MatchSwapBufferMessagesPattern(message.get());
+      // Start from the current processing message to match RetireSyncPoint
+      // pattern.
+      matched_messages_num = MatchRetireSyncPointPattern(message.get());
       should_handle_swapbuffer_msgs_immediate =
           matched_messages_num > 0 && stub;
     } else {
       DCHECK_GT(matched_messages_num, 0u);
       --matched_messages_num;
       if (!stub || matched_messages_num == 0)
         should_handle_swapbuffer_msgs_immediate = false;
     }
   } while (should_handle_swapbuffer_msgs_immediate);

(...skipping 94 matching lines...)
 uint64 GpuChannel::GetMemoryUsage() {
   uint64 size = 0;
   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
        !it.IsAtEnd(); it.Advance()) {
     size += it.GetCurrentValue()->GetMemoryUsage();
   }
   return size;
 }

 }  // namespace content
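Continuing the hypothetical sketch after MatchRetireSyncPointPattern() above, a short usage example of the coalescing decision HandleMessage() makes; the routes and queue contents are invented for illustration and reuse the Msg/MatchPattern definitions from the earlier snippet.

// Usage example for the sketch above (requires the Msg/MatchPattern
// definitions from the earlier snippet). Routes and queue contents are
// invented for illustration.
#include <cassert>

int main() {
  // An AsyncFlush on route 7 with the RetireSyncPoint for the same route
  // queued directly behind it: HandleMessage() processes both in one pass
  // instead of re-posting to the message loop.
  std::deque<Msg> deferred = {{kRetireSyncPoint, 7}, {kOther, 7}};
  assert(MatchPattern({kAsyncFlush, 7}, deferred) == 1);

  // A flush on another route does not match the pattern, so the channel
  // defers as usual.
  assert(MatchPattern({kAsyncFlush, 8}, deferred) == 0);
  return 0;
}

Per the comment in the patch, a sync point implies a flush, so the RetireSyncPoint for a route directly follows its AsyncFlush in the deferred queue; that is why looking only one message ahead suffices.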
