OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if defined(OS_WIN) | 5 #if defined(OS_WIN) |
6 #include <windows.h> | 6 #include <windows.h> |
7 #endif | 7 #endif |
8 | 8 |
9 #include "content/common/gpu/gpu_channel.h" | 9 #include "content/common/gpu/gpu_channel.h" |
10 | 10 |
(...skipping 644 matching lines...)
655 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording, | 655 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording, |
656 OnDevToolsStartEventsRecording) | 656 OnDevToolsStartEventsRecording) |
657 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording, | 657 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording, |
658 OnDevToolsStopEventsRecording) | 658 OnDevToolsStopEventsRecording) |
659 IPC_MESSAGE_UNHANDLED(handled = false) | 659 IPC_MESSAGE_UNHANDLED(handled = false) |
660 IPC_END_MESSAGE_MAP() | 660 IPC_END_MESSAGE_MAP() |
661 DCHECK(handled) << msg.type(); | 661 DCHECK(handled) << msg.type(); |
662 return handled; | 662 return handled; |
663 } | 663 } |
664 | 664 |
665 size_t GpuChannel::MatchSwapBufferMessagesPattern( | |
666 IPC::Message* current_message) { | |
667 DCHECK(current_message); | |
668 if (deferred_messages_.empty() || !current_message) | |
669 return 0; | |
670 // We only care about the AsyncFlush message. | |
671 if (current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID) | |
672 return 0; | |
673 | |
674 size_t index = 0; | |
675 int32 routing_id = current_message->routing_id(); | |
676 | |
677 // Fetch the first message and move index to point to the second message. | |
678 IPC::Message* first_message = deferred_messages_[index++]; | |
679 | |
680 // If the current message is AsyncFlush, the expected message sequence for | |
681 // SwapBuffers is AsyncFlush->Echo. We only try to match the Echo message. | |
682 if (current_message->type() == GpuCommandBufferMsg_AsyncFlush::ID && | |
683 first_message->type() == GpuCommandBufferMsg_Echo::ID && | |
684 first_message->routing_id() == routing_id) { | |
685 return 1; | |
686 } | |
687 | |
688 // No matching message was found. | |
689 return 0; | |
690 } | |
691 | |
692 void GpuChannel::HandleMessage() { | 665 void GpuChannel::HandleMessage() { |
693 handle_messages_scheduled_ = false; | 666 handle_messages_scheduled_ = false; |
694 if (deferred_messages_.empty()) | 667 if (deferred_messages_.empty()) |
695 return; | 668 return; |
696 | 669 |
697 size_t matched_messages_num = 0; | |
698 bool should_handle_swapbuffer_msgs_immediate = false; | |
699 IPC::Message* m = NULL; | 670 IPC::Message* m = NULL; |
700 GpuCommandBufferStub* stub = NULL; | 671 GpuCommandBufferStub* stub = NULL; |
701 | 672 |
702 do { | 673 m = deferred_messages_.front(); |
703 m = deferred_messages_.front(); | 674 stub = stubs_.Lookup(m->routing_id()); |
704 stub = stubs_.Lookup(m->routing_id()); | 675 if (stub) { |
| 676 if (!stub->IsScheduled()) |
| 677 return; |
| 678 if (stub->IsPreempted()) { |
| 679 OnScheduled(); |
| 680 return; |
| 681 } |
| 682 } |
| 683 |
| 684 scoped_ptr<IPC::Message> message(m); |
| 685 deferred_messages_.pop_front(); |
| 686 bool message_processed = true; |
| 687 |
| 688 currently_processing_message_ = message.get(); |
| 689 bool result; |
| 690 if (message->routing_id() == MSG_ROUTING_CONTROL) |
| 691 result = OnControlMessageReceived(*message); |
| 692 else |
| 693 result = router_.RouteMessage(*message); |
| 694 currently_processing_message_ = NULL; |
| 695 |
| 696 if (!result) { |
| 697 // Respond to sync messages even if router failed to route. |
| 698 if (message->is_sync()) { |
| 699 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
| 700 reply->set_reply_error(); |
| 701 Send(reply); |
| 702 } |
| 703 } else { |
| 704 // If the command buffer becomes unscheduled as a result of handling the |
| 705 // message but still has more commands to process, synthesize an IPC |
| 706 // message to flush that command buffer. |
705 if (stub) { | 707 if (stub) { |
706 if (!stub->IsScheduled()) | 708 if (stub->HasUnprocessedCommands()) { |
707 return; | 709 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( |
708 if (stub->IsPreempted()) { | 710 stub->route_id())); |
709 OnScheduled(); | 711 message_processed = false; |
710 return; | |
711 } | 712 } |
712 } | 713 } |
713 | 714 } |
714 scoped_ptr<IPC::Message> message(m); | 715 if (message_processed) |
715 deferred_messages_.pop_front(); | 716 MessageProcessed(); |
716 bool message_processed = true; | |
717 | |
718 currently_processing_message_ = message.get(); | |
719 bool result; | |
720 if (message->routing_id() == MSG_ROUTING_CONTROL) | |
721 result = OnControlMessageReceived(*message); | |
722 else | |
723 result = router_.RouteMessage(*message); | |
724 currently_processing_message_ = NULL; | |
725 | |
726 if (!result) { | |
727 // Respond to sync messages even if router failed to route. | |
728 if (message->is_sync()) { | |
729 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | |
730 reply->set_reply_error(); | |
731 Send(reply); | |
732 } | |
733 } else { | |
734 // If the command buffer becomes unscheduled as a result of handling the | |
735 // message but still has more commands to process, synthesize an IPC | |
736 // message to flush that command buffer. | |
737 if (stub) { | |
738 if (stub->HasUnprocessedCommands()) { | |
739 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( | |
740 stub->route_id())); | |
741 message_processed = false; | |
742 } | |
743 } | |
744 } | |
745 if (message_processed) | |
746 MessageProcessed(); | |
747 | |
748 if (deferred_messages_.empty()) | |
749 break; | |
750 | |
751 // We process the pending messages immediately if they match the | |
752 // SwapBuffers pattern. GLRenderer always issues SwapBuffers calls with | |
753 // a specific IPC message pattern, namely the AsyncFlush->Echo | |
754 // sequence. | |
755 // | |
756 // Handling these messages inline, rather than posting a task to the | |
757 // message loop, avoids being blocked by other channels and lets | |
758 // SwapBuffers execute as soon as possible. | |
759 if (!should_handle_swapbuffer_msgs_immediate) { | |
760 // Start from the current processing message to match SwapBuffer pattern. | |
761 matched_messages_num = MatchSwapBufferMessagesPattern(message.get()); | |
762 should_handle_swapbuffer_msgs_immediate = | |
763 matched_messages_num > 0 && stub; | |
764 } else { | |
765 DCHECK_GT(matched_messages_num, 0u); | |
766 --matched_messages_num; | |
767 if (!stub || matched_messages_num == 0) | |
768 should_handle_swapbuffer_msgs_immediate = false; | |
769 } | |
770 } while (should_handle_swapbuffer_msgs_immediate); | |
771 | 717 |
772 if (!deferred_messages_.empty()) { | 718 if (!deferred_messages_.empty()) { |
773 OnScheduled(); | 719 OnScheduled(); |
774 } | 720 } |
775 } | 721 } |
776 | 722 |
777 void GpuChannel::OnCreateOffscreenCommandBuffer( | 723 void GpuChannel::OnCreateOffscreenCommandBuffer( |
778 const gfx::Size& size, | 724 const gfx::Size& size, |
779 const GPUCreateCommandBufferConfig& init_params, | 725 const GPUCreateCommandBufferConfig& init_params, |
780 int32 route_id, | 726 int32 route_id, |
(...skipping 85 matching lines...)
866 uint64 GpuChannel::GetMemoryUsage() { | 812 uint64 GpuChannel::GetMemoryUsage() { |
867 uint64 size = 0; | 813 uint64 size = 0; |
868 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); | 814 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); |
869 !it.IsAtEnd(); it.Advance()) { | 815 !it.IsAtEnd(); it.Advance()) { |
870 size += it.GetCurrentValue()->GetMemoryUsage(); | 816 size += it.GetCurrentValue()->GetMemoryUsage(); |
871 } | 817 } |
872 return size; | 818 return size; |
873 } | 819 } |
874 | 820 |
875 } // namespace content | 821 } // namespace content |