OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if defined(OS_WIN) | 5 #if defined(OS_WIN) |
6 #include <windows.h> | 6 #include <windows.h> |
7 #endif | 7 #endif |
8 | 8 |
9 #include "content/common/gpu/gpu_channel.h" | 9 #include "content/common/gpu/gpu_channel.h" |
10 | 10 |
(...skipping 644 matching lines...)
655 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording, | 655 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording, |
656 OnDevToolsStartEventsRecording) | 656 OnDevToolsStartEventsRecording) |
657 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording, | 657 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording, |
658 OnDevToolsStopEventsRecording) | 658 OnDevToolsStopEventsRecording) |
659 IPC_MESSAGE_UNHANDLED(handled = false) | 659 IPC_MESSAGE_UNHANDLED(handled = false) |
660 IPC_END_MESSAGE_MAP() | 660 IPC_END_MESSAGE_MAP() |
661 DCHECK(handled) << msg.type(); | 661 DCHECK(handled) << msg.type(); |
662 return handled; | 662 return handled; |
663 } | 663 } |
664 | 664 |
| 665 size_t GpuChannel::MatchSwapBufferMessagesPattern( |
| 666 IPC::Message* current_message) { |
| 667 DCHECK(current_message); |
| 668 if (deferred_messages_.empty() || !current_message) |
| 669 return 0; |
| 670 // Only care about SetLatencyInfo and AsyncFlush messages. |
| 671 if (current_message->type() != GpuCommandBufferMsg_SetLatencyInfo::ID && |
| 672 current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID) |
| 673 return 0; |
| 674 |
| 675 size_t index = 0; |
| 676 int32 routing_id = current_message->routing_id(); |
| 677 |
| 678 // If the current message is SetLatencyInfo, we try to look ahead one |
| 679 // more deferred message. |
| 680 IPC::Message* first_message = NULL; |
| 681 IPC::Message* second_message = NULL; |
| 682 |
| 683 // Fetch the first message and move index to point to the second message. |
| 684 first_message = deferred_messages_[index++]; |
| 685 |
| 686 // If the current message is AsyncFlush, the expected message sequence for |
| 687 // SwapBuffers is AsyncFlush->Echo, so we only try to match the Echo message. |
| 688 if (current_message->type() == GpuCommandBufferMsg_AsyncFlush::ID && |
| 689 first_message->type() == GpuCommandBufferMsg_Echo::ID && |
| 690 first_message->routing_id() == routing_id) { |
| 691 return 1; |
| 692 } |
| 693 |
| 694 // If the current message is SetLatencyInfo, the expected message sequence |
| 695 // for SwapBuffers is SetLatencyInfo->AsyncFlush->Echo (the Echo is optional). |
| 696 if (current_message->type() == GpuCommandBufferMsg_SetLatencyInfo::ID && |
| 697 first_message->type() == GpuCommandBufferMsg_AsyncFlush::ID && |
| 698 first_message->routing_id() == routing_id) { |
| 699 if (deferred_messages_.size() >= 2) |
| 700 second_message = deferred_messages_[index]; |
| 701 if (!second_message) |
| 702 return 1; |
| 703 if (second_message->type() == GpuCommandBufferMsg_Echo::ID && |
| 704 second_message->routing_id() == routing_id) { |
| 705 return 2; |
| 706 } |
| 707 } |
| 708 // No matching message was found. |
| 709 return 0; |
| 710 } |
| 711 |
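For reference, the matching rules in MatchSwapBufferMessagesPattern boil down to the following minimal standalone sketch. The MsgType/Msg types and the MatchSwapBufferPattern name are hypothetical stand-ins for IPC::Message and the GpuCommandBufferMsg_* IDs; this illustrates the logic only and is not the Chromium code itself.

    // Minimal sketch of the SwapBuffers pattern matcher, assuming
    // hypothetical Msg/MsgType stand-ins for the real IPC types.
    #include <cstddef>
    #include <cstdint>
    #include <deque>

    enum class MsgType { SetLatencyInfo, AsyncFlush, Echo, Other };

    struct Msg {
      MsgType type;
      int32_t routing_id;
    };

    // Returns how many deferred messages complete a SwapBuffers pattern
    // starting at |current|: AsyncFlush->Echo consumes 1 deferred message,
    // SetLatencyInfo->AsyncFlush[->Echo] consumes 1 or 2.
    size_t MatchSwapBufferPattern(const Msg& current,
                                  const std::deque<Msg>& deferred) {
      if (deferred.empty())
        return 0;
      // Only SetLatencyInfo and AsyncFlush can start a SwapBuffers sequence.
      if (current.type != MsgType::SetLatencyInfo &&
          current.type != MsgType::AsyncFlush)
        return 0;

      const Msg& first = deferred[0];
      // AsyncFlush->Echo: one deferred message completes the pattern.
      if (current.type == MsgType::AsyncFlush &&
          first.type == MsgType::Echo &&
          first.routing_id == current.routing_id) {
        return 1;
      }
      // SetLatencyInfo->AsyncFlush[->Echo]: one or two deferred messages.
      if (current.type == MsgType::SetLatencyInfo &&
          first.type == MsgType::AsyncFlush &&
          first.routing_id == current.routing_id) {
        if (deferred.size() >= 2 &&
            deferred[1].type == MsgType::Echo &&
            deferred[1].routing_id == current.routing_id) {
          return 2;
        }
        return 1;
      }
      return 0;
    }
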
665 void GpuChannel::HandleMessage() { | 712 void GpuChannel::HandleMessage() { |
666 handle_messages_scheduled_ = false; | 713 handle_messages_scheduled_ = false; |
667 if (deferred_messages_.empty()) | 714 if (deferred_messages_.empty()) |
668 return; | 715 return; |
669 | 716 |
670 bool should_fast_track_ack = false; | 717 size_t matched_messages_num = 0; |
671 IPC::Message* m = deferred_messages_.front(); | 718 bool should_handle_swapbuffer_msgs_immediate = false; |
672 GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id()); | 719 IPC::Message* m = NULL; |
| 720 GpuCommandBufferStub* stub = NULL; |
673 | 721 |
674 do { | 722 do { |
| 723 m = deferred_messages_.front(); |
| 724 stub = stubs_.Lookup(m->routing_id()); |
675 if (stub) { | 725 if (stub) { |
676 if (!stub->IsScheduled()) | 726 if (!stub->IsScheduled()) |
677 return; | 727 return; |
678 if (stub->IsPreempted()) { | 728 if (stub->IsPreempted()) { |
679 OnScheduled(); | 729 OnScheduled(); |
680 return; | 730 return; |
681 } | 731 } |
682 } | 732 } |
683 | 733 |
684 scoped_ptr<IPC::Message> message(m); | 734 scoped_ptr<IPC::Message> message(m); |
(...skipping 23 matching lines...)
708 if (stub->HasUnprocessedCommands()) { | 758 if (stub->HasUnprocessedCommands()) { |
709 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( | 759 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( |
710 stub->route_id())); | 760 stub->route_id())); |
711 message_processed = false; | 761 message_processed = false; |
712 } | 762 } |
713 } | 763 } |
714 } | 764 } |
715 if (message_processed) | 765 if (message_processed) |
716 MessageProcessed(); | 766 MessageProcessed(); |
717 | 767 |
718 // We want the EchoACK following the SwapBuffers to be sent as close as | 768 if (deferred_messages_.empty()) |
719 // possible, avoiding scheduling other channels in the meantime. | 769 break; |
720 should_fast_track_ack = false; | 770 |
721 if (!deferred_messages_.empty()) { | 771 // We process the pending messages immediately when they match the |
722 m = deferred_messages_.front(); | 772 // SwapBuffers pattern: GLRenderer always issues its SwapBuffers calls |
723 stub = stubs_.Lookup(m->routing_id()); | 773 // as a specific IPC message sequence, namely |
724 should_fast_track_ack = | 774 // SetLatencyInfo->AsyncFlush->Echo. |
725 (m->type() == GpuCommandBufferMsg_Echo::ID) && | 775 // |
726 stub && stub->IsScheduled(); | 776 // Handling them here instead of posting a task to the message loop |
 | 777 // avoids being blocked by other channels and lets SwapBuffers execute |
 | 778 // as soon as possible. |
| 779 if (!should_handle_swapbuffer_msgs_immediate) { |
| 780 // Start from the current processing message to match SwapBuffer pattern. |
| 781 matched_messages_num = MatchSwapBufferMessagesPattern(message.get()); |
| 782 should_handle_swapbuffer_msgs_immediate = |
| 783 matched_messages_num > 0 && stub; |
| 784 } else { |
| 785 DCHECK_GT(matched_messages_num, 0u); |
| 786 --matched_messages_num; |
| 787 if (!stub || matched_messages_num == 0) |
| 788 should_handle_swapbuffer_msgs_immediate = false; |
727 } | 789 } |
728 } while (should_fast_track_ack); | 790 } while (should_handle_swapbuffer_msgs_immediate); |
729 | 791 |
730 if (!deferred_messages_.empty()) { | 792 if (!deferred_messages_.empty()) { |
731 OnScheduled(); | 793 OnScheduled(); |
732 } | 794 } |
733 } | 795 } |
734 | 796 |
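The reworked HandleMessage() loop uses that match count to keep draining deferred messages in the same call rather than re-posting a task for each one. Below is a simplified sketch of just that control flow, reusing the hypothetical Msg and MatchSwapBufferPattern stand-ins from the sketch above and omitting the stub scheduling and preemption checks.

    // Simplified sketch of the immediate-handling loop, assuming the
    // hypothetical Msg/MatchSwapBufferPattern definitions above.
    void DrainDeferred(std::deque<Msg>* deferred, void (*process)(const Msg&)) {
      if (deferred->empty())
        return;
      size_t matched = 0;
      bool handle_immediately = false;
      do {
        // Pop and handle the front message (scheduling checks omitted here).
        Msg current = deferred->front();
        deferred->pop_front();
        process(current);

        if (deferred->empty())
          break;
        if (!handle_immediately) {
          // The message just handled may start a SwapBuffers sequence.
          matched = MatchSwapBufferPattern(current, *deferred);
          handle_immediately = matched > 0;
        } else {
          // One matched message is consumed per iteration; stop when done.
          if (--matched == 0)
            handle_immediately = false;
        }
      } while (handle_immediately);
      // The real code re-posts itself via OnScheduled() when messages remain.
    }
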
735 void GpuChannel::OnCreateOffscreenCommandBuffer( | 797 void GpuChannel::OnCreateOffscreenCommandBuffer( |
736 const gfx::Size& size, | 798 const gfx::Size& size, |
737 const GPUCreateCommandBufferConfig& init_params, | 799 const GPUCreateCommandBufferConfig& init_params, |
738 int32 route_id, | 800 int32 route_id, |
(...skipping 85 matching lines...)
824 uint64 GpuChannel::GetMemoryUsage() { | 886 uint64 GpuChannel::GetMemoryUsage() { |
825 uint64 size = 0; | 887 uint64 size = 0; |
826 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); | 888 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); |
827 !it.IsAtEnd(); it.Advance()) { | 889 !it.IsAtEnd(); it.Advance()) { |
828 size += it.GetCurrentValue()->GetMemoryUsage(); | 890 size += it.GetCurrentValue()->GetMemoryUsage(); |
829 } | 891 } |
830 return size; | 892 return size; |
831 } | 893 } |
832 | 894 |
833 } // namespace content | 895 } // namespace content |