Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_channel.cc

Issue 495313003: Let GpuChannel handle deferred IPC messages of SwapBuffer in batch (Closed) Base URL: https://chromium.googlesource.com/chromium/src@master
Patch Set: Created 6 years, 4 months ago
Index: content/common/gpu/gpu_channel.cc
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 0759e5d00f961569894377ec1e49bd3a7d2267fa..1e8e234547a4237f0b7953ba5a8559fae64e7535 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -662,12 +662,60 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
return handled;
}
+size_t GpuChannel::MatchSwapBufferMessagesPattern(
+ IPC::Message* current_message) {
+ DCHECK(current_message);
+ if (deferred_messages_.empty() || !current_message)
+ return 0;
+ // Only care about SetLatencyInfo and AsyncFlush messages.
+ if (current_message->type() != GpuCommandBufferMsg_SetLatencyInfo::ID &&
+ current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
+ return 0;
+
+ size_t index = 0;
+ int32 routing_id = current_message->routing_id();
+
+ // If the current message is SetLatencyInfo, we try to look ahead one more
+ // deferred message.
+ IPC::Message *first_message = NULL;
+ IPC::Message *second_message = NULL;
+
+ // Fetch the first message and move index to point to the second message.
+ first_message = deferred_messages_[index++];
+
+ // If the current message is AsyncFlush, the expected message sequence for
+ // SwapBuffer should be AsyncFlush->Echo; we only try to match the Echo message.
+ if (current_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
+ first_message->type() == GpuCommandBufferMsg_Echo::ID &&
+ first_message->routing_id() == routing_id) {
+ return 1;
+ }
+
+ // If the current message is SetLatencyInfo, the expected message sequence
+ // for SwapBuffer should be SetLatencyInfo->AsyncFlush->Echo (optional).
+ if (current_message->type() == GpuCommandBufferMsg_SetLatencyInfo::ID &&
+ first_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
+ first_message->routing_id() == routing_id) {
+ if (deferred_messages_.size() >= 2)
+ second_message = deferred_messages_[index];
+ if (!second_message)
+ return 1;
+ if (second_message->type() == GpuCommandBufferMsg_Echo::ID &&
+ second_message->routing_id() == routing_id) {
+ return 2;
+ }
+ }
+ // No matched message is found.
+ return 0;
+}
+
void GpuChannel::HandleMessage() {
handle_messages_scheduled_ = false;
if (deferred_messages_.empty())
return;
- bool should_fast_track_ack = false;
+ size_t matched_messages_num = 0;
+ bool should_handle_swapbuffer_msgs_immediate = false;
IPC::Message* m = deferred_messages_.front();
GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());
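For illustration, here is a minimal, self-contained sketch (not Chromium code) of the lookahead that MatchSwapBufferMessagesPattern() above performs. The message types and the deferred queue are modelled with a plain enum and struct; the real code operates on IPC::Message pointers, the GpuCommandBufferMsg_* IDs, and the deferred_messages_ deque shown in this hunk.

#include <cstddef>
#include <deque>

enum class MsgType { kSetLatencyInfo, kAsyncFlush, kEcho, kOther };

struct Msg {
  MsgType type;
  int routing_id;
};

// Returns how many messages at the front of |deferred| complete a SwapBuffers
// sequence begun by |current|:
//   current == AsyncFlush     -> expect Echo                (returns 1)
//   current == SetLatencyInfo -> expect AsyncFlush[, Echo]  (returns 1 or 2)
// Any other combination, or a routing_id mismatch, returns 0.
std::size_t MatchSwapBufferPattern(const Msg& current,
                                   const std::deque<Msg>& deferred) {
  if (deferred.empty())
    return 0;
  if (current.type != MsgType::kSetLatencyInfo &&
      current.type != MsgType::kAsyncFlush)
    return 0;

  const Msg& first = deferred[0];
  if (first.routing_id != current.routing_id)
    return 0;

  if (current.type == MsgType::kAsyncFlush)
    return first.type == MsgType::kEcho ? 1 : 0;

  // |current| is SetLatencyInfo: the next message must be AsyncFlush for the
  // same route, optionally followed by an Echo for that route.
  if (first.type != MsgType::kAsyncFlush)
    return 0;
  if (deferred.size() < 2)
    return 1;  // Only the AsyncFlush is queued so far.
  const Msg& second = deferred[1];
  if (second.type == MsgType::kEcho && second.routing_id == current.routing_id)
    return 2;
  return 0;  // Something other than the expected Echo follows; no match.
}

Given SetLatencyInfo as the current message and a queue whose front holds AsyncFlush then Echo for the same route, the sketch returns 2, the full sequence this patch attributes to GLRenderer's SwapBuffers path.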
@@ -715,17 +763,30 @@ void GpuChannel::HandleMessage() {
if (message_processed)
MessageProcessed();
- // We want the EchoACK following the SwapBuffers to be sent as close as
- // possible, avoiding scheduling other channels in the meantime.
- should_fast_track_ack = false;
- if (!deferred_messages_.empty()) {
- m = deferred_messages_.front();
- stub = stubs_.Lookup(m->routing_id());
- should_fast_track_ack =
- (m->type() == GpuCommandBufferMsg_Echo::ID) &&
- stub && stub->IsScheduled();
+ if (deferred_messages_.empty())
+ break;
+
+ // We process the pending messages immediately if these messages matches
+ // the pattern of SwapBuffers, for example, GLRenderer always issues
+ // SwapBuffers calls with a specifix IPC message patterns, for example,
Ken Russell 2014/08/25 23:03:03 typo: specific. Here and in CL description.
Hongbo Min 2014/08/26 11:37:00 Done.
+ // it should be SetLatencyInfo->AsyncFlush->Echo sequence.
+ //
+ // Handling them here instead of posting a task to the message loop avoids
+ // the possibility of being blocked by other channels and lets SwapBuffers
+ // execute as soon as possible.
+ m = deferred_messages_.front();
jbauman 2014/08/26 00:00:43 I think you could move these two lines to the begi
Hongbo Min 2014/08/26 11:37:00 Done.
+ stub = stubs_.Lookup(m->routing_id());
+ if (!should_handle_swapbuffer_msgs_immediate) {
+ // Start matching the SwapBuffer pattern from the message currently being handled.
+ matched_messages_num = MatchSwapBufferMessagesPattern(message.get());
+ should_handle_swapbuffer_msgs_immediate =
+ matched_messages_num > 0 && stub && stub->IsScheduled();
+ } else {
+ --matched_messages_num;
Ken Russell 2014/08/25 23:03:03 Before this line, please DCHECK(matched_messages_n
Hongbo Min 2014/08/26 11:37:00 Done.
+ if (!stub || !stub->IsScheduled() || matched_messages_num == 0)
+ should_handle_swapbuffer_msgs_immediate = false;
}
- } while (should_fast_track_ack);
+ } while (should_handle_swapbuffer_msgs_immediate);
if (!deferred_messages_.empty()) {
OnScheduled();
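To show how the matcher feeds the revised loop, here is a rough sketch (again, not the actual Chromium implementation) of the control flow HandleMessage() now follows: once the message just handled plus the head of the queue match a SwapBuffers sequence, the matched messages are dispatched within the same HandleMessage() call instead of one per posted task. StubIsScheduledFor(), Dispatch() and PostHandleMessageTask() are hypothetical stand-ins for stubs_.Lookup()/IsScheduled(), the message dispatch code, and OnScheduled(); MatchSwapBufferPattern() is the helper sketched after the first hunk.

#include <cstddef>
#include <deque>

enum class MsgType { kSetLatencyInfo, kAsyncFlush, kEcho, kOther };

struct Msg {
  MsgType type;
  int routing_id;
};

// Hypothetical stand-ins for pieces of GpuChannel (see the note above).
bool StubIsScheduledFor(const Msg& m);
void Dispatch(const Msg& m);
void PostHandleMessageTask();
std::size_t MatchSwapBufferPattern(const Msg& current,
                                   const std::deque<Msg>& deferred);

void HandleMessage(std::deque<Msg>& deferred) {
  std::size_t matched_messages_num = 0;
  bool drain_swapbuffer_msgs = false;
  do {
    if (deferred.empty())
      return;
    Msg current = deferred.front();
    deferred.pop_front();
    Dispatch(current);

    if (deferred.empty())
      break;

    const Msg& next = deferred.front();
    if (!drain_swapbuffer_msgs) {
      // Start draining only when the message just handled plus the new queue
      // head form a SwapBuffers sequence and the target stub can run now.
      matched_messages_num = MatchSwapBufferPattern(current, deferred);
      drain_swapbuffer_msgs =
          matched_messages_num > 0 && StubIsScheduledFor(next);
    } else {
      // One matched message was consumed this iteration; stop once the
      // sequence is exhausted or the stub is no longer scheduled.
      --matched_messages_num;
      if (matched_messages_num == 0 || !StubIsScheduledFor(next))
        drain_swapbuffer_msgs = false;
    }
  } while (drain_swapbuffer_msgs);

  // Anything left over is handled later via the message loop, as before.
  if (!deferred.empty())
    PostHandleMessageTask();
}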
