OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if defined(OS_WIN) | 5 #if defined(OS_WIN) |
6 #include <windows.h> | 6 #include <windows.h> |
7 #endif | 7 #endif |
8 | 8 |
9 #include "content/common/gpu/gpu_channel.h" | 9 #include "content/common/gpu/gpu_channel.h" |
10 | 10 |
(...skipping 55 matching lines...)
66 #endif | 66 #endif |
67 } | 67 } |
68 | 68 |
69 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { | 69 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
70 if (log_messages_) { | 70 if (log_messages_) { |
71 DVLOG(1) << "received message @" << &message << " on channel @" << this | 71 DVLOG(1) << "received message @" << &message << " on channel @" << this |
72 << " with type " << message.type(); | 72 << " with type " << message.type(); |
73 } | 73 } |
74 | 74 |
75 // Control messages are not deferred and can be handled out of order with | 75 // Control messages are not deferred and can be handled out of order with |
76 // respect to routed ones. Except for Echo, which must be deferred in order | 76 // respect to routed ones. |
77 // to respect the asynchronous Mac SwapBuffers. | 77 if (message.routing_id() == MSG_ROUTING_CONTROL) |
78 if (message.routing_id() == MSG_ROUTING_CONTROL && | |
79 message.type() != GpuChannelMsg_Echo::ID) | |
80 return OnControlMessageReceived(message); | 78 return OnControlMessageReceived(message); |
81 | 79 |
82 if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) { | 80 if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) { |
83 if (processed_get_state_fast_) { | 81 if (processed_get_state_fast_) { |
84 // Require a non-GetStateFast message in between two GetStateFast | 82 // Require a non-GetStateFast message in between two GetStateFast |
85 // messages, to ensure progress is made. | 83 // messages, to ensure progress is made. |
86 std::deque<IPC::Message*>::iterator point = deferred_messages_.begin(); | 84 std::deque<IPC::Message*>::iterator point = deferred_messages_.begin(); |
87 | 85 |
88 while (point != deferred_messages_.end() && | 86 while (point != deferred_messages_.end() && |
89 (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) { | 87 (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) { |
(...skipping 132 matching lines...)
222 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { | 220 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { |
223 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers | 221 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers |
224 // here. This is so the reply can be delayed if the scheduler is unscheduled. | 222 // here. This is so the reply can be delayed if the scheduler is unscheduled. |
225 bool handled = true; | 223 bool handled = true; |
226 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) | 224 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) |
227 IPC_MESSAGE_HANDLER(GpuChannelMsg_Initialize, OnInitialize) | 225 IPC_MESSAGE_HANDLER(GpuChannelMsg_Initialize, OnInitialize) |
228 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenCommandBuffer, | 226 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenCommandBuffer, |
229 OnCreateOffscreenCommandBuffer) | 227 OnCreateOffscreenCommandBuffer) |
230 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_DestroyCommandBuffer, | 228 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_DestroyCommandBuffer, |
231 OnDestroyCommandBuffer) | 229 OnDestroyCommandBuffer) |
232 IPC_MESSAGE_HANDLER(GpuChannelMsg_Echo, OnEcho); | |
233 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_WillGpuSwitchOccur, | 230 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_WillGpuSwitchOccur, |
234 OnWillGpuSwitchOccur) | 231 OnWillGpuSwitchOccur) |
235 IPC_MESSAGE_HANDLER(GpuChannelMsg_CloseChannel, OnCloseChannel) | 232 IPC_MESSAGE_HANDLER(GpuChannelMsg_CloseChannel, OnCloseChannel) |
236 IPC_MESSAGE_UNHANDLED(handled = false) | 233 IPC_MESSAGE_UNHANDLED(handled = false) |
237 IPC_END_MESSAGE_MAP() | 234 IPC_END_MESSAGE_MAP() |
238 DCHECK(handled) << msg.type(); | 235 DCHECK(handled) << msg.type(); |
239 return handled; | 236 return handled; |
240 } | 237 } |
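A note on the DELAY_REPLY handlers in the map above: as the comment says, synchronous control messages use IPC_MESSAGE_HANDLER_DELAY_REPLY so the handler owns the reply message and can hold it back while the scheduler is unscheduled. A minimal sketch of what such a handler looks like (GpuChannelMsg_DoWork / OnDoWork are hypothetical names, not part of this change):

    // Sketch only: a delayed-reply handler receives the pre-built reply
    // message and must Send() it itself, which is what lets the reply be
    // deferred until the work (or rescheduling) has completed.
    void GpuChannel::OnDoWork(int32 arg, IPC::Message* reply_message) {
      bool ok = true;  // ... perform the requested work ...
      // Fill in the reply's out-parameters, then return it to the renderer.
      GpuChannelMsg_DoWork::WriteReplyParams(reply_message, ok);
      Send(reply_message);
    }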
241 | 238 |
242 void GpuChannel::HandleMessage() { | 239 void GpuChannel::HandleMessage() { |
243 handle_messages_scheduled_ = false; | 240 handle_messages_scheduled_ = false; |
244 if (!IsScheduled()) | 241 if (!IsScheduled()) |
245 return; | 242 return; |
246 | 243 |
247 if (!deferred_messages_.empty()) { | 244 if (!deferred_messages_.empty()) { |
248 scoped_ptr<IPC::Message> message(deferred_messages_.front()); | 245 scoped_ptr<IPC::Message> message(deferred_messages_.front()); |
249 deferred_messages_.pop_front(); | 246 deferred_messages_.pop_front(); |
250 processed_get_state_fast_ = | 247 processed_get_state_fast_ = |
251 (message->type() == GpuCommandBufferMsg_GetStateFast::ID); | 248 (message->type() == GpuCommandBufferMsg_GetStateFast::ID); |
252 // Handle deferred control messages. | 249 // Handle deferred control messages. |
253 if (message->routing_id() == MSG_ROUTING_CONTROL) | 250 if (!router_.RouteMessage(*message)) { |
254 OnControlMessageReceived(*message); | |
255 else if (!router_.RouteMessage(*message)) { | |
256 // Respond to sync messages even if router failed to route. | 251 // Respond to sync messages even if router failed to route. |
257 if (message->is_sync()) { | 252 if (message->is_sync()) { |
258 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | 253 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
259 reply->set_reply_error(); | 254 reply->set_reply_error(); |
260 Send(reply); | 255 Send(reply); |
261 } | 256 } |
262 } else { | 257 } else { |
263 // If the channel becomes unscheduled as a result of handling the message | 258 // If the channel becomes unscheduled as a result of handling the message |
264 // or has more work to do, synthesize an IPC message to flush the command | 259 // or has more work to do, synthesize an IPC message to flush the command |
265 // buffer that became unscheduled. | 260 // buffer that became unscheduled. |
(...skipping 101 matching lines...)
367 if (need_reschedule) | 362 if (need_reschedule) |
368 OnScheduled(); | 363 OnScheduled(); |
369 DidDestroyCommandBuffer(gpu_preference); | 364 DidDestroyCommandBuffer(gpu_preference); |
370 } | 365 } |
371 #endif | 366 #endif |
372 | 367 |
373 if (reply_message) | 368 if (reply_message) |
374 Send(reply_message); | 369 Send(reply_message); |
375 } | 370 } |
376 | 371 |
377 void GpuChannel::OnEcho(const IPC::Message& message) { | |
378 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho"); | |
379 Send(new IPC::Message(message)); | |
380 } | |
381 | 372 |
382 void GpuChannel::OnWillGpuSwitchOccur(bool is_creating_context, | 373 void GpuChannel::OnWillGpuSwitchOccur(bool is_creating_context, |
383 gfx::GpuPreference gpu_preference, | 374 gfx::GpuPreference gpu_preference, |
384 IPC::Message* reply_message) { | 375 IPC::Message* reply_message) { |
385 TRACE_EVENT0("gpu", "GpuChannel::OnWillGpuSwitchOccur"); | 376 TRACE_EVENT0("gpu", "GpuChannel::OnWillGpuSwitchOccur"); |
386 | 377 |
387 bool will_switch_occur = false; | 378 bool will_switch_occur = false; |
388 | 379 |
389 if (gpu_preference == gfx::PreferDiscreteGpu && | 380 if (gpu_preference == gfx::PreferDiscreteGpu && |
390 gfx::GLContext::SupportsDualGpus()) { | 381 gfx::GLContext::SupportsDualGpus()) { |
(...skipping 49 matching lines...)
440 | 431 |
441 #if defined(OS_POSIX) | 432 #if defined(OS_POSIX) |
442 int GpuChannel::TakeRendererFileDescriptor() { | 433 int GpuChannel::TakeRendererFileDescriptor() { |
443 if (!channel_.get()) { | 434 if (!channel_.get()) { |
444 NOTREACHED(); | 435 NOTREACHED(); |
445 return -1; | 436 return -1; |
446 } | 437 } |
447 return channel_->TakeClientFileDescriptor(); | 438 return channel_->TakeClientFileDescriptor(); |
448 } | 439 } |
449 #endif // defined(OS_POSIX) | 440 #endif // defined(OS_POSIX) |
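For context, TakeRendererFileDescriptor() transfers ownership of the client-side socket to the caller. A minimal caller sketch on POSIX (variable names here are assumptions, not taken from this file):

    // Sketch: wrap the fd so it can be handed to the renderer; the 'true'
    // flag marks the descriptor as auto-close on the receiving side.
    int fd = channel->TakeRendererFileDescriptor();
    if (fd != -1) {
      base::FileDescriptor renderer_fd(fd, true /* auto_close */);
      // renderer_fd is then plumbed back to the renderer, e.g. inside an
      // IPC::ChannelHandle, so it can connect to this GpuChannel.
    }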