Chromium Code Reviews
Unified Diff: content/common/gpu/gpu_channel.cc

Issue 7253052: Execute all GL commands up to the put offset reported by a flush. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
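
For context on the CL description: "put offset" refers to the command buffer's producer/consumer contract. Below is a minimal, self-contained sketch of that contract; the names are illustrative only, not Chromium's actual CommandBuffer API. The client advances a "put" offset as it writes commands and reports it with a flush; the service executes until its "get" offset catches up. The point of this change is that the service must eventually execute every command up to the reported put offset, even if the channel is descheduled part-way through.

    // Illustrative sketch only (not Chromium's actual CommandBuffer API).
    #include <cstdint>
    #include <vector>

    struct CommandRing {
      std::vector<uint32_t> entries;  // command tokens written by the client
      int32_t put = 0;                // last offset made visible by a flush
      int32_t get = 0;                // next offset the service will execute
    };

    // Returns true once every command up to the reported put offset has run;
    // false means execution paused (e.g. unscheduled) and must resume later.
    // Assumes a non-empty ring.
    bool ExecuteUpToPut(CommandRing* ring, bool (*execute)(uint32_t token)) {
      while (ring->get != ring->put) {
        if (!execute(ring->entries[ring->get]))
          return false;
        ring->get =
            (ring->get + 1) % static_cast<int32_t>(ring->entries.size());
      }
      return true;
    }
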
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

(...skipping 15 matching lines...)
 #endif

 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                        GpuWatchdog* watchdog,
                        int renderer_id)
     : gpu_channel_manager_(gpu_channel_manager),
       renderer_id_(renderer_id),
       renderer_process_(base::kNullProcessHandle),
       renderer_pid_(base::kNullProcessId),
       share_group_(new gfx::GLShareGroup),
-      watchdog_(watchdog) {
+      watchdog_(watchdog),
+      task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
   DCHECK(gpu_channel_manager);
   DCHECK(renderer_id);
   const CommandLine* command_line = CommandLine::ForCurrentProcess();
   log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
   disallowed_extensions_.multisampling =
       command_line->HasSwitch(switches::kDisableGLMultisampling);
 }

 GpuChannel::~GpuChannel() {
 #if defined(OS_WIN)
(...skipping 32 matching lines...)
 }
 #endif
 }

 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
   if (log_messages_) {
     VLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
   }

+  // Control messages are not deferred and can be handled out of order with
+  // respect to routed ones.
   if (message.routing_id() == MSG_ROUTING_CONTROL)
     return OnControlMessageReceived(message);

+  // If the channel is unscheduled, defer sync and async messages until it is
+  // rescheduled. Also, even if the channel is scheduled, do not allow newly
+  // received messages to be handled before previously received deferred ones;
+  // append them to the deferred queue as well.
+  if (!IsScheduled() || !deferred_messages_.empty()) {
+    deferred_messages_.push(new IPC::Message(message));
+    return true;
+  }
+
   if (!router_.RouteMessage(message)) {
     // Respond to sync messages even if router failed to route.
     if (message.is_sync()) {
       IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
       reply->set_reply_error();
       Send(reply);
     }
     return false;
   }

+  // If the channel becomes unscheduled as a result of handling the message,
+  // synthesize an IPC message to flush the command buffer that became
+  // unscheduled.
+  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
+       !it.IsAtEnd();
+       it.Advance()) {
+    GpuCommandBufferStub* stub = it.GetCurrentValue();
+    if (!stub->IsScheduled()) {
+      DCHECK(deferred_messages_.empty());
+      deferred_messages_.push(new GpuCommandBufferMsg_Rescheduled(
+          stub->route_id()));
+    }
+  }
+
   return true;
 }

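The deferral rule added above reduces to a small decision procedure. A standalone sketch follows; Message, Route(), and the scheduled flag are stand-ins for the real IPC types, not Chromium classes.

    #include <memory>
    #include <queue>

    struct Message { int routing_id = 0; };

    class Channel {
     public:
      bool OnMessageReceived(const Message& msg) {
        // Defer when unscheduled, and also when older deferred messages are
        // still pending, so delivery order is preserved.
        if (!IsScheduled() || !deferred_.empty()) {
          deferred_.push(std::make_unique<Message>(msg));
          return true;
        }
        return Route(msg);
      }

     private:
      bool IsScheduled() const { return scheduled_; }
      bool Route(const Message& msg) { return true; }  // dispatch to a stub
      bool scheduled_ = true;
      std::queue<std::unique_ptr<Message>> deferred_;
    };
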
 void GpuChannel::OnChannelError() {
   gpu_channel_manager_->RemoveChannel(renderer_id_);
 }

 void GpuChannel::OnChannelConnected(int32 peer_pid) {
   renderer_pid_ = peer_pid;
 }

 bool GpuChannel::Send(IPC::Message* message) {
   // The GPU process must never send a synchronous IPC message to the renderer
   // process. This could result in deadlock.
   DCHECK(!message->is_sync());
   if (log_messages_) {
     VLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
   }

   if (!channel_.get()) {
     delete message;
     return false;
   }

   return channel_->Send(message);
 }

+bool GpuChannel::IsScheduled() {
+  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
+       !it.IsAtEnd();
+       it.Advance()) {
+    GpuCommandBufferStub* stub = it.GetCurrentValue();
+    if (!stub->IsScheduled())
+      return false;
+  }
+
+  return true;
+}
+
+void GpuChannel::OnScheduled() {
+  // Post a task to handle any deferred messages. The deferred message queue is
+  // not emptied here, which ensures that OnMessageReceived will continue to
+  // defer newly received messages until the ones in the queue have all been
+  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
+  // task to prevent reentrancy.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      task_factory_.NewRunnableMethod(
+          &GpuChannel::HandleDeferredMessages));
+}
+
 void GpuChannel::LoseAllContexts() {
   gpu_channel_manager_->LoseAllContexts();
 }

 void GpuChannel::DestroySoon() {
   MessageLoop::current()->PostTask(
       FROM_HERE, NewRunnableMethod(this,
                                    &GpuChannel::OnDestroy));
 }

(...skipping 71 matching lines...)
 #endif

 #if defined(OS_MACOSX)
 void GpuChannel::DestroyCommandBufferByViewId(int32 render_view_id) {
   // This responds to a message from the browser process to destroy the command
   // buffer when the window with a GpuScheduler is closed (see
   // RenderWidgetHostViewMac::DeallocFakePluginWindowHandle). Find the route id
   // that matches the given render_view_id and delete the route.
   for (StubMap::const_iterator iter(&stubs_); !iter.IsAtEnd(); iter.Advance()) {
     if (iter.GetCurrentValue()->render_view_id() == render_view_id) {
-      OnDestroyCommandBuffer(iter.GetCurrentKey());
+      OnDestroyCommandBuffer(iter.GetCurrentKey(), NULL);
       return;
     }
   }
 }
 #endif

 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
+  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
+  // here. This is so the reply can be delayed if the scheduler is unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_Initialize, OnInitialize)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
-                        OnCreateOffscreenCommandBuffer)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
-                        OnDestroyCommandBuffer)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenSurface,
-                        OnCreateOffscreenSurface)
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenCommandBuffer,
+                                    OnCreateOffscreenCommandBuffer)
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_DestroyCommandBuffer,
+                                    OnDestroyCommandBuffer)
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenSurface,
+                                    OnCreateOffscreenSurface)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroySurface, OnDestroySurface)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateTransportTexture,
                         OnCreateTransportTexture)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   DCHECK(handled) << msg.type();
   return handled;
 }

+void GpuChannel::HandleDeferredMessages() {
+  // Empty the deferred queue so OnMessageReceived does not defer on that
+  // account and to prevent an infinite loop if the scheduler is unscheduled
+  // as a result of handling already deferred messages.
+  std::queue<IPC::Message*> deferred_messages_copy;
+  std::swap(deferred_messages_copy, deferred_messages_);
+
+  while (!deferred_messages_copy.empty()) {
+    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
+    deferred_messages_copy.pop();
+
+    OnMessageReceived(*message);
+  }
+}
+
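The swap at the top of HandleDeferredMessages is what keeps the drain loop finite. Continuing the stand-in Channel sketch from earlier (assume this member is declared on it): swapping the member queue into a local first means any message re-deferred while draining lands in the now-empty member queue rather than the queue being drained, so the loop always terminates.

    void HandleDeferred() {
      std::queue<std::unique_ptr<Message>> pending;
      pending.swap(deferred_);
      while (!pending.empty()) {
        std::unique_ptr<Message> msg = std::move(pending.front());
        pending.pop();
        OnMessageReceived(*msg);  // may push into deferred_, never into pending
      }
    }
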
 int GpuChannel::GenerateRouteID() {
   static int last_id = 0;
   return ++last_id;
 }

 void GpuChannel::OnInitialize(base::ProcessHandle renderer_process) {
   // Initialize should only happen once.
   DCHECK(!renderer_process_);

   // Verify that the renderer has passed its own process handle.
   if (base::GetProcId(renderer_process) == renderer_pid_)
     renderer_process_ = renderer_process;
 }

 void GpuChannel::OnCreateOffscreenCommandBuffer(
     const gfx::Size& size,
     const GPUCreateCommandBufferConfig& init_params,
-    int32* route_id) {
+    IPC::Message* reply_message) {
+  int32 route_id = MSG_ROUTING_NONE;
+
   content::GetContentClient()->SetActiveURL(init_params.active_url);
 #if defined(ENABLE_GPU)
-  *route_id = GenerateRouteID();
+  route_id = GenerateRouteID();

   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
       this,
       gfx::kNullPluginWindow,
       size,
       disallowed_extensions_,
       init_params.allowed_extensions,
       init_params.attribs,
-      *route_id,
+      route_id,
       0, 0, watchdog_));
-  router_.AddRoute(*route_id, stub.get());
-  stubs_.AddWithID(stub.release(), *route_id);
+  router_.AddRoute(route_id, stub.get());
+  stubs_.AddWithID(stub.release(), route_id);
   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
                "route_id", route_id);
-#else
-  *route_id = MSG_ROUTING_NONE;
 #endif
+
+  GpuChannelMsg_CreateOffscreenCommandBuffer::WriteReplyParams(
+      reply_message,
+      route_id);
+  Send(reply_message);
 }

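The switch to IPC_MESSAGE_HANDLER_DELAY_REPLY above changes the handler contract from writing results into out-parameters (with the dispatcher replying immediately) to owning the pending reply message and sending it whenever the result is ready. A generic sketch of the two styles; the names are hypothetical, not the real IPC macro internals.

    struct Reply { int route_id; };

    class Sender {
     public:
      // Immediate style: the reply is sent as soon as this returns.
      void OnCreateImmediate(int* route_id_out) {
        *route_id_out = NextRouteId();
      }

      // Delayed style: the handler may stash `reply` and send it much later,
      // e.g. only after the channel has been rescheduled.
      void OnCreateDelayed(Reply* reply) {
        reply->route_id = NextRouteId();
        Send(reply);  // could equally be posted from a later task
      }

     private:
      int NextRouteId() { static int id = 0; return ++id; }
      void Send(Reply* reply) { delete reply; /* transport elided */ }
    };
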
-void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
+void GpuChannel::OnDestroyCommandBuffer(int32 route_id,
+                                        IPC::Message* reply_message) {
 #if defined(ENABLE_GPU)
   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
                "route_id", route_id);
   if (router_.ResolveRoute(route_id)) {
-    GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
-    // In case the renderer is currently blocked waiting for a sync reply from
-    // the stub, allow the stub to clean up and unblock pending messages here:
-    if (stub != NULL)
-      stub->CommandBufferWasDestroyed();
     router_.RemoveRoute(route_id);
     stubs_.Remove(route_id);
   }
 #endif
+
+  if (reply_message)
+    Send(reply_message);
 }

 void GpuChannel::OnCreateOffscreenSurface(const gfx::Size& size,
-                                          int* route_id) {
-  *route_id = MSG_ROUTING_NONE;
+                                          IPC::Message* reply_message) {
+  int route_id = MSG_ROUTING_NONE;

 #if defined(ENABLE_GPU)
   scoped_refptr<gfx::GLSurface> surface(
       gfx::GLSurface::CreateOffscreenGLSurface(size));
   if (!surface.get())
     return;

-  *route_id = GenerateRouteID();
+  route_id = GenerateRouteID();

   scoped_ptr<GpuSurfaceStub> stub (new GpuSurfaceStub(this,
-                                                      *route_id,
+                                                      route_id,
                                                       surface.release()));

-  router_.AddRoute(*route_id, stub.get());
-  surfaces_.AddWithID(stub.release(), *route_id);
+  router_.AddRoute(route_id, stub.get());
+  surfaces_.AddWithID(stub.release(), route_id);
 #endif
+
+  GpuChannelMsg_CreateOffscreenSurface::WriteReplyParams(reply_message,
+                                                         route_id);
+  Send(reply_message);
 }

 void GpuChannel::OnDestroySurface(int route_id) {
 #if defined(ENABLE_GPU)
   if (router_.ResolveRoute(route_id)) {
     router_.RemoveRoute(route_id);
     surfaces_.Remove(route_id);
   }
 #endif
 }
(...skipping 41 matching lines...)

 #if defined(OS_POSIX)
 int GpuChannel::GetRendererFileDescriptor() {
   int fd = -1;
   if (channel_.get()) {
     fd = channel_->GetClientFileDescriptor();
   }
   return fd;
 }
 #endif  // defined(OS_POSIX)