Chromium Code Reviews
Unified Diff: content/common/gpu/gpu_channel.cc

Issue 7253052: Execute all GL commands up to the put offset reported by a flush. (Closed)
Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
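For context on the issue title: the renderer writes GL commands into a shared ring buffer and advances a "put" offset; a flush reports that offset to the GPU process, which should then execute every command until its "get" offset catches up. A minimal sketch of the two cursors, with hypothetical names (ToyCommandBuffer is illustrative, not a Chromium class):

    // Toy model of the put/get cursors named in the issue title.
    struct ToyCommandBuffer {
      static const int kSize = 1024;
      int commands[kSize];
      int get;  // next command the GPU process will execute
      int put;  // one past the last command the renderer wrote

      ToyCommandBuffer() : get(0), put(0) {}

      // Renderer side: a flush publishes everything written so far.
      void Flush(int new_put) { put = new_put; }

      // GPU side: execute all commands up to the reported put offset.
      void ExecuteToPut() {
        while (get != put) {
          // DispatchCommand(commands[get]);
          get = (get + 1) % kSize;
        }
      }
    };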
--- content/common/gpu/gpu_channel.cc (old)
+++ content/common/gpu/gpu_channel.cc (new)

 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

 (...skipping 15 matching lines...)
 #endif

 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                        GpuWatchdog* watchdog,
                        int renderer_id)
     : gpu_channel_manager_(gpu_channel_manager),
       renderer_id_(renderer_id),
       renderer_process_(base::kNullProcessHandle),
       renderer_pid_(base::kNullProcessId),
       share_group_(new gfx::GLShareGroup),
-      watchdog_(watchdog) {
+      watchdog_(watchdog),
+      task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
   DCHECK(gpu_channel_manager);
   DCHECK(renderer_id);
   const CommandLine* command_line = CommandLine::ForCurrentProcess();
   log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
   disallowed_extensions_.multisampling =
       command_line->HasSwitch(switches::kDisableGLMultisampling);
 }

 GpuChannel::~GpuChannel() {
 #if defined(OS_WIN)
 (...skipping 32 matching lines...)
   }
 #endif
 }

 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
   if (log_messages_) {
     VLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
   }

+  // Control messages are not deferred and can be handled out of order with
+  // respect to routed ones.
   if (message.routing_id() == MSG_ROUTING_CONTROL)
     return OnControlMessageReceived(message);

+  // If the channel is unscheduled, defer sync and async messages until it is
+  // rescheduled. Also, even if the channel is scheduled, do not allow newly
+  // received messages to be handled before previously received deferred ones;
+  // append them to the deferred queue as well.
+  if (!IsScheduled() || !deferred_messages_.empty()) {
+    deferred_messages_.push(new IPC::Message(message));
+    return true;
+  }
+
   if (!router_.RouteMessage(message)) {
     // Respond to sync messages even if router failed to route.
     if (message.is_sync()) {
       IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
       reply->set_reply_error();
       Send(reply);
     }
     return false;
   }

 (...skipping 18 matching lines...)
   }

   if (!channel_.get()) {
     delete message;
     return false;
   }

   return channel_->Send(message);
 }

+bool GpuChannel::IsScheduled() {
+  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
+       !it.IsAtEnd();
+       it.Advance()) {
+    GpuCommandBufferStub* stub = it.GetCurrentValue();
+    if (!stub->IsScheduled())
+      return false;
+  }
+
+  return true;
+}
+
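IsScheduled() makes the channel's schedulability the conjunction of its stubs': one blocked stub parks the entire channel. A toy sketch of the contract this assumes (hypothetical names; the real signalling lives in GpuCommandBufferStub and the scheduler):

    // A stub flips its scheduled bit and pings the channel when it can make
    // progress again; the channel then drains deferred messages (see
    // OnScheduled below).
    class ToyStub {
     public:
      explicit ToyStub(GpuChannel* channel)
          : channel_(channel), scheduled_(true) {}

      bool IsScheduled() const { return scheduled_; }

      void SetScheduled(bool scheduled) {
        scheduled_ = scheduled;
        if (scheduled_)
          channel_->OnScheduled();  // kick off the deferred-message drain
      }

     private:
      GpuChannel* channel_;
      bool scheduled_;
    };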
+void GpuChannel::OnScheduled() {
+  // Post a task to handle any deferred messages. The deferred message queue
+  // is not emptied here, which ensures that OnMessageReceived will continue
+  // to defer newly received messages until the ones in the queue have all
+  // been handled by HandleDeferredMessages. HandleDeferredMessages is
+  // invoked as a task to prevent reentrancy.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      task_factory_.NewRunnableMethod(
+          &GpuChannel::HandleDeferredMessages));
+}
+
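Posting a task rather than draining inline means HandleDeferredMessages never runs in the middle of whatever call path rescheduled a stub. The factory also bounds the task's lifetime; a sketch assuming task_factory_ is a ScopedRunnableMethodFactory<GpuChannel>, consistent with the initializer added above (the header is not part of this diff):

    // Assumption: declared in gpu_channel.h (not shown in this patch).
    ScopedRunnableMethodFactory<GpuChannel> task_factory_;

    // Because the factory is a member, any task it minted that is still
    // queued when the GpuChannel is destroyed is cancelled rather than run,
    // so a pending HandleDeferredMessages cannot touch a deleted channel.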
 void GpuChannel::LoseAllContexts() {
   gpu_channel_manager_->LoseAllContexts();
 }

 void GpuChannel::DestroySoon() {
   MessageLoop::current()->PostTask(
       FROM_HERE, NewRunnableMethod(this,
                                    &GpuChannel::OnDestroy));
 }

 (...skipping 71 matching lines...)
 #endif

 #if defined(OS_MACOSX)
 void GpuChannel::DestroyCommandBufferByViewId(int32 render_view_id) {
   // This responds to a message from the browser process to destroy the command
   // buffer when the window with a GpuScheduler is closed (see
   // RenderWidgetHostViewMac::DeallocFakePluginWindowHandle). Find the route id
   // that matches the given render_view_id and delete the route.
   for (StubMap::const_iterator iter(&stubs_); !iter.IsAtEnd(); iter.Advance()) {
     if (iter.GetCurrentValue()->render_view_id() == render_view_id) {
-      OnDestroyCommandBuffer(iter.GetCurrentKey());
+      OnDestroyCommandBuffer(iter.GetCurrentKey(), NULL);
       return;
     }
   }
 }
 #endif

 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
+  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
+  // handlers here. This is so the reply can be delayed if the scheduler is
+  // unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_Initialize, OnInitialize)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenCommandBuffer,
                                     OnCreateOffscreenCommandBuffer)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_DestroyCommandBuffer,
                                     OnDestroyCommandBuffer)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenSurface,
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenSurface,
                                     OnCreateOffscreenSurface)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroySurface, OnDestroySurface)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateTransportTexture,
                         OnCreateTransportTexture)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   DCHECK(handled) << msg.type();
   return handled;
 }

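The DELAY_REPLY variants change the handler contract: instead of filling in out-parameters and letting the macro machinery send the reply on return, the handler is handed the pending reply message and owns the obligation to complete it, possibly much later. A sketch of the shape with a hypothetical sync message FooMsg_Bar:

    // Every path out of a DELAY_REPLY handler must eventually call
    // Send(reply_message); otherwise the sending process stays blocked and
    // the reply message leaks. (Note the early return in
    // OnCreateOffscreenSurface below when surface creation fails.)
    void Foo::OnBar(int arg, IPC::Message* reply_message) {
      int result = Compute(arg);  // hypothetical work; may be deferred
      FooMsg_Bar::WriteReplyParams(reply_message, result);
      Send(reply_message);
    }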
+void GpuChannel::HandleDeferredMessages() {
+  // Empty the deferred queue so OnMessageReceived does not defer on that
+  // account and to prevent an infinite loop if the scheduler is unscheduled
+  // as a result of handling already deferred messages.
+  std::queue<IPC::Message*> deferred_messages_copy;
+  std::swap(deferred_messages_copy, deferred_messages_);
+
+  while (!deferred_messages_copy.empty()) {
+    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
+    deferred_messages_copy.pop();
+
+    OnMessageReceived(*message);
+  }
+}
+
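Swapping the queue into a local is what guarantees termination: anything re-deferred while draining lands in the now-empty member queue and waits for the next OnScheduled, instead of being revisited by this loop. A self-contained toy of the idiom (toy types, not Chromium code):

    #include <queue>
    #include <string>
    #include <utility>

    class ToyChannel {
     public:
      ToyChannel() : scheduled_(true) {}

      void set_scheduled(bool scheduled) { scheduled_ = scheduled; }

      void OnMessage(const std::string& msg) {
        // Defer if unscheduled, or if older deferred messages must go first.
        if (!scheduled_ || !deferred_.empty()) {
          deferred_.push(msg);
          return;
        }
        // Handle(msg);
      }

      void HandleDeferred() {
        std::queue<std::string> copy;
        std::swap(copy, deferred_);  // drain from a snapshot
        while (!copy.empty()) {
          OnMessage(copy.front());   // may re-defer into deferred_
          copy.pop();
        }
      }

     private:
      bool scheduled_;
      std::queue<std::string> deferred_;
    };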
 int GpuChannel::GenerateRouteID() {
   static int last_id = 0;
   return ++last_id;
 }

 void GpuChannel::OnInitialize(base::ProcessHandle renderer_process) {
   // Initialize should only happen once.
   DCHECK(!renderer_process_);

   // Verify that the renderer has passed its own process handle.
   if (base::GetProcId(renderer_process) == renderer_pid_)
     renderer_process_ = renderer_process;
 }

 void GpuChannel::OnCreateOffscreenCommandBuffer(
     const gfx::Size& size,
     const GPUCreateCommandBufferConfig& init_params,
-    int32* route_id) {
+    IPC::Message* reply_message) {
+  int32 route_id = MSG_ROUTING_NONE;
+
   content::GetContentClient()->SetActiveURL(init_params.active_url);
 #if defined(ENABLE_GPU)
-  *route_id = GenerateRouteID();
+  route_id = GenerateRouteID();

   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
       this,
       gfx::kNullPluginWindow,
       size,
       disallowed_extensions_,
       init_params.allowed_extensions,
       init_params.attribs,
-      *route_id,
+      route_id,
       0, 0, watchdog_));
-  router_.AddRoute(*route_id, stub.get());
-  stubs_.AddWithID(stub.release(), *route_id);
+  router_.AddRoute(route_id, stub.get());
+  stubs_.AddWithID(stub.release(), route_id);
   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
                "route_id", route_id);
-#else
-  *route_id = MSG_ROUTING_NONE;
 #endif
+
+  GpuChannelMsg_CreateOffscreenCommandBuffer::WriteReplyParams(
+      reply_message,
+      route_id);
+  Send(reply_message);
 }

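For reference, the renderer side of this exchange is a blocking sync send; with the DELAY_REPLY handler above it now unblocks only when the GPU process calls Send(reply_message). A sketch of the call site (hypothetical; the real caller is not in this file):

    // Blocks until the GPU process completes the reply, however long the
    // channel stays unscheduled in between.
    int32 route_id = MSG_ROUTING_NONE;
    channel->Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
        size, init_params, &route_id));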
-void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
+void GpuChannel::OnDestroyCommandBuffer(int32 route_id,
+                                        IPC::Message* reply_message) {
 #if defined(ENABLE_GPU)
   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
                "route_id", route_id);
   if (router_.ResolveRoute(route_id)) {
-    GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
-    // In case the renderer is currently blocked waiting for a sync reply from
-    // the stub, allow the stub to clean up and unblock pending messages here:
-    if (stub != NULL)
-      stub->CommandBufferWasDestroyed();
     router_.RemoveRoute(route_id);
     stubs_.Remove(route_id);
   }
 #endif
+
+  if (reply_message)
+    Send(reply_message);
 }

 void GpuChannel::OnCreateOffscreenSurface(const gfx::Size& size,
-                                          int* route_id) {
-  *route_id = MSG_ROUTING_NONE;
+                                          IPC::Message* reply_message) {
+  int route_id = MSG_ROUTING_NONE;

 #if defined(ENABLE_GPU)
   scoped_refptr<gfx::GLSurface> surface(
       gfx::GLSurface::CreateOffscreenGLSurface(size));
   if (!surface.get())
     return;

-  *route_id = GenerateRouteID();
+  route_id = GenerateRouteID();

   scoped_ptr<GpuSurfaceStub> stub(new GpuSurfaceStub(this,
-                                                     *route_id,
+                                                     route_id,
                                                      surface.release()));

-  router_.AddRoute(*route_id, stub.get());
-  surfaces_.AddWithID(stub.release(), *route_id);
+  router_.AddRoute(route_id, stub.get());
+  surfaces_.AddWithID(stub.release(), route_id);
 #endif
+
+  GpuChannelMsg_CreateOffscreenSurface::WriteReplyParams(reply_message,
+                                                         route_id);
+  Send(reply_message);
 }

 void GpuChannel::OnDestroySurface(int route_id) {
 #if defined(ENABLE_GPU)
   if (router_.ResolveRoute(route_id)) {
     router_.RemoveRoute(route_id);
     surfaces_.Remove(route_id);
   }
 #endif
 }

 (...skipping 41 matching lines...)

 #if defined(OS_POSIX)
 int GpuChannel::GetRendererFileDescriptor() {
   int fd = -1;
   if (channel_.get()) {
     fd = channel_->GetClientFileDescriptor();
   }
   return fd;
 }
 #endif  // defined(OS_POSIX)