Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_channel.cc

Issue 7458010: Revert 93066 - Execute all GL commands up to the put offset reported by each flush. This means g... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
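Note: the change being reverted made the GPU scheduler execute every command up to the put offset carried by each renderer flush. As context for the diff below, here is a minimal standalone sketch of that put/get ring-buffer model; CommandRing and ExecuteUpToPut are hypothetical names for illustration, not Chromium's actual classes.

// Sketch only: command words in a fixed-size ring; the client bumps "put"
// and reports it in a flush, the service executes until "get" catches up.
// Assumes a non-empty ring and a valid flushed put offset.
#include <cstdint>
#include <vector>

struct CommandRing {
  std::vector<uint32_t> entries;  // Fixed-size ring of command words.
  size_t get = 0;                 // Next entry the service will execute.
};

void ExecuteUpToPut(CommandRing* ring, size_t flushed_put,
                    void (*execute)(uint32_t)) {
  while (ring->get != flushed_put) {
    execute(ring->entries[ring->get]);
    ring->get = (ring->get + 1) % ring->entries.size();
  }
}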
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

(...skipping 15 matching lines...)
 #endif

 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                        GpuWatchdog* watchdog,
                        int renderer_id)
     : gpu_channel_manager_(gpu_channel_manager),
       renderer_id_(renderer_id),
       renderer_process_(base::kNullProcessHandle),
       renderer_pid_(base::kNullProcessId),
       share_group_(new gfx::GLShareGroup),
-      watchdog_(watchdog),
-      task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+      watchdog_(watchdog) {
   DCHECK(gpu_channel_manager);
   DCHECK(renderer_id);
   const CommandLine* command_line = CommandLine::ForCurrentProcess();
   log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
   disallowed_extensions_.multisampling =
       command_line->HasSwitch(switches::kDisableGLMultisampling);
 }

 GpuChannel::~GpuChannel() {
 #if defined(OS_WIN)
(...skipping 32 matching lines...)
   }
 #endif
 }

 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
   if (log_messages_) {
     VLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
   }

-  // Control messages are not deferred and can be handled out of order with
-  // respect to routed ones.
   if (message.routing_id() == MSG_ROUTING_CONTROL)
     return OnControlMessageReceived(message);

-  // If the channel is unscheduled, defer sync and async messages until it is
-  // rescheduled. Also, even if the channel is scheduled, do not allow newly
-  // received messages to be handled before previously received deferred ones;
-  // append them to the deferred queue as well.
-  if (!IsScheduled() || !deferred_messages_.empty()) {
-    deferred_messages_.push(new IPC::Message(message));
-    return true;
-  }
-
   if (!router_.RouteMessage(message)) {
     // Respond to sync messages even if router failed to route.
     if (message.is_sync()) {
       IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
       reply->set_reply_error();
       Send(reply);
     }
     return false;
   }

-  // If the channel becomes unscheduled as a result of handling the message,
-  // synthesize an IPC message to flush the command buffer that became
-  // unscheduled.
-  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
-       !it.IsAtEnd();
-       it.Advance()) {
-    GpuCommandBufferStub* stub = it.GetCurrentValue();
-    if (!stub->IsScheduled()) {
-      DCHECK(deferred_messages_.empty());
-      deferred_messages_.push(new GpuCommandBufferMsg_Rescheduled(
-          stub->route_id()));
-    }
-  }
-
   return true;
 }
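Note: the deletion above reverts the message-deferral rule: messages queue while any stub is unscheduled, and keep queuing behind earlier deferred messages even after rescheduling, so replay order matches arrival order. A condensed standalone model of that rule follows; Message and DeferringChannel are hypothetical stand-ins, not the Chromium classes.

#include <memory>
#include <queue>

struct Message { int type; };

class DeferringChannel {
 public:
  bool scheduled = true;

  // Mirrors the removed check: defer if unscheduled OR anything is already
  // queued, so a newly received message can never jump ahead of older ones.
  void OnMessage(const Message& m) {
    if (!scheduled || !deferred_.empty()) {
      deferred_.push(std::make_unique<Message>(m));  // Deep copy; replay later.
      return;
    }
    Route(m);
  }

 private:
  void Route(const Message&) { /* dispatch to the matching stub */ }
  std::queue<std::unique_ptr<Message>> deferred_;
};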

 void GpuChannel::OnChannelError() {
   gpu_channel_manager_->RemoveChannel(renderer_id_);
 }

 void GpuChannel::OnChannelConnected(int32 peer_pid) {
   renderer_pid_ = peer_pid;
 }

 bool GpuChannel::Send(IPC::Message* message) {
   // The GPU process must never send a synchronous IPC message to the renderer
   // process. This could result in deadlock.
   DCHECK(!message->is_sync());
   if (log_messages_) {
     VLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
   }

   if (!channel_.get()) {
     delete message;
     return false;
   }

   return channel_->Send(message);
 }

-bool GpuChannel::IsScheduled() {
-  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
-       !it.IsAtEnd();
-       it.Advance()) {
-    GpuCommandBufferStub* stub = it.GetCurrentValue();
-    if (!stub->IsScheduled())
-      return false;
-  }
-
-  return true;
-}
-
-void GpuChannel::OnScheduled() {
-  // Post a task to handle any deferred messages. The deferred message queue is
-  // not emptied here, which ensures that OnMessageReceived will continue to
-  // defer newly received messages until the ones in the queue have all been
-  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
-  // task to prevent reentrancy.
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      task_factory_.NewRunnableMethod(
-          &GpuChannel::HandleDeferredMessages));
-}
-
 void GpuChannel::LoseAllContexts() {
   gpu_channel_manager_->LoseAllContexts();
 }

 void GpuChannel::DestroySoon() {
   MessageLoop::current()->PostTask(
       FROM_HERE, NewRunnableMethod(this,
                                    &GpuChannel::OnDestroy));
 }

(...skipping 71 matching lines...)
 #endif

 #if defined(OS_MACOSX)
 void GpuChannel::DestroyCommandBufferByViewId(int32 render_view_id) {
   // This responds to a message from the browser process to destroy the command
   // buffer when the window with a GpuScheduler is closed (see
   // RenderWidgetHostViewMac::DeallocFakePluginWindowHandle). Find the route id
   // that matches the given render_view_id and delete the route.
   for (StubMap::const_iterator iter(&stubs_); !iter.IsAtEnd(); iter.Advance()) {
     if (iter.GetCurrentValue()->render_view_id() == render_view_id) {
-      OnDestroyCommandBuffer(iter.GetCurrentKey(), NULL);
+      OnDestroyCommandBuffer(iter.GetCurrentKey());
       return;
     }
   }
 }
 #endif

 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
-  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
-  // here. This is so the reply can be delayed if the scheduler is unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_Initialize, OnInitialize)
-    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenCommandBuffer,
-                                    OnCreateOffscreenCommandBuffer)
-    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_DestroyCommandBuffer,
-                                    OnDestroyCommandBuffer)
-    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenSurface,
-                                    OnCreateOffscreenSurface)
+    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
+                        OnCreateOffscreenCommandBuffer)
+    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
+                        OnDestroyCommandBuffer)
+    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenSurface,
+                        OnCreateOffscreenSurface)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroySurface, OnDestroySurface)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateTransportTexture,
                         OnCreateTransportTexture)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   DCHECK(handled) << msg.type();
   return handled;
 }
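Note: the removed comment above refers to Chromium's delayed-reply idiom. A handler registered with IPC_MESSAGE_HANDLER_DELAY_REPLY receives the pending reply as an IPC::Message* and may send it whenever it is ready (the old handlers later in this file call WriteReplyParams and Send explicitly), whereas a plain IPC_MESSAGE_HANDLER takes out-parameters and the sync reply goes out as soon as the handler returns. The two shapes, taken from the signatures in this diff:

// Old (delayed reply): the handler owns the reply message.
void OnCreateOffscreenSurface(const gfx::Size& size,
                              IPC::Message* reply_message);

// New (immediate reply): reply params are out-arguments.
void OnCreateOffscreenSurface(const gfx::Size& size, int* route_id);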

-void GpuChannel::HandleDeferredMessages() {
-  // Empty the deferred queue so OnMessageReceived does not defer on that
-  // account and to prevent an infinite loop if the scheduler is unscheduled
-  // as a result of handling already deferred messages.
-  std::queue<IPC::Message*> deferred_messages_copy;
-  std::swap(deferred_messages_copy, deferred_messages_);
-
-  while (!deferred_messages_copy.empty()) {
-    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
-    deferred_messages_copy.pop();
-
-    OnMessageReceived(*message);
-  }
-}
-
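Note: the drain loop deleted above swaps the queue empty before replaying. A standalone sketch of why that matters (hypothetical names; Message as in the earlier sketch): re-entrant deferrals land in the fresh member queue rather than the one being drained, and a channel that becomes unscheduled mid-replay re-queues the remainder instead of looping forever.

#include <memory>
#include <queue>

struct Message { int type; };
using MessageQueue = std::queue<std::unique_ptr<Message>>;

void DrainDeferred(MessageQueue* deferred, void (*replay)(const Message&)) {
  MessageQueue local;
  std::swap(local, *deferred);  // Detach first; *deferred is empty again.
  while (!local.empty()) {
    std::unique_ptr<Message> m = std::move(local.front());
    local.pop();
    // replay() may push onto *deferred; those wait for the next drain.
    replay(*m);
  }
}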
 int GpuChannel::GenerateRouteID() {
   static int last_id = 0;
   return ++last_id;
 }

 void GpuChannel::OnInitialize(base::ProcessHandle renderer_process) {
   // Initialize should only happen once.
   DCHECK(!renderer_process_);

   // Verify that the renderer has passed its own process handle.
   if (base::GetProcId(renderer_process) == renderer_pid_)
     renderer_process_ = renderer_process;
 }

 void GpuChannel::OnCreateOffscreenCommandBuffer(
     const gfx::Size& size,
     const GPUCreateCommandBufferConfig& init_params,
-    IPC::Message* reply_message) {
-  int32 route_id = MSG_ROUTING_NONE;
-
+    int32* route_id) {
   content::GetContentClient()->SetActiveURL(init_params.active_url);
 #if defined(ENABLE_GPU)
-  route_id = GenerateRouteID();
+  *route_id = GenerateRouteID();

   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
       this,
       gfx::kNullPluginWindow,
       size,
       disallowed_extensions_,
       init_params.allowed_extensions,
       init_params.attribs,
-      route_id,
+      *route_id,
       0, 0, watchdog_));
-  router_.AddRoute(route_id, stub.get());
-  stubs_.AddWithID(stub.release(), route_id);
+  router_.AddRoute(*route_id, stub.get());
+  stubs_.AddWithID(stub.release(), *route_id);
   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
                "route_id", route_id);
+#else
+  *route_id = MSG_ROUTING_NONE;
 #endif
-
-  GpuChannelMsg_CreateOffscreenCommandBuffer::WriteReplyParams(
-      reply_message,
-      route_id);
-  Send(reply_message);
 }

-void GpuChannel::OnDestroyCommandBuffer(int32 route_id,
-                                        IPC::Message* reply_message) {
+void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
 #if defined(ENABLE_GPU)
   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
                "route_id", route_id);
   if (router_.ResolveRoute(route_id)) {
+    GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
+    // In case the renderer is currently blocked waiting for a sync reply from
+    // the stub, allow the stub to clean up and unblock pending messages here:
+    if (stub != NULL)
+      stub->CommandBufferWasDestroyed();
     router_.RemoveRoute(route_id);
     stubs_.Remove(route_id);
   }
 #endif
-
-  if (reply_message)
-    Send(reply_message);
 }
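Note: the added lines above guard against a renderer that is blocked in a sync call to the stub when the browser tears the command buffer down. This diff does not show GpuCommandBufferStub::CommandBufferWasDestroyed itself, so the following is only a guess at the shape of that cleanup, reusing the error-reply pattern already visible in OnMessageReceived; HasPendingSyncReply and TakePendingSyncMessage are hypothetical helpers.

// Hypothetical sketch, not the real stub implementation.
void GpuCommandBufferStub::CommandBufferWasDestroyed() {
  // Answer any still-pending sync message with an error so the blocked
  // renderer wakes up before this route is removed.
  while (HasPendingSyncReply()) {
    IPC::Message* reply =
        IPC::SyncMessage::GenerateReply(TakePendingSyncMessage());
    reply->set_reply_error();
    channel_->Send(reply);
  }
}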

 void GpuChannel::OnCreateOffscreenSurface(const gfx::Size& size,
-                                          IPC::Message* reply_message) {
-  int route_id = MSG_ROUTING_NONE;
+                                          int* route_id) {
+  *route_id = MSG_ROUTING_NONE;

 #if defined(ENABLE_GPU)
   scoped_refptr<gfx::GLSurface> surface(
       gfx::GLSurface::CreateOffscreenGLSurface(size));
   if (!surface.get())
     return;

-  route_id = GenerateRouteID();
+  *route_id = GenerateRouteID();

   scoped_ptr<GpuSurfaceStub> stub (new GpuSurfaceStub(this,
-                                                      route_id,
+                                                      *route_id,
                                                       surface.release()));

-  router_.AddRoute(route_id, stub.get());
-  surfaces_.AddWithID(stub.release(), route_id);
+  router_.AddRoute(*route_id, stub.get());
+  surfaces_.AddWithID(stub.release(), *route_id);
 #endif
-
-  GpuChannelMsg_CreateOffscreenSurface::WriteReplyParams(reply_message,
-                                                         route_id);
-  Send(reply_message);
 }

 void GpuChannel::OnDestroySurface(int route_id) {
 #if defined(ENABLE_GPU)
   if (router_.ResolveRoute(route_id)) {
     router_.RemoveRoute(route_id);
     surfaces_.Remove(route_id);
   }
 #endif
 }

(...skipping 41 matching lines...)

 #if defined(OS_POSIX)
 int GpuChannel::GetRendererFileDescriptor() {
   int fd = -1;
   if (channel_.get()) {
     fd = channel_->GetClientFileDescriptor();
   }
   return fd;
 }
 #endif  // defined(OS_POSIX)