Chromium Code Reviews

Side by Side Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 7253052: Execute all GL commands up to the put offset reported by a flush. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if defined(ENABLE_GPU) 5 #if defined(ENABLE_GPU)
6 6
7 #include "base/bind.h" 7 #include "base/bind.h"
8 #include "base/callback.h" 8 #include "base/callback.h"
9 #include "base/debug/trace_event.h" 9 #include "base/debug/trace_event.h"
10 #include "base/process_util.h" 10 #include "base/process_util.h"
(...skipping 46 matching lines...)
57 if (scheduler_.get()) { 57 if (scheduler_.get()) {
58 scheduler_->Destroy(); 58 scheduler_->Destroy();
59 } 59 }
60 60
61 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); 61 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
62 gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer( 62 gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(
63 handle_, renderer_id_, render_view_id_)); 63 handle_, renderer_id_, render_view_id_));
64 } 64 }
65 65
66 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { 66 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
67 // If the scheduler is unscheduled, defer sync and async messages until it is
68 // rescheduled. Also, even if the scheduler is scheduled, do not allow newly
69 // received messages to be handled before previously received deferred ones;
70 // append them to the deferred queue as well.
71 if ((scheduler_.get() && !scheduler_->IsScheduled()) ||
72 !deferred_messages_.empty()) {
73 deferred_messages_.push(new IPC::Message(message));
74 return true;
75 }
76
77 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers 67 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
78 // here. This is so the reply can be delayed if the scheduler is unscheduled. 68 // here. This is so the reply can be delayed if the scheduler is unscheduled.
79 bool handled = true; 69 bool handled = true;
80 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) 70 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
81 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, 71 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
82 OnInitialize); 72 OnInitialize);
83 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent, 73 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent,
84 OnSetParent); 74 OnSetParent);
85 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState); 75 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
86 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush); 76 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush);
(...skipping 24 matching lines...)
111 handled = video_decoder_->OnMessageReceived(message); 101 handled = video_decoder_->OnMessageReceived(message);
112 102
113 DCHECK(handled); 103 DCHECK(handled);
114 return handled; 104 return handled;
115 } 105 }
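
The deferral block removed from OnMessageReceived above pushed incoming IPCs onto a queue whenever the scheduler was unscheduled, and also whenever older deferred messages were still pending, so a newly received message could never be handled ahead of them. A minimal standalone sketch of that check, using simplified stand-ins rather than the Chromium IPC::Message and GpuScheduler types:

```cpp
// Sketch of the defer-if-unscheduled check; Message, the handler body and the
// scheduled flag are simplified stand-ins, not the Chromium classes.
#include <iostream>
#include <memory>
#include <queue>
#include <string>

struct Message {
  std::string payload;
};

class StubSketch {
 public:
  void set_scheduled(bool scheduled) { scheduled_ = scheduled; }

  bool OnMessageReceived(const Message& message) {
    // Defer while unscheduled. Also defer while older deferred messages are
    // still queued, so ordering relative to them is preserved.
    if (!scheduled_ || !deferred_.empty()) {
      deferred_.push(std::make_unique<Message>(message));
      return true;
    }
    std::cout << "handled: " << message.payload << "\n";
    return true;
  }

 private:
  bool scheduled_ = true;
  std::queue<std::unique_ptr<Message>> deferred_;
};
```

With this patch the stub no longer keeps such a queue; the SetScheduledCallback below now targets GpuChannel::OnScheduled and the new IsScheduled() accessor lets the channel query the stub, so the deferral responsibility appears to move up to the channel.
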
116 106
117 bool GpuCommandBufferStub::Send(IPC::Message* message) { 107 bool GpuCommandBufferStub::Send(IPC::Message* message) {
118 return channel_->Send(message); 108 return channel_->Send(message);
119 } 109 }
120 110
111 bool GpuCommandBufferStub::IsScheduled() {
112 return !scheduler_.get() || scheduler_->IsScheduled();
113 }
114
121 void GpuCommandBufferStub::OnInitialize( 115 void GpuCommandBufferStub::OnInitialize(
122 base::SharedMemoryHandle ring_buffer, 116 base::SharedMemoryHandle ring_buffer,
123 int32 size, 117 int32 size,
124 IPC::Message* reply_message) { 118 IPC::Message* reply_message) {
125 DCHECK(!command_buffer_.get()); 119 DCHECK(!command_buffer_.get());
126 120
127 bool result = false; 121 bool result = false;
128 122
129 command_buffer_.reset(new gpu::CommandBufferService); 123 command_buffer_.reset(new gpu::CommandBufferService);
130 124
(...skipping 21 matching lines...)
152 allowed_extensions_.c_str(), 146 allowed_extensions_.c_str(),
153 requested_attribs_, 147 requested_attribs_,
154 channel_->share_group())) { 148 channel_->share_group())) {
155 command_buffer_->SetPutOffsetChangeCallback( 149 command_buffer_->SetPutOffsetChangeCallback(
156 NewCallback(scheduler_.get(), 150 NewCallback(scheduler_.get(),
157 &gpu::GpuScheduler::PutChanged)); 151 &gpu::GpuScheduler::PutChanged));
158 command_buffer_->SetParseErrorCallback( 152 command_buffer_->SetParseErrorCallback(
159 NewCallback(this, &GpuCommandBufferStub::OnParseError)); 153 NewCallback(this, &GpuCommandBufferStub::OnParseError));
160 scheduler_->SetSwapBuffersCallback( 154 scheduler_->SetSwapBuffersCallback(
161 NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers)); 155 NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
162 scheduler_->SetLatchCallback(base::Bind(
163 &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_));
164 scheduler_->SetScheduledCallback( 156 scheduler_->SetScheduledCallback(
165 NewCallback(this, &GpuCommandBufferStub::OnScheduled)); 157 NewCallback(channel_, &GpuChannel::OnScheduled));
166 scheduler_->SetTokenCallback(base::Bind( 158 scheduler_->SetTokenCallback(base::Bind(
167 &GpuCommandBufferStub::OnSetToken, base::Unretained(this))); 159 &GpuCommandBufferStub::OnSetToken, base::Unretained(this)));
168 if (watchdog_) 160 if (watchdog_)
169 scheduler_->SetCommandProcessedCallback( 161 scheduler_->SetCommandProcessedCallback(
170 NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed)); 162 NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));
171 163
172 #if defined(OS_MACOSX) || defined(TOUCH_UI) 164 #if defined(OS_MACOSX) || defined(TOUCH_UI)
173 if (handle_) { 165 if (handle_) {
174 // This context conceptually puts its output directly on the 166 // This context conceptually puts its output directly on the
175 // screen, rendered by the accelerated plugin layer in 167 // screen, rendered by the accelerated plugin layer in
(...skipping 76 matching lines...)
252 // it may cause other side effects to simply pass the next WaitLatch on all 244 // it may cause other side effects to simply pass the next WaitLatch on all
253 // contexts. Instead, just lose all related contexts when there's an error. 245 // contexts. Instead, just lose all related contexts when there's an error.
254 channel_->DestroySoon(); 246 channel_->DestroySoon();
255 } 247 }
256 248
257 void GpuCommandBufferStub::OnFlush(int32 put_offset, 249 void GpuCommandBufferStub::OnFlush(int32 put_offset,
258 int32 last_known_get, 250 int32 last_known_get,
259 uint32 flush_count, 251 uint32 flush_count,
260 IPC::Message* reply_message) { 252 IPC::Message* reply_message) {
261 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush"); 253 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush");
262 gpu::CommandBuffer::State state; 254 gpu::CommandBuffer::State state = command_buffer_->GetState();
263 if (flush_count - last_flush_count_ >= 0x8000000U) { 255 if (flush_count - last_flush_count_ >= 0x8000000U) {
264 // We received this message out-of-order. This should not happen but is here 256 // We received this message out-of-order. This should not happen but is here
265 // to catch regressions. Ignore the message. 257 // to catch regressions. Ignore the message.
266 NOTREACHED() << "Received an AsyncFlush message out-of-order"; 258 NOTREACHED() << "Received an AsyncFlush message out-of-order";
267 state = command_buffer_->GetState(); 259 GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
260 Send(reply_message);
268 } else { 261 } else {
269 last_flush_count_ = flush_count; 262 last_flush_count_ = flush_count;
263
264 // Reply immediately if the client was out of date with the current get
265 // offset.
266 bool reply_immediately = state.get_offset != last_known_get;
267 if (reply_immediately) {
268 GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
269 Send(reply_message);
270 }
271
272 // Process everything up to the put offset.
270 state = command_buffer_->FlushSync(put_offset, last_known_get); 273 state = command_buffer_->FlushSync(put_offset, last_known_get);
274
275 // Lose all contexts if the context was lost.
276 if (state.error == gpu::error::kLostContext &&
277 gfx::GLContext::LosesAllContextsOnContextLost()) {
278 channel_->LoseAllContexts();
279 }
280
281 // Then if the client was up-to-date with the get offset, reply to the
282 // synchronous IPC only after all commands have been processed. This
283 // prevents the client from "spinning" when it fills up the command buffer.
284 // Otherwise, since the state has changed since the immediate reply, send
285 // an asynchronous state update back to the client.
286 if (!reply_immediately) {
287 GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
288 Send(reply_message);
289 } else {
290 ReportState();
291 }
271 } 292 }
272 if (state.error == gpu::error::kLostContext &&
273 gfx::GLContext::LosesAllContextsOnContextLost())
274 channel_->LoseAllContexts();
275 293
276 GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
277 Send(reply_message);
278 } 294 }
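
The reworked OnFlush above replies immediately when the client's last_known_get is already stale, and otherwise replies only after FlushSync has executed everything up to the put offset, so a client that has filled the command buffer does not spin; if an immediate reply went out, the post-flush state is pushed back asynchronously instead. A rough sketch of that reply policy, with State, SendReply and ReportStateAsync as placeholders rather than the real CommandBufferService and IPC plumbing:

```cpp
// Sketch of the immediate-vs-delayed reply policy; the types below are
// placeholders, not the Chromium command buffer or IPC classes.
#include <cstdint>

struct State {
  int32_t get_offset = 0;
  int error = 0;
};

class FlushHandlerSketch {
 public:
  State OnFlush(int32_t put_offset, int32_t last_known_get) {
    State state = GetState();

    // If the client is already behind the current get offset, reply right
    // away so it can make progress while the commands are being processed.
    const bool reply_immediately = state.get_offset != last_known_get;
    if (reply_immediately)
      SendReply(state);

    // Execute everything up to the put offset.
    state = FlushSync(put_offset);

    // If the client was up to date, replying only now keeps it from spinning
    // on a full command buffer; otherwise the state changed after the
    // immediate reply, so push an asynchronous update instead.
    if (!reply_immediately)
      SendReply(state);
    else
      ReportStateAsync(state);
    return state;
  }

 private:
  State GetState() const { return current_; }
  State FlushSync(int32_t put_offset) {
    current_.get_offset = put_offset;  // pretend all commands were executed
    return current_;
  }
  void SendReply(const State&) {}
  void ReportStateAsync(const State&) {}

  State current_;
};
```
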
279 295
280 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) { 296 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
281 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush"); 297 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush");
282 if (flush_count - last_flush_count_ < 0x8000000U) { 298 if (flush_count - last_flush_count_ < 0x8000000U) {
283 last_flush_count_ = flush_count; 299 last_flush_count_ = flush_count;
284 command_buffer_->Flush(put_offset); 300 command_buffer_->Flush(put_offset);
285 } else { 301 } else {
286 // We received this message out-of-order. This should not happen but is here 302 // We received this message out-of-order. This should not happen but is here
287 // to catch regressions. Ignore the message. 303 // to catch regressions. Ignore the message.
288 NOTREACHED() << "Received a Flush message out-of-order"; 304 NOTREACHED() << "Received a Flush message out-of-order";
289 } 305 }
290 // TODO(piman): Do this every time the scheduler finishes processing a batch of 306 // TODO(piman): Do this every time the scheduler finishes processing a batch of
291 // commands. 307 // commands.
292 MessageLoop::current()->PostTask(FROM_HERE, 308 MessageLoop::current()->PostTask(FROM_HERE,
293 task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState)); 309 task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
piman 2011/07/09 20:40:22 Because Flush will now fully execute the commands,
apatrick_chromium 2011/07/11 21:25:45 Done.
294 } 310 }
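
Both flush handlers guard against out-of-order delivery with `flush_count - last_flush_count_ < 0x8000000U`, an unsigned subtraction that stays correct when the 32-bit counter wraps: a count is accepted as newer if it is fewer than 0x8000000 steps ahead of the last accepted one, modulo 2^32. A self-contained illustration of that property:

```cpp
// Wraparound-tolerant ordering check for 32-bit flush counts, mirroring the
// comparison used in OnFlush and OnAsyncFlush above.
#include <cassert>
#include <cstdint>

bool IsInOrder(uint32_t flush_count, uint32_t last_flush_count) {
  return flush_count - last_flush_count < 0x8000000u;
}

int main() {
  assert(IsInOrder(11, 10));          // normal forward progress
  assert(IsInOrder(5, 0xFFFFFFF0u));  // still fine across the 2^32 wrap
  assert(!IsInOrder(9, 10));          // a stale count is rejected
  return 0;
}
```
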
295 311
296 void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size, 312 void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
297 int32 id_request, 313 int32 id_request,
298 IPC::Message* reply_message) { 314 IPC::Message* reply_message) {
299 int32 id = command_buffer_->CreateTransferBuffer(size, id_request); 315 int32 id = command_buffer_->CreateTransferBuffer(size, id_request);
300 GpuCommandBufferMsg_CreateTransferBuffer::WriteReplyParams(reply_message, id); 316 GpuCommandBufferMsg_CreateTransferBuffer::WriteReplyParams(reply_message, id);
301 Send(reply_message); 317 Send(reply_message);
302 } 318 }
303 319
(...skipping 63 matching lines...)
367 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSwapBuffers"); 383 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSwapBuffers");
368 ReportState(); 384 ReportState();
369 Send(new GpuCommandBufferMsg_SwapBuffers(route_id_)); 385 Send(new GpuCommandBufferMsg_SwapBuffers(route_id_));
370 } 386 }
371 387
372 void GpuCommandBufferStub::OnCommandProcessed() { 388 void GpuCommandBufferStub::OnCommandProcessed() {
373 if (watchdog_) 389 if (watchdog_)
374 watchdog_->CheckArmed(); 390 watchdog_->CheckArmed();
375 } 391 }
376 392
377 void GpuCommandBufferStub::HandleDeferredMessages() {
 378 // Empty the deferred queue so OnMessageReceived does not defer on that
379 // account and to prevent an infinite loop if the scheduler is unscheduled
380 // as a result of handling already deferred messages.
381 std::queue<IPC::Message*> deferred_messages_copy;
382 std::swap(deferred_messages_copy, deferred_messages_);
383
384 while (!deferred_messages_copy.empty()) {
385 scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
386 deferred_messages_copy.pop();
387
388 OnMessageReceived(*message);
389 }
390 }
391
392 void GpuCommandBufferStub::OnScheduled() {
393 // Post a task to handle any deferred messages. The deferred message queue is
394 // not emptied here, which ensures that OnMessageReceived will continue to
395 // defer newly received messages until the ones in the queue have all been
396 // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
397 // task to prevent reentrancy.
398 MessageLoop::current()->PostTask(
399 FROM_HERE,
400 task_factory_.NewRunnableMethod(
401 &GpuCommandBufferStub::HandleDeferredMessages));
402 }
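
HandleDeferredMessages and OnScheduled, deleted above now that rescheduling is handled at the channel level, drained a local copy of the queue so that messages deferred again while draining could not cause an infinite loop, and the drain itself was posted as a task to avoid reentrancy. A simplified sketch of the swap-and-drain step, using plain standard-library types instead of the Chromium ones:

```cpp
// Swap the pending queue aside before handling, so messages deferred again
// during the drain land in the (now empty) member queue rather than being
// re-processed in the same pass.
#include <memory>
#include <queue>
#include <utility>

struct Message {
  int id = 0;
};

class DeferredQueueSketch {
 public:
  void Defer(std::unique_ptr<Message> message) {
    deferred_.push(std::move(message));
  }

  template <typename Handler>
  void Drain(Handler handle) {
    std::queue<std::unique_ptr<Message>> copy;
    std::swap(copy, deferred_);  // member queue is empty while we drain
    while (!copy.empty()) {
      std::unique_ptr<Message> message = std::move(copy.front());
      copy.pop();
      handle(*message);  // may call Defer() again without looping forever
    }
  }

 private:
  std::queue<std::unique_ptr<Message>> deferred_;
};
```
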
403
404 #if defined(OS_MACOSX) 393 #if defined(OS_MACOSX)
405 void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) { 394 void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) {
406 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); 395 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
407 // Try using the IOSurface version first. 396 // Try using the IOSurface version first.
408 uint64 new_backing_store = scheduler_->SetWindowSizeForIOSurface(size); 397 uint64 new_backing_store = scheduler_->SetWindowSizeForIOSurface(size);
409 if (new_backing_store) { 398 if (new_backing_store) {
410 GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params; 399 GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
411 params.renderer_id = renderer_id_; 400 params.renderer_id = renderer_id_;
412 params.render_view_id = render_view_id_; 401 params.render_view_id = render_view_id_;
413 params.window = handle_; 402 params.window = handle_;
(...skipping 67 matching lines...)
481 scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count); 470 scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count);
482 471
483 for(uint64 i = 0; i < delta; i++) { 472 for(uint64 i = 0; i < delta; i++) {
484 OnSwapBuffers(); 473 OnSwapBuffers();
485 // Wake up the GpuScheduler to start doing work again. 474 // Wake up the GpuScheduler to start doing work again.
486 scheduler_->SetScheduled(true); 475 scheduler_->SetScheduled(true);
487 } 476 }
488 } 477 }
489 #endif // defined(OS_MACOSX) || defined(TOUCH_UI) 478 #endif // defined(OS_MACOSX) || defined(TOUCH_UI)
490 479
491 void GpuCommandBufferStub::CommandBufferWasDestroyed() {
492 TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed");
493 // In case the renderer is currently blocked waiting for a sync reply from
494 // the stub, this method allows us to cleanup and unblock pending messages.
495 if (scheduler_.get()) {
496 while (!scheduler_->IsScheduled())
497 scheduler_->SetScheduled(true);
498 }
499 // Handle any deferred messages now that the scheduler is not blocking
500 // message handling.
501 HandleDeferredMessages();
502 }
503
504 void GpuCommandBufferStub::AddSetTokenCallback( 480 void GpuCommandBufferStub::AddSetTokenCallback(
505 const base::Callback<void(int32)>& callback) { 481 const base::Callback<void(int32)>& callback) {
506 set_token_callbacks_.push_back(callback); 482 set_token_callbacks_.push_back(callback);
507 } 483 }
508 484
509 void GpuCommandBufferStub::OnSetToken(int32 token) { 485 void GpuCommandBufferStub::OnSetToken(int32 token) {
510 for (size_t i = 0; i < set_token_callbacks_.size(); ++i) 486 for (size_t i = 0; i < set_token_callbacks_.size(); ++i)
511 set_token_callbacks_[i].Run(token); 487 set_token_callbacks_[i].Run(token);
512 } 488 }
513 489
(...skipping 75 matching lines...)
589 new GpuVideoDecodeAccelerator(this, decoder_host_id, this)); 565 new GpuVideoDecodeAccelerator(this, decoder_host_id, this));
590 video_decoder_->Initialize(configs); 566 video_decoder_->Initialize(configs);
591 } 567 }
592 568
593 void GpuCommandBufferStub::OnDestroyVideoDecoder() { 569 void GpuCommandBufferStub::OnDestroyVideoDecoder() {
594 LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder"; 570 LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder";
595 video_decoder_.reset(); 571 video_decoder_.reset();
596 } 572 }
597 573
598 #endif // defined(ENABLE_GPU) 574 #endif // defined(ENABLE_GPU)