Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 7253052: Execute all GL commands up to the put offset reported by a flush. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #if defined(ENABLE_GPU)
 
 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/debug/trace_event.h"
 #include "base/process_util.h"
(...skipping 46 matching lines...)
   if (scheduler_.get()) {
     scheduler_->Destroy();
   }
 
   GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
   gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(
       handle_, renderer_id_, render_view_id_));
 }
 
 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
-  // If the scheduler is unscheduled, defer sync and async messages until it is
-  // rescheduled. Also, even if the scheduler is scheduled, do not allow newly
-  // received messages to be handled before previously received deferred ones;
-  // append them to the deferred queue as well.
-  if ((scheduler_.get() && !scheduler_->IsScheduled()) ||
-      !deferred_messages_.empty()) {
-    deferred_messages_.push(new IPC::Message(message));
-    return true;
-  }
-
   // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
   // here. This is so the reply can be delayed if the scheduler is unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                     OnInitialize);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent,
                                     OnSetParent);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
+    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateTransferBuffer,
                                     OnCreateTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_RegisterTransferBuffer,
                                     OnRegisterTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_DestroyTransferBuffer,
                                     OnDestroyTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                     OnGetTransferBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateVideoDecoder,
                         OnCreateVideoDecoder)
(...skipping 13 matching lines...)
   handled = video_decoder_->OnMessageReceived(message);
 
   DCHECK(handled);
   return handled;
 }
 
 bool GpuCommandBufferStub::Send(IPC::Message* message) {
   return channel_->Send(message);
 }
 
+bool GpuCommandBufferStub::IsScheduled() {
+  return !scheduler_.get() || scheduler_->IsScheduled();
+}
+
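The new IsScheduled() accessor treats a stub whose scheduler has not been created yet as schedulable, so initialization messages are never blocked. Its caller is not in this file; below is a purely hypothetical sketch of how a channel might consult it before dispatching messages (only IsScheduled() itself comes from this patch, the rest is invented for illustration):

#include <vector>

// Hypothetical consumer, not part of this patch: a channel that defers its
// message queue while any of its stubs is unscheduled, resuming when the
// scheduled callback (GpuChannel::OnScheduled) fires.
bool AllStubsScheduled(const std::vector<GpuCommandBufferStub*>& stubs) {
  for (size_t i = 0; i < stubs.size(); ++i) {
    if (!stubs[i]->IsScheduled())
      return false;  // Keep deferring until rescheduled.
  }
  return true;
}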
 void GpuCommandBufferStub::OnInitialize(
     base::SharedMemoryHandle ring_buffer,
     int32 size,
     IPC::Message* reply_message) {
   DCHECK(!command_buffer_.get());
 
   bool result = false;
 
   command_buffer_.reset(new gpu::CommandBufferService);
 
 #if defined(OS_WIN)
   // Windows dups the shared memory handle it receives into the current process
   // and closes it when this variable goes out of scope.
   base::SharedMemory shared_memory(ring_buffer,
                                    false,
                                    channel_->renderer_process());
 #else
   // POSIX receives a dup of the shared memory handle and closes the dup when
   // this variable goes out of scope.
   base::SharedMemory shared_memory(ring_buffer, false);
 #endif
 
   // Initialize the CommandBufferService and GpuScheduler.
   if (command_buffer_->Initialize(&shared_memory, size)) {
-    scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
-                                           channel_,
-                                           NULL));
+    scheduler_.reset(gpu::GpuScheduler::Create(command_buffer_.get(),
+                                               channel_,
+                                               NULL));
     if (scheduler_->Initialize(
         handle_,
         initial_size_,
         disallowed_extensions_,
         allowed_extensions_.c_str(),
         requested_attribs_,
         channel_->share_group())) {
       command_buffer_->SetPutOffsetChangeCallback(
           NewCallback(scheduler_.get(),
                       &gpu::GpuScheduler::PutChanged));
       command_buffer_->SetParseErrorCallback(
           NewCallback(this, &GpuCommandBufferStub::OnParseError));
       scheduler_->SetSwapBuffersCallback(
           NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
-      scheduler_->SetLatchCallback(base::Bind(
-          &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_));
       scheduler_->SetScheduledCallback(
-          NewCallback(this, &GpuCommandBufferStub::OnScheduled));
+          NewCallback(channel_, &GpuChannel::OnScheduled));
       scheduler_->SetTokenCallback(base::Bind(
           &GpuCommandBufferStub::OnSetToken, base::Unretained(this)));
       if (watchdog_)
         scheduler_->SetCommandProcessedCallback(
             NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));
 
 #if defined(OS_MACOSX) || defined(TOUCH_UI)
       if (handle_) {
         // This context conceptually puts its output directly on the
         // screen, rendered by the accelerated plugin layer in
(...skipping 76 matching lines...)
   // it may cause other side effects to simply pass the next WaitLatch on all
   // contexts. Instead, just lose all related contexts when there's an error.
   channel_->DestroySoon();
 }
 
 void GpuCommandBufferStub::OnFlush(int32 put_offset,
                                    int32 last_known_get,
                                    uint32 flush_count,
                                    IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush");
-  gpu::CommandBuffer::State state;
+  gpu::CommandBuffer::State state = command_buffer_->GetState();
   if (flush_count - last_flush_count_ >= 0x8000000U) {
     // We received this message out-of-order. This should not happen but is here
     // to catch regressions. Ignore the message.
     NOTREACHED() << "Received an AsyncFlush message out-of-order";
-    state = command_buffer_->GetState();
+    GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+    Send(reply_message);
   } else {
     last_flush_count_ = flush_count;
+
+    // Reply immediately if the client was out of date with the current get
+    // offset.
+    bool reply_immediately = state.get_offset != last_known_get;
+    if (reply_immediately) {
+      GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+      Send(reply_message);
+    }
+
+    // Process everything up to the put offset.
     state = command_buffer_->FlushSync(put_offset, last_known_get);
+
+    // Lose all contexts if the context was lost.
+    if (state.error == gpu::error::kLostContext &&
+        gfx::GLContext::LosesAllContextsOnContextLost()) {
+      channel_->LoseAllContexts();
+    }
+
+    // If the client was up-to-date with the get offset, reply to the
+    // synchronous IPC only after all commands up to the put offset have been
+    // processed. This prevents the client from "spinning" when it fills up
+    // the command buffer. Otherwise, since the state has changed since the
+    // immediate reply, send an asynchronous state update back to the client.
+    if (!reply_immediately) {
+      GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+      Send(reply_message);
+    } else {
+      ReportState();
+    }
   }
-  if (state.error == gpu::error::kLostContext &&
-      gfx::GLContext::LosesAllContextsOnContextLost())
-    channel_->LoseAllContexts();
 
-  GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
-  Send(reply_message);
 }
 
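Why the reply timing in OnFlush matters: if the client already knew the current get offset when it sent the synchronous flush, an immediate reply would report no progress, and the client would re-send at once and spin. A self-contained toy model of that decision follows (all types and names are invented for this sketch; only the decision logic mirrors the handler above):

#include <cstdint>
#include <iostream>

struct State { int32_t get_offset; };

struct Service {
  State state;

  // Stand-in for CommandBufferService::FlushSync: pretend every command up
  // to put_offset is executed before returning.
  State FlushSync(int32_t put_offset) {
    state.get_offset = put_offset;
    return state;
  }

  // Mirrors the reply_immediately decision in OnFlush above.
  State OnFlush(int32_t put_offset, int32_t last_known_get) {
    if (state.get_offset != last_known_get)
      return state;  // Client is stale: the new get offset is already progress.
    return FlushSync(put_offset);  // Otherwise reply only after real progress.
  }
};

int main() {
  Service service;
  service.state.get_offset = 0;
  // The client filled the buffer and already knows get == 0. Replying before
  // processing would echo get == 0 back, and the client would spin.
  State reply = service.OnFlush(/*put_offset=*/64, /*last_known_get=*/0);
  std::cout << "get offset in reply: " << reply.get_offset << "\n";  // 64
  return 0;
}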
 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush");
   if (flush_count - last_flush_count_ < 0x8000000U) {
     last_flush_count_ = flush_count;
     command_buffer_->Flush(put_offset);
   } else {
     // We received this message out-of-order. This should not happen but is here
     // to catch regressions. Ignore the message.
     NOTREACHED() << "Received a Flush message out-of-order";
   }
-  // TODO(piman): Do this everytime the scheduler finishes processing a batch of
-  // commands.
-  MessageLoop::current()->PostTask(FROM_HERE,
-      task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
+
+  ReportState();
+}
+
+void GpuCommandBufferStub::OnRescheduled() {
+  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
+  command_buffer_->Flush(state.put_offset);
 
piman 2011/07/11 23:38:33: Adding a ReportState() here would be nice to the c
apatrick_chromium 2011/07/11 23:56:33: Done.
 
 }
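The "Done." above implies the landed version appends a state report after the catch-up flush. A minimal sketch of that presumable final form (the ReportState() line is inferred from the review exchange and is not visible in this patch set):

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  command_buffer_->Flush(state.put_offset);
  ReportState();  // Inferred from the review exchange; not in this patch set.
}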
 
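Both OnFlush and OnAsyncFlush gate on flush_count with unsigned wrap-around arithmetic: the subtraction is computed mod 2^32, so a genuinely newer count yields a small difference even when the counter wraps past zero, while a stale count yields a huge one. A standalone illustration (IsFresh is an invented name; the threshold copies the 0x8000000U used above):

#include <cassert>
#include <cstdint>

// With uint32 arithmetic, (flush_count - last_flush_count) is taken mod 2^32,
// so a newer count gives a small difference even across the wrap, and a stale
// count gives a difference near 2^32.
bool IsFresh(uint32_t flush_count, uint32_t last_flush_count) {
  // Same threshold as the handlers above: differences of 0x8000000 (2^27)
  // or more are treated as out-of-order.
  return flush_count - last_flush_count < 0x8000000U;
}

int main() {
  assert(IsFresh(5, 4));            // In order.
  assert(IsFresh(3, 0xFFFFFFFFu));  // In order across the wrap: 3 - (2^32-1) == 4.
  assert(!IsFresh(4, 5));           // Stale: 4 - 5 wraps to 0xFFFFFFFF.
  return 0;
}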
 void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
                                                   int32 id_request,
                                                   IPC::Message* reply_message) {
   int32 id = command_buffer_->CreateTransferBuffer(size, id_request);
   GpuCommandBufferMsg_CreateTransferBuffer::WriteReplyParams(reply_message, id);
   Send(reply_message);
 }
 
(...skipping 63 matching lines...)
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSwapBuffers");
   ReportState();
   Send(new GpuCommandBufferMsg_SwapBuffers(route_id_));
 }
 
 void GpuCommandBufferStub::OnCommandProcessed() {
   if (watchdog_)
     watchdog_->CheckArmed();
 }
 
-void GpuCommandBufferStub::HandleDeferredMessages() {
-  // Empty the deferred queue so OnMessageReceived does not defer on that
-  // account and to prevent an infinite loop if the scheduler is unscheduled
-  // as a result of handling already deferred messages.
-  std::queue<IPC::Message*> deferred_messages_copy;
-  std::swap(deferred_messages_copy, deferred_messages_);
-
-  while (!deferred_messages_copy.empty()) {
-    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
-    deferred_messages_copy.pop();
-
-    OnMessageReceived(*message);
-  }
-}
-
-void GpuCommandBufferStub::OnScheduled() {
-  // Post a task to handle any deferred messages. The deferred message queue is
-  // not emptied here, which ensures that OnMessageReceived will continue to
-  // defer newly received messages until the ones in the queue have all been
-  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
-  // task to prevent reentrancy.
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      task_factory_.NewRunnableMethod(
-          &GpuCommandBufferStub::HandleDeferredMessages));
-}
-
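The deleted HandleDeferredMessages drained its queue by swapping it into a local, so anything deferred while draining waits for the next drain instead of re-entering an endless loop. A standalone sketch of that drain-by-swap pattern (the queue contents and the Handle function are invented for illustration):

#include <iostream>
#include <queue>
#include <string>
#include <utility>

std::queue<std::string> deferred;

void Handle(const std::string& message) {
  std::cout << "handling: " << message << "\n";
  if (message == "reschedule")
    deferred.push("late arrival");  // Re-entrant push lands in the next drain.
}

void DrainDeferred() {
  // Empty the member queue first so re-entrant pushes do not extend this loop.
  std::queue<std::string> copy;
  std::swap(copy, deferred);
  while (!copy.empty()) {
    Handle(copy.front());
    copy.pop();
  }
}

int main() {
  deferred.push("flush");
  deferred.push("reschedule");
  DrainDeferred();                       // Handles "flush" and "reschedule".
  std::cout << deferred.size() << "\n";  // 1: "late arrival" awaits next drain.
  return 0;
}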
 #if defined(OS_MACOSX)
 void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) {
   GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
   // Try using the IOSurface version first.
   uint64 new_backing_store = scheduler_->SetWindowSizeForIOSurface(size);
   if (new_backing_store) {
     GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
     params.renderer_id = renderer_id_;
     params.render_view_id = render_view_id_;
     params.window = handle_;
(...skipping 67 matching lines...)
   scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count);
 
   for (uint64 i = 0; i < delta; i++) {
     OnSwapBuffers();
     // Wake up the GpuScheduler to start doing work again.
     scheduler_->SetScheduled(true);
   }
 }
 #endif  // defined(OS_MACOSX) || defined(TOUCH_UI)
 
-void GpuCommandBufferStub::CommandBufferWasDestroyed() {
-  TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed");
-  // In case the renderer is currently blocked waiting for a sync reply from
-  // the stub, this method allows us to cleanup and unblock pending messages.
-  if (scheduler_.get()) {
-    while (!scheduler_->IsScheduled())
-      scheduler_->SetScheduled(true);
-  }
-  // Handle any deferred messages now that the scheduler is not blocking
-  // message handling.
-  HandleDeferredMessages();
-}
-
 void GpuCommandBufferStub::AddSetTokenCallback(
     const base::Callback<void(int32)>& callback) {
   set_token_callbacks_.push_back(callback);
 }
 
 void GpuCommandBufferStub::OnSetToken(int32 token) {
   for (size_t i = 0; i < set_token_callbacks_.size(); ++i)
     set_token_callbacks_[i].Run(token);
 }
 
(...skipping 75 matching lines...)
       new GpuVideoDecodeAccelerator(this, decoder_host_id, this));
   video_decoder_->Initialize(configs);
 }
 
 void GpuCommandBufferStub::OnDestroyVideoDecoder() {
   LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder";
   video_decoder_.reset();
 }
 
 #endif  // defined(ENABLE_GPU)