Chromium Code Reviews

Side by Side Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 7253052: Execute all GL commands up to the put offset reported by a flush. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(ENABLE_GPU)

 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/debug/trace_event.h"
 #include "base/process_util.h"
(...skipping 46 matching lines...)
   if (scheduler_.get()) {
     scheduler_->Destroy();
   }

   GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
   gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(
       handle_, renderer_id_, render_view_id_));
 }

 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
-  // If the scheduler is unscheduled, defer sync and async messages until it is
-  // rescheduled. Also, even if the scheduler is scheduled, do not allow newly
-  // received messages to be handled before previously received deferred ones;
-  // append them to the deferred queue as well.
-  if ((scheduler_.get() && !scheduler_->IsScheduled()) ||
-      !deferred_messages_.empty()) {
-    deferred_messages_.push(new IPC::Message(message));
-    return true;
-  }
-
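The deleted branch above is a small ordering-preserving deferral: while the scheduler is descheduled, or while older deferred messages are still pending, an incoming message is copied onto a queue instead of being dispatched, so arrival order is never violated. A minimal standalone sketch of the same pattern, with plain-STL stand-ins for the Chromium types (Message, Stub, and Handle are illustrative names, not the real API):

#include <memory>
#include <queue>
#include <string>

struct Message { std::string payload; };

class Stub {
 public:
  void set_scheduled(bool scheduled) { scheduled_ = scheduled; }

  // Mirrors the deleted branch: defer while descheduled, and also while
  // older deferred messages exist, so handling order matches arrival order.
  bool OnMessageReceived(const Message& message) {
    if (!scheduled_ || !deferred_.empty()) {
      deferred_.push(std::unique_ptr<Message>(new Message(message)));
      return true;  // "Handled" from the sender's point of view.
    }
    return Handle(message);
  }

 private:
  bool Handle(const Message&) { return true; }  // real dispatch elided

  bool scheduled_ = true;
  std::queue<std::unique_ptr<Message>> deferred_;
};

This CL removes the stub-level queue; the Rescheduled handler and the GpuChannel::OnScheduled callback below suggest the equivalent bookkeeping now lives at the channel level.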
   // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
   // here. This is so the reply can be delayed if the scheduler is unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                     OnInitialize);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent,
                                     OnSetParent);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
+    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateTransferBuffer,
                                     OnCreateTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_RegisterTransferBuffer,
                                     OnRegisterTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_DestroyTransferBuffer,
                                     OnDestroyTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                     OnGetTransferBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateVideoDecoder,
                         OnCreateVideoDecoder)
(...skipping 13 matching lines...)
     handled = video_decoder_->OnMessageReceived(message);

   DCHECK(handled);
   return handled;
 }

 bool GpuCommandBufferStub::Send(IPC::Message* message) {
   return channel_->Send(message);
 }

+bool GpuCommandBufferStub::IsScheduled() {
+  return !scheduler_.get() || scheduler_->IsScheduled();
+}
+
 void GpuCommandBufferStub::OnInitialize(
     base::SharedMemoryHandle ring_buffer,
     int32 size,
     IPC::Message* reply_message) {
   DCHECK(!command_buffer_.get());

   bool result = false;

   command_buffer_.reset(new gpu::CommandBufferService);

 #if defined(OS_WIN)
   // Windows dups the shared memory handle it receives into the current process
   // and closes it when this variable goes out of scope.
   base::SharedMemory shared_memory(ring_buffer,
                                    false,
                                    channel_->renderer_process());
 #else
   // POSIX receives a dup of the shared memory handle and closes the dup when
   // this variable goes out of scope.
   base::SharedMemory shared_memory(ring_buffer, false);
 #endif

   // Initialize the CommandBufferService and GpuScheduler.
   if (command_buffer_->Initialize(&shared_memory, size)) {
-    scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
-                                           channel_,
-                                           NULL));
+    scheduler_.reset(gpu::GpuScheduler::Create(command_buffer_.get(),
+                                               channel_,
+                                               NULL));
     if (scheduler_->Initialize(
         handle_,
         initial_size_,
         disallowed_extensions_,
         allowed_extensions_.c_str(),
         requested_attribs_,
         channel_->share_group())) {
       command_buffer_->SetPutOffsetChangeCallback(
           NewCallback(scheduler_.get(),
                       &gpu::GpuScheduler::PutChanged));
       command_buffer_->SetParseErrorCallback(
           NewCallback(this, &GpuCommandBufferStub::OnParseError));
       scheduler_->SetSwapBuffersCallback(
           NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
-      scheduler_->SetLatchCallback(base::Bind(
-          &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_));
       scheduler_->SetScheduledCallback(
-          NewCallback(this, &GpuCommandBufferStub::OnScheduled));
+          NewCallback(channel_, &GpuChannel::OnScheduled));
       scheduler_->SetTokenCallback(base::Bind(
           &GpuCommandBufferStub::OnSetToken, base::Unretained(this)));
       if (watchdog_)
         scheduler_->SetCommandProcessedCallback(
             NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));

 #if defined(OS_MACOSX) || defined(TOUCH_UI)
     if (handle_) {
       // This context conceptually puts its output directly on the
       // screen, rendered by the accelerated plugin layer in
(...skipping 78 matching lines...)
   // it may cause other side effects to simply pass the next WaitLatch on all
   // contexts. Instead, just lose all related contexts when there's an error.
   channel_->DestroySoon();
 }

 void GpuCommandBufferStub::OnFlush(int32 put_offset,
                                    int32 last_known_get,
                                    uint32 flush_count,
                                    IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush");
-  gpu::CommandBuffer::State state;
+  gpu::CommandBuffer::State state = command_buffer_->GetState();
   if (flush_count - last_flush_count_ >= 0x8000000U) {
     // We received this message out-of-order. This should not happen but is here
     // to catch regressions. Ignore the message.
     NOTREACHED() << "Received an AsyncFlush message out-of-order";
-    state = command_buffer_->GetState();
+    GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+    Send(reply_message);
   } else {
     last_flush_count_ = flush_count;
+
+    // Reply immediately if the client was out of date with the current get
+    // offset.
+    bool reply_immediately = state.get_offset != last_known_get;
+    if (reply_immediately) {
+      GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+      Send(reply_message);
+    }
+
+    // Process everything up to the put offset.
     state = command_buffer_->FlushSync(put_offset, last_known_get);
+
+    // Lose all contexts if the context was lost.
+    if (state.error == gpu::error::kLostContext &&
+        gfx::GLContext::LosesAllContextsOnContextLost()) {
+      channel_->LoseAllContexts();
+    }
+
+    // Then if the client was up-to-date with the get offset, reply to the
+    // synchronous IPC only after all commands have been processed. This
+    // prevents the client from "spinning" when it fills up the command buffer.
+    // Otherwise, since the state has changed since the immediate reply, send
+    // an asynchronous state update back to the client.
+    if (!reply_immediately) {
+      GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+      Send(reply_message);
+    } else {
+      ReportState();
+    }
   }
-  if (state.error == gpu::error::kLostContext &&
-      gfx::GLContext::LosesAllContextsOnContextLost())
-    channel_->LoseAllContexts();
-
-  GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
-  Send(reply_message);
 }
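The out-of-order guard above (and the matching one in OnAsyncFlush below) relies on unsigned wraparound: with uint32 arithmetic, flush_count - last_flush_count_ is computed modulo 2^32, so a "small" difference means the message is in order even when the counter wraps past zero. A self-contained sketch of the test follows; note it uses 0x80000000 (half the 32-bit range) as the threshold, whereas the patch writes 0x8000000U (2^27), which narrows the acceptance window but applies the same windowed-ordering idea.

#include <cassert>
#include <cstdint>

// True when |current| is at or ahead of |last| in wrapping uint32 order.
bool IsInOrder(uint32_t current, uint32_t last) {
  return current - last < 0x80000000u;  // difference computed mod 2^32
}

int main() {
  assert(IsInOrder(5u, 4u));           // normal increment
  assert(IsInOrder(3u, 0xFFFFFFFEu));  // increment across the wrap
  assert(!IsInOrder(4u, 5u));          // stale, out-of-order message
  return 0;
}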

 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush");
   if (flush_count - last_flush_count_ < 0x8000000U) {
     last_flush_count_ = flush_count;
     command_buffer_->Flush(put_offset);
   } else {
     // We received this message out-of-order. This should not happen but is here
     // to catch regressions. Ignore the message.
     NOTREACHED() << "Received a Flush message out-of-order";
   }
-  // TODO(piman): Do this every time the scheduler finishes processing a batch
-  // of commands.
-  MessageLoop::current()->PostTask(FROM_HERE,
-      task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
+
+  ReportState();
+}
+
+void GpuCommandBufferStub::OnRescheduled() {
+  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
+  command_buffer_->Flush(state.put_offset);
+
+  ReportState();
 }
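OnRescheduled resumes execution by re-flushing to the last known put offset. For readers new to the command buffer, a rough model of the put/get ring buffer it operates on: the client advances "put" as it writes commands, the service advances "get" as it executes them, and a flush processes everything up to the reported put offset. The sketch below is a hypothetical standalone type, not the real gpu::CommandBuffer API, and the keep-one-slot-empty convention is an assumption for the illustration.

#include <cstdint>
#include <vector>

struct CommandRing {
  explicit CommandRing(int32_t size) : entries(size) {}

  // Client side: write a command and advance put; one slot is kept empty
  // so that put == get unambiguously means "empty".
  bool Write(uint32_t command) {
    int32_t next = (put + 1) % static_cast<int32_t>(entries.size());
    if (next == get)
      return false;  // full: the client must wait for get to advance
    entries[put] = command;
    put = next;
    return true;
  }

  // Service side: execute everything up to the reported put offset,
  // advancing get as commands complete -- what a flush does.
  template <typename Fn>
  void FlushTo(int32_t put_offset, Fn execute) {
    while (get != put_offset) {
      execute(entries[get]);
      get = (get + 1) % static_cast<int32_t>(entries.size());
    }
  }

  std::vector<uint32_t> entries;
  int32_t put = 0;  // next slot the client writes
  int32_t get = 0;  // next slot the service executes
};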

 void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
                                                   int32 id_request,
                                                   IPC::Message* reply_message) {
   int32 id = command_buffer_->CreateTransferBuffer(size, id_request);
   GpuCommandBufferMsg_CreateTransferBuffer::WriteReplyParams(reply_message, id);
   Send(reply_message);
 }

(...skipping 63 matching lines...)
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSwapBuffers");
   ReportState();
   Send(new GpuCommandBufferMsg_SwapBuffers(route_id_));
 }

 void GpuCommandBufferStub::OnCommandProcessed() {
   if (watchdog_)
     watchdog_->CheckArmed();
 }

-void GpuCommandBufferStub::HandleDeferredMessages() {
-  // Empty the deferred queue so OnMessageReceived does not defer on that
-  // account and to prevent an infinite loop if the scheduler is unscheduled
-  // as a result of handling already deferred messages.
-  std::queue<IPC::Message*> deferred_messages_copy;
-  std::swap(deferred_messages_copy, deferred_messages_);
-
-  while (!deferred_messages_copy.empty()) {
-    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
-    deferred_messages_copy.pop();
-
-    OnMessageReceived(*message);
-  }
-}
-
-void GpuCommandBufferStub::OnScheduled() {
-  // Post a task to handle any deferred messages. The deferred message queue is
-  // not emptied here, which ensures that OnMessageReceived will continue to
-  // defer newly received messages until the ones in the queue have all been
-  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
-  // task to prevent reentrancy.
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      task_factory_.NewRunnableMethod(
-          &GpuCommandBufferStub::HandleDeferredMessages));
-}
-
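The deleted HandleDeferredMessages used a swap-then-drain idiom worth noting: moving the queue aside before replaying means a handler that defers again (because the scheduler got descheduled mid-drain) pushes onto the now-empty member queue rather than the one being drained, so the loop cannot spin forever. A standalone illustration, with std::function standing in for queued IPC messages and Drainer as an illustrative name:

#include <functional>
#include <queue>
#include <utility>

class Drainer {
 public:
  void Defer(std::function<void()> task) { deferred_.push(std::move(task)); }

  void Drain() {
    std::queue<std::function<void()>> local;
    std::swap(local, deferred_);  // re-deferrals land in deferred_, not local
    while (!local.empty()) {
      local.front()();
      local.pop();
    }
  }

 private:
  std::queue<std::function<void()>> deferred_;
};

In the deleted code, the drain was additionally posted as a task (OnScheduled) rather than run inline, so that replay never reentered OnMessageReceived from within a handler.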
 #if defined(OS_MACOSX)
 void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) {
   GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
   // Try using the IOSurface version first.
   uint64 new_backing_store = scheduler_->SetWindowSizeForIOSurface(size);
   if (new_backing_store) {
     GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
     params.renderer_id = renderer_id_;
     params.render_view_id = render_view_id_;
     params.window = handle_;
(...skipping 67 matching lines...)
   scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count);

   for (uint64 i = 0; i < delta; i++) {
     OnSwapBuffers();
     // Wake up the GpuScheduler to start doing work again.
     scheduler_->SetScheduled(true);
   }
 }
 #endif  // defined(OS_MACOSX) || defined(TOUCH_UI)

-void GpuCommandBufferStub::CommandBufferWasDestroyed() {
-  TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed");
-  // In case the renderer is currently blocked waiting for a sync reply from
-  // the stub, this method allows us to clean up and unblock pending messages.
-  if (scheduler_.get()) {
-    while (!scheduler_->IsScheduled())
-      scheduler_->SetScheduled(true);
-  }
-  // Handle any deferred messages now that the scheduler is not blocking
-  // message handling.
-  HandleDeferredMessages();
-}
-
 void GpuCommandBufferStub::AddSetTokenCallback(
     const base::Callback<void(int32)>& callback) {
   set_token_callbacks_.push_back(callback);
 }

 void GpuCommandBufferStub::OnSetToken(int32 token) {
   for (size_t i = 0; i < set_token_callbacks_.size(); ++i)
     set_token_callbacks_[i].Run(token);
 }

(...skipping 75 matching lines...)
       new GpuVideoDecodeAccelerator(this, route_id_, this));
   video_decoder_->Initialize(configs);
 }

 void GpuCommandBufferStub::OnDestroyVideoDecoder() {
   LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder";
   video_decoder_.reset();
 }

 #endif  // defined(ENABLE_GPU)
