Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 7458010: Revert 93066 - Execute all GL commands up to the put offset reported by each flush. This means g... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 5 months ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(ENABLE_GPU)

 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/debug/trace_event.h"
 #include "base/process_util.h"
(...skipping 46 matching lines...)
   if (scheduler_.get()) {
     scheduler_->Destroy();
   }

   GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
   gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(
       handle_, renderer_id_, render_view_id_));
 }

 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
+  // If the scheduler is unscheduled, defer sync and async messages until it is
+  // rescheduled. Also, even if the scheduler is scheduled, do not allow newly
+  // received messages to be handled before previously received deferred ones;
+  // append them to the deferred queue as well.
+  if ((scheduler_.get() && !scheduler_->IsScheduled()) ||
+      !deferred_messages_.empty()) {
+    deferred_messages_.push(new IPC::Message(message));
+    return true;
+  }
+
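Note: the second clause of the condition above, `!deferred_messages_.empty()`, is what preserves arrival order. Once anything has been deferred, every later message must queue behind it, even if the scheduler has since been rescheduled. A minimal standalone sketch of that invariant (hypothetical names, not Chromium code):

#include <iostream>
#include <queue>
#include <string>

class Stub {
 public:
  bool scheduled = true;
  std::queue<std::string> deferred;

  void OnMessage(const std::string& m) {
    // Defer while unscheduled, and also while older messages are still
    // queued; handling `m` now would let it overtake them.
    if (!scheduled || !deferred.empty()) {
      deferred.push(m);
      return;
    }
    std::cout << "handled " << m << "\n";
  }
};

int main() {
  Stub stub;
  stub.scheduled = false;
  stub.OnMessage("a");  // deferred: unscheduled
  stub.scheduled = true;
  stub.OnMessage("b");  // still deferred: "a" has to go first
  std::cout << stub.deferred.size() << " deferred\n";  // prints "2 deferred"
}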
   // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
   // here. This is so the reply can be delayed if the scheduler is unscheduled.
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                     OnInitialize);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent,
                                     OnSetParent);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
-    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateTransferBuffer,
                                     OnCreateTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_RegisterTransferBuffer,
                                     OnRegisterTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_DestroyTransferBuffer,
                                     OnDestroyTransferBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                     OnGetTransferBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateVideoDecoder,
                         OnCreateVideoDecoder)
(...skipping 13 matching lines...)
   handled = video_decoder_->OnMessageReceived(message);

   DCHECK(handled);
   return handled;
 }
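Note: regarding the IPC_MESSAGE_HANDLER_DELAY_REPLY comment at the top of this map: with a delayed reply, the handler takes ownership of the reply message instead of the dispatcher sending it when the handler returns, so the reply can be held until the stub is rescheduled. A standalone sketch of that pattern under simplified assumptions (hypothetical `Message` and `Stub` types, not the real IPC machinery):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Message {
  std::string payload;
};

class Stub {
 public:
  // DELAY_REPLY style: the handler owns `reply` and may stash it for later.
  void OnGetState(std::unique_ptr<Message> reply) {
    if (!scheduled_) {
      pending_.push_back(std::move(reply));  // reply once rescheduled
      return;
    }
    SendReply(std::move(reply));
  }

  void Reschedule() {
    scheduled_ = true;
    for (auto& reply : pending_)
      SendReply(std::move(reply));
    pending_.clear();
  }

 private:
  void SendReply(std::unique_ptr<Message> reply) {
    std::cout << "reply sent: " << reply->payload << "\n";
  }

  bool scheduled_ = false;
  std::vector<std::unique_ptr<Message>> pending_;
};

int main() {
  Stub stub;
  stub.OnGetState(std::make_unique<Message>(Message{"state"}));  // held
  stub.Reschedule();  // the deferred reply goes out now
}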

 bool GpuCommandBufferStub::Send(IPC::Message* message) {
   return channel_->Send(message);
 }

-bool GpuCommandBufferStub::IsScheduled() {
-  return !scheduler_.get() || scheduler_->IsScheduled();
-}
-
 void GpuCommandBufferStub::OnInitialize(
     base::SharedMemoryHandle ring_buffer,
     int32 size,
     IPC::Message* reply_message) {
   DCHECK(!command_buffer_.get());

   bool result = false;

   command_buffer_.reset(new gpu::CommandBufferService);

 #if defined(OS_WIN)
   // Windows dups the shared memory handle it receives into the current process
   // and closes it when this variable goes out of scope.
   base::SharedMemory shared_memory(ring_buffer,
                                    false,
                                    channel_->renderer_process());
 #else
   // POSIX receives a dup of the shared memory handle and closes the dup when
   // this variable goes out of scope.
   base::SharedMemory shared_memory(ring_buffer, false);
 #endif

   // Initialize the CommandBufferService and GpuScheduler.
   if (command_buffer_->Initialize(&shared_memory, size)) {
-    scheduler_.reset(gpu::GpuScheduler::Create(command_buffer_.get(),
-                                               channel_,
-                                               NULL));
+    scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
+                                           channel_,
+                                           NULL));
     if (scheduler_->Initialize(
         handle_,
         initial_size_,
         disallowed_extensions_,
         allowed_extensions_.c_str(),
         requested_attribs_,
         channel_->share_group())) {
       command_buffer_->SetPutOffsetChangeCallback(
           NewCallback(scheduler_.get(),
                       &gpu::GpuScheduler::PutChanged));
       command_buffer_->SetParseErrorCallback(
           NewCallback(this, &GpuCommandBufferStub::OnParseError));
       scheduler_->SetSwapBuffersCallback(
           NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
+      scheduler_->SetLatchCallback(base::Bind(
+          &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_));
       scheduler_->SetScheduledCallback(
-          NewCallback(channel_, &GpuChannel::OnScheduled));
+          NewCallback(this, &GpuCommandBufferStub::OnScheduled));
       scheduler_->SetTokenCallback(base::Bind(
           &GpuCommandBufferStub::OnSetToken, base::Unretained(this)));
       if (watchdog_)
         scheduler_->SetCommandProcessedCallback(
             NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));

 #if defined(OS_MACOSX) || defined(TOUCH_UI)
       if (handle_) {
         // This context conceptually puts its output directly on the
         // screen, rendered by the accelerated plugin layer in
(...skipping 78 matching lines...)
   // it may cause other side effects to simply pass the next WaitLatch on all
   // contexts. Instead, just lose all related contexts when there's an error.
   channel_->DestroySoon();
 }

 void GpuCommandBufferStub::OnFlush(int32 put_offset,
                                    int32 last_known_get,
                                    uint32 flush_count,
                                    IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush");
-  gpu::CommandBuffer::State state = command_buffer_->GetState();
-  if (flush_count - last_flush_count_ >= 0x8000000U) {
-    // We received this message out-of-order. This should not happen but is here
-    // to catch regressions. Ignore the message.
-    NOTREACHED() << "Received an AsyncFlush message out-of-order";
-    GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
-    Send(reply_message);
-  } else {
-    last_flush_count_ = flush_count;
-
-    // Reply immediately if the client was out of date with the current get
-    // offset.
-    bool reply_immediately = state.get_offset != last_known_get;
-    if (reply_immediately) {
-      GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
-      Send(reply_message);
-    }
-
-    // Process everything up to the put offset.
-    state = command_buffer_->FlushSync(put_offset, last_known_get);
-
-    // Lose all contexts if the context was lost.
-    if (state.error == gpu::error::kLostContext &&
-        gfx::GLContext::LosesAllContextsOnContextLost()) {
-      channel_->LoseAllContexts();
-    }
-
-    // Then, if the client was up-to-date with the get offset, reply to the
-    // synchronous IPC only after all commands are processed. This prevents
-    // the client from "spinning" when it fills up the command buffer.
-    // Otherwise, since the state has changed since the immediate reply, send
-    // an asynchronous state update back to the client.
-    if (!reply_immediately) {
-      GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
-      Send(reply_message);
-    } else {
-      ReportState();
-    }
-  }
+  gpu::CommandBuffer::State state;
+  if (flush_count - last_flush_count_ >= 0x8000000U) {
+    // We received this message out-of-order. This should not happen but is here
+    // to catch regressions. Ignore the message.
+    NOTREACHED() << "Received an AsyncFlush message out-of-order";
+    state = command_buffer_->GetState();
+  } else {
+    last_flush_count_ = flush_count;
+    state = command_buffer_->FlushSync(put_offset, last_known_get);
+  }
+  if (state.error == gpu::error::kLostContext &&
+      gfx::GLContext::LosesAllContextsOnContextLost())
+    channel_->LoseAllContexts();
+
+  GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
+  Send(reply_message);
 }
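Note: both versions of OnFlush (and OnAsyncFlush below) guard against out-of-order delivery with `flush_count - last_flush_count_ >= 0x8000000U`. Since `flush_count` is unsigned, the subtraction is modulo 2^32, so it measures how far `flush_count` is ahead of `last_flush_count_` on the circular counter; an enormous "distance" means the message is actually older than one already processed. A worked example (hypothetical helper, not Chromium code):

#include <cstdint>
#include <iostream>

// Mirrors the in-order test from the code above.
bool InOrder(uint32_t flush_count, uint32_t last_flush_count) {
  return flush_count - last_flush_count < 0x8000000U;
}

int main() {
  std::cout << InOrder(5, 3) << "\n";            // 1: ahead by 2
  std::cout << InOrder(1, 0xFFFFFFFFu) << "\n";  // 1: ahead by 2, across the wrap
  std::cout << InOrder(3, 5) << "\n";            // 0: behind; 3 - 5 wraps to 0xFFFFFFFE
}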

 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush");
   if (flush_count - last_flush_count_ < 0x8000000U) {
     last_flush_count_ = flush_count;
     command_buffer_->Flush(put_offset);
   } else {
     // We received this message out-of-order. This should not happen but is here
     // to catch regressions. Ignore the message.
     NOTREACHED() << "Received a Flush message out-of-order";
   }
-
-  ReportState();
-}
-
-void GpuCommandBufferStub::OnRescheduled() {
-  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
-  command_buffer_->Flush(state.put_offset);
-
-  ReportState();
+  // TODO(piman): Do this every time the scheduler finishes processing a batch
+  // of commands.
+  MessageLoop::current()->PostTask(FROM_HERE,
+      task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
 }

 void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
                                                   int32 id_request,
                                                   IPC::Message* reply_message) {
   int32 id = command_buffer_->CreateTransferBuffer(size, id_request);
   GpuCommandBufferMsg_CreateTransferBuffer::WriteReplyParams(reply_message, id);
   Send(reply_message);
 }

(...skipping 63 matching lines...)
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSwapBuffers");
   ReportState();
   Send(new GpuCommandBufferMsg_SwapBuffers(route_id_));
 }

 void GpuCommandBufferStub::OnCommandProcessed() {
   if (watchdog_)
     watchdog_->CheckArmed();
 }

+void GpuCommandBufferStub::HandleDeferredMessages() {
+  // Empty the deferred queue so OnMessageReceived does not defer on that
+  // account and to prevent an infinite loop if the scheduler is unscheduled
+  // as a result of handling already deferred messages.
+  std::queue<IPC::Message*> deferred_messages_copy;
+  std::swap(deferred_messages_copy, deferred_messages_);
+
+  while (!deferred_messages_copy.empty()) {
+    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
+    deferred_messages_copy.pop();
+
+    OnMessageReceived(*message);
+  }
+}
+
+void GpuCommandBufferStub::OnScheduled() {
+  // Post a task to handle any deferred messages. The deferred message queue is
+  // not emptied here, which ensures that OnMessageReceived will continue to
+  // defer newly received messages until the ones in the queue have all been
+  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
+  // task to prevent reentrancy.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      task_factory_.NewRunnableMethod(
+          &GpuCommandBufferStub::HandleDeferredMessages));
+}
+
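Note: the std::swap to a local queue in HandleDeferredMessages does two jobs at once: replayed messages see an empty member queue, so OnMessageReceived does not immediately re-defer them, and if a replayed message unschedules the stub again, the new deferrals accumulate for the next OnScheduled round instead of extending the current loop forever. A standalone sketch of that replay discipline (hypothetical names; the real code posts a task rather than replaying synchronously):

#include <iostream>
#include <queue>
#include <string>
#include <utility>

class Stub {
 public:
  void OnMessage(const std::string& m) {
    if (!scheduled_ || !deferred_.empty()) {
      deferred_.push(m);
      return;
    }
    std::cout << "handled " << m << "\n";
    if (m == "unschedule")  // handling a message may unschedule the stub
      scheduled_ = false;
  }

  void SetScheduled(bool scheduled) {
    scheduled_ = scheduled;
    if (scheduled_)
      HandleDeferred();  // the real code posts a task here instead
  }

 private:
  void HandleDeferred() {
    // Swap first: the member queue is empty during the replay, and any new
    // deferrals land there for the next round rather than in this loop.
    std::queue<std::string> copy;
    std::swap(copy, deferred_);
    while (!copy.empty()) {
      OnMessage(copy.front());
      copy.pop();
    }
  }

  bool scheduled_ = true;
  std::queue<std::string> deferred_;
};

int main() {
  Stub stub;
  stub.SetScheduled(false);
  stub.OnMessage("a");           // deferred
  stub.OnMessage("unschedule");  // deferred
  stub.OnMessage("b");           // deferred
  stub.SetScheduled(true);       // handles "a", then "unschedule"; "b" re-defers
  stub.SetScheduled(true);       // second round handles "b"
}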
 #if defined(OS_MACOSX)
 void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) {
   GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
   // Try using the IOSurface version first.
   uint64 new_backing_store = scheduler_->SetWindowSizeForIOSurface(size);
   if (new_backing_store) {
     GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
     params.renderer_id = renderer_id_;
     params.render_view_id = render_view_id_;
     params.window = handle_;
(...skipping 67 matching lines...)
   scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count);

   for (uint64 i = 0; i < delta; i++) {
     OnSwapBuffers();
     // Wake up the GpuScheduler to start doing work again.
     scheduler_->SetScheduled(true);
   }
 }
 #endif  // defined(OS_MACOSX) || defined(TOUCH_UI)

+void GpuCommandBufferStub::CommandBufferWasDestroyed() {
+  TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed");
+  // In case the renderer is currently blocked waiting for a sync reply from
+  // the stub, this method allows us to clean up and unblock pending messages.
+  if (scheduler_.get()) {
+    while (!scheduler_->IsScheduled())
+      scheduler_->SetScheduled(true);
+  }
+  // Handle any deferred messages now that the scheduler is not blocking
+  // message handling.
+  HandleDeferredMessages();
+}
+
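Note: the while loop above (rather than a single SetScheduled(true)) suggests that scheduling is a nested count, where each SetScheduled(false) must be balanced before IsScheduled() returns true; that reading is an assumption here, not confirmed by this file. A small sketch under that assumption (hypothetical class, not the real GpuScheduler):

#include <cassert>

class Scheduler {
 public:
  void SetScheduled(bool scheduled) {
    // Assumed semantics: unschedules nest, reschedules un-nest.
    unscheduled_count_ += scheduled ? -1 : 1;
    if (unscheduled_count_ < 0)
      unscheduled_count_ = 0;
  }
  bool IsScheduled() const { return unscheduled_count_ == 0; }

 private:
  int unscheduled_count_ = 0;
};

int main() {
  Scheduler s;
  s.SetScheduled(false);
  s.SetScheduled(false);  // e.g. two outstanding blocking conditions
  // Drain every nested unschedule, as CommandBufferWasDestroyed does:
  while (!s.IsScheduled())
    s.SetScheduled(true);
  assert(s.IsScheduled());
  return 0;
}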
 void GpuCommandBufferStub::AddSetTokenCallback(
     const base::Callback<void(int32)>& callback) {
   set_token_callbacks_.push_back(callback);
 }

 void GpuCommandBufferStub::OnSetToken(int32 token) {
   for (size_t i = 0; i < set_token_callbacks_.size(); ++i)
     set_token_callbacks_[i].Run(token);
 }

(...skipping 75 matching lines...)
       new GpuVideoDecodeAccelerator(this, route_id_, this));
   video_decoder_->Initialize(configs);
 }

 void GpuCommandBufferStub::OnDestroyVideoDecoder() {
   LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder";
   video_decoder_.reset();
 }

 #endif  // defined(ENABLE_GPU)