Chromium Code Reviews

Unified Diff: content/renderer/command_buffer_proxy.cc

Issue 6883179: Rework FlushSync to return early if commands have been processed since the last update (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: style Created 9 years, 8 months ago
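The companion header change lands in content/renderer/command_buffer_proxy.h (listed in this CL but not shown on this page). Assuming it simply mirrors the new definition below, the updated override would look roughly like this (a sketch, not the actual header diff):

  // Hypothetical sketch of the declaration matching the new definition below.
  virtual gpu::CommandBuffer::State FlushSync(int32 put_offset,
                                              int32 last_known_get);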
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "content/renderer/command_buffer_proxy.h"
 
 #include "base/logging.h"
 #include "base/process_util.h"
 #include "base/shared_memory.h"
 #include "base/task.h"
(...skipping 122 matching lines...)
   // Return locally cached ring buffer.
   Buffer buffer;
   buffer.ptr = ring_buffer_->memory();
   buffer.size = num_entries_ * sizeof(gpu::CommandBufferEntry);
   buffer.shared_memory = ring_buffer_.get();
   return buffer;
 }
 
 gpu::CommandBuffer::State CommandBufferProxy::GetState() {
   // Send will flag state with lost context if IPC fails.
-  if (last_state_.error == gpu::error::kNoError)
-    Send(new GpuCommandBufferMsg_GetState(route_id_, &last_state_));
+  if (last_state_.error == gpu::error::kNoError) {
+    gpu::CommandBuffer::State state;
+    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
+      OnUpdateState(state);
+  }
 
   return last_state_;
 }
 
 void CommandBufferProxy::Flush(int32 put_offset) {
-  AsyncFlush(put_offset, NULL);
+  if (last_state_.error != gpu::error::kNoError)
+    return;
+
+  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_, put_offset));
 }
 
-gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset) {
+gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset,
+                                                        int32 last_known_get) {
   GPU_TRACE_EVENT0("gpu", "CommandBufferProxy::FlushSync");
-  // Send will flag state with lost context if IPC fails.
-  if (last_state_.error == gpu::error::kNoError) {
-    Send(new GpuCommandBufferMsg_Flush(route_id_,
-                                       put_offset,
-                                       &last_state_));
+  if (last_known_get == last_state_.get_offset) {
+    // Send will flag state with lost context if IPC fails.
+    if (last_state_.error == gpu::error::kNoError) {
+      gpu::CommandBuffer::State state;
+      if (Send(new GpuCommandBufferMsg_Flush(route_id_,
+                                             put_offset,
+                                             last_known_get,
+                                             &state)))
+        OnUpdateState(state);
+    }
+  } else {
+    Flush(put_offset);
   }
 
   return last_state_;
 }
 
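The new last_known_get parameter is what makes the early return possible: the proxy only pays for a blocking GpuCommandBufferMsg_Flush round trip when the caller has observed no progress since its last state update; otherwise it just queues an asynchronous flush and hands back the cached state. A rough caller-side sketch of the intended usage follows (hypothetical helper and space check; the real client code is in gpu/command_buffer/client/cmd_buffer_helper.cc, also touched by this CL):

  #include "gpu/command_buffer/common/command_buffer.h"

  // Stand-in for whatever free-space/token check the real helper performs
  // (hypothetical, for illustration only).
  bool HasEnoughRoom(const gpu::CommandBuffer::State& state, int32 put_offset);

  // Hypothetical polling loop built only on the interface shown in this CL.
  void WaitForSpace(gpu::CommandBuffer* command_buffer,
                    int32 put_offset,
                    gpu::CommandBuffer::State* state) {
    while (!HasEnoughRoom(*state, put_offset) &&
           state->error == gpu::error::kNoError) {
      // Pass the get offset we last observed. If the cached state already
      // shows progress past it, FlushSync only issues an asynchronous flush
      // and returns immediately; only when nothing has moved does it block
      // on a synchronous GpuCommandBufferMsg_Flush.
      *state = command_buffer->FlushSync(put_offset, state->get_offset);
    }
  }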
 void CommandBufferProxy::SetGetOffset(int32 get_offset) {
   // Not implemented in proxy.
   NOTREACHED();
 }
 
(...skipping 168 matching lines...)
 
 #if defined(OS_MACOSX)
 void CommandBufferProxy::SetWindowSize(const gfx::Size& size) {
   if (last_state_.error != gpu::error::kNoError)
     return;
 
   Send(new GpuCommandBufferMsg_SetWindowSize(route_id_, size));
 }
 #endif
 
-void CommandBufferProxy::AsyncGetState(Task* completion_task) {
-  if (last_state_.error != gpu::error::kNoError)
-    return;
-
-  IPC::Message* message = new GpuCommandBufferMsg_AsyncGetState(route_id_);
-
-  // Do not let a synchronous flush hold up this message. If this handler is
-  // deferred until after the synchronous flush completes, it will overwrite the
-  // cached last_state_ with out-of-date data.
-  message->set_unblock(true);
-
-  if (Send(message))
-    pending_async_flush_tasks_.push(linked_ptr<Task>(completion_task));
-}
-
-void CommandBufferProxy::AsyncFlush(int32 put_offset, Task* completion_task) {
-  if (last_state_.error != gpu::error::kNoError)
-    return;
-
-  IPC::Message* message = new GpuCommandBufferMsg_AsyncFlush(route_id_,
-                                                             put_offset);
-
-  // Do not let a synchronous flush hold up this message. If this handler is
-  // deferred until after the synchronous flush completes, it will overwrite the
-  // cached last_state_ with out-of-date data.
-  message->set_unblock(true);
-
-  if (Send(message))
-    pending_async_flush_tasks_.push(linked_ptr<Task>(completion_task));
-}
-
 bool CommandBufferProxy::Send(IPC::Message* msg) {
   // Caller should not intentionally send a message if the context is lost.
   DCHECK(last_state_.error == gpu::error::kNoError);
 
   if (channel_) {
     if (channel_->Send(msg)) {
       return true;
     } else {
       // Flag the command buffer as lost. Defer deleting the channel until
       // OnChannelError is called after returning to the message loop in case
       // it is referenced elsewhere.
       last_state_.error = gpu::error::kLostContext;
       return false;
     }
   }
 
   // Callee takes ownership of message, regardless of whether Send is
   // successful. See IPC::Message::Sender.
   delete msg;
   return false;
 }
 
 void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) {
-  last_state_ = state;
-
-  linked_ptr<Task> task = pending_async_flush_tasks_.front();
-  pending_async_flush_tasks_.pop();
-
-  if (task.get()) {
-    // Although we need need to update last_state_ while potentially waiting
-    // for a synchronous flush to complete, we do not need to invoke the
-    // callback synchonously. Also, post it as a non nestable task so it is
-    // always invoked by the outermost message loop.
-    MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release());
-  }
+  // Handle wraparound. It works as long as we don't have more than 2B state
+  // updates in flight across which reordering occurs.
+  if (state.generation - last_state_.generation < 0x80000000U)
+    last_state_ = state;
 }
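The generation check above relies on unsigned wraparound arithmetic: subtracting the cached generation from the incoming one yields a value below 0x80000000 only when the incoming update is newer, even if the 32-bit counter has wrapped in between. A standalone illustration (not part of the CL; the values are made up):

  #include <assert.h>
  #include <stdint.h>

  int main() {
    uint32_t cached = 0xFFFFFFFEu;  // cached generation just before wrapping
    uint32_t newer = 1u;            // three updates later, after wraparound
    uint32_t stale = 0xFFFFFFF0u;   // an older update delivered late

    // 1 - 0xFFFFFFFE == 3 (mod 2^32): below 0x80000000, so it is accepted.
    assert(newer - cached < 0x80000000u);
    // 0xFFFFFFF0 - 0xFFFFFFFE == 0xFFFFFFF2: not below 0x80000000, so the
    // stale, reordered update is ignored and last_state_ is kept.
    assert(stale - cached >= 0x80000000u);
    return 0;
  }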