Index: content/common/gpu/client/command_buffer_proxy_impl.cc
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.cc b/content/common/gpu/client/command_buffer_proxy_impl.cc
index a0f67b3dedb55955c6cbb71f0bea71a06cc660b0..9f3497c41e4d37afe59cf2135fd5b89b30ece133 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ b/content/common/gpu/client/command_buffer_proxy_impl.cc
@@ -47,6 +47,8 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
                         OnSetMemoryAllocation);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                         OnSignalSyncPointAck);
+    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
+                        OnSwapBuffersCompleted);
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
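The new IPC_MESSAGE_HANDLER entry routes the GPU process's swap-completed ack to a new member handler. That relies on matching declarations in command_buffer_proxy_impl.h, which this hunk does not show. A minimal sketch of the assumed header-side additions (the typedef and member names are inferred from the .cc changes below; the exact shape is an assumption, not part of this patch):

// Assumed additions to command_buffer_proxy_impl.h -- illustrative only.
// The callback carries the LatencyInfo that travelled with the frame.
typedef base::Callback<void(const std::vector<ui::LatencyInfo>&)>
    SwapBuffersCompletionCallback;

// Handler for GpuCommandBufferMsg_SwapBuffersCompleted.
void OnSwapBuffersCompleted(const std::vector<ui::LatencyInfo>& latency_info);

// Set by SetLatencyInfo() and run when the ack arrives from the GPU process.
SwapBuffersCompletionCallback swap_buffers_completion_callback_;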
@@ -187,7 +189,9 @@ void CommandBufferProxyImpl::Flush(int32 put_offset) {
 }

 void CommandBufferProxyImpl::SetLatencyInfo(
-    const std::vector<ui::LatencyInfo>& latency_info) {
+    const std::vector<ui::LatencyInfo>& latency_info,
+    const SwapBuffersCompletionCallback& callback) {
+  swap_buffers_completion_callback_ = callback;
   for (size_t i = 0; i < latency_info.size(); i++)
     latency_info_.push_back(latency_info[i]);
 }
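SetLatencyInfo() now also records the completion callback, so callers have to supply one alongside the latency info, which keeps the callback tied to the frame it describes. A hedged caller-side sketch (the class name, members, and include set are hypothetical, chosen only to show the calling convention, not taken from this patch):

#include <vector>

#include "base/bind.h"
#include "base/memory/weak_ptr.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"

namespace content {

// Hypothetical owner of the proxy, e.g. an output-surface-like object.
class HypotheticalSwapClient {
 public:
  explicit HypotheticalSwapClient(CommandBufferProxyImpl* proxy)
      : proxy_(proxy), weak_factory_(this) {}

  void SwapBuffers(const std::vector<ui::LatencyInfo>& latency_info) {
    // Hand the latency info plus a completion callback to the proxy before
    // issuing the swap; the callback runs when OnSwapBuffersCompleted() fires.
    proxy_->SetLatencyInfo(
        latency_info,
        base::Bind(&HypotheticalSwapClient::OnSwapCompleted,
                   weak_factory_.GetWeakPtr()));
    // ... issue the actual swap here ...
  }

 private:
  void OnSwapCompleted(const std::vector<ui::LatencyInfo>& latency_info) {
    // Forward the returned latency info to whatever tracking the caller keeps.
  }

  CommandBufferProxyImpl* proxy_;
  base::WeakPtrFactory<HypotheticalSwapClient> weak_factory_;
};

}  // namespace content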
@@ -504,4 +508,12 @@ gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
       shared_state_shm_->memory());
 }

+void CommandBufferProxyImpl::OnSwapBuffersCompleted(
+    const std::vector<ui::LatencyInfo>& latency_info) {
Mike West
2014/10/07 04:46:23
It doesn't look like you have any limits on the am
+  if (!swap_buffers_completion_callback_.is_null()) {
+    swap_buffers_completion_callback_.Run(latency_info);
+    swap_buffers_completion_callback_.Reset();
piman
2014/10/07 02:30:39
If we have more than 1 frame deep, I don't think t
no sievers
2014/10/07 19:47:24
Done.
+  }
+}
+
 }  // namespace content
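piman's comment above points at the limitation of keeping a single member callback and resetting it after one run: with more than one swap in flight, a later SetLatencyInfo() would overwrite the callback belonging to the earlier frame. Purely to illustrate that concern (this is not necessarily the follow-up behind the "Done." reply), one option is a queue with one entry per outstanding swap; another is simply to keep the callback persistent and re-run it for every ack. A sketch of the queue variant:

#include <queue>

// Member sketch (would live in the header): one callback per pending swap.
std::queue<SwapBuffersCompletionCallback> pending_swap_callbacks_;

void CommandBufferProxyImpl::OnSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (pending_swap_callbacks_.empty())
    return;
  // Acks are assumed to arrive in submission order, so the front callback
  // belongs to the oldest swap still awaiting its ack.
  pending_swap_callbacks_.front().Run(latency_info);
  pending_swap_callbacks_.pop();
}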