| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/gpu/command_buffer_proxy.h" | 5 #include "content/renderer/gpu/command_buffer_proxy.h" |
| 6 | 6 |
| 7 #include "base/debug/trace_event.h" | 7 #include "base/debug/trace_event.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/process_util.h" | 9 #include "base/process_util.h" |
| 10 #include "base/shared_memory.h" | 10 #include "base/shared_memory.h" |
| 11 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
| 12 #include "base/task.h" | 12 #include "base/task.h" |
| 13 #include "content/common/child_process_messages.h" | 13 #include "content/common/child_process_messages.h" |
| 14 #include "content/common/child_thread.h" | 14 #include "content/common/child_thread.h" |
| 15 #include "content/common/gpu/gpu_messages.h" | 15 #include "content/common/gpu/gpu_messages.h" |
| 16 #include "content/common/plugin_messages.h" | 16 #include "content/common/plugin_messages.h" |
| 17 #include "content/common/view_messages.h" | 17 #include "content/common/view_messages.h" |
| 18 #include "content/renderer/gpu/gpu_channel_host.h" | 18 #include "content/renderer/gpu/gpu_channel_host.h" |
| 19 #include "content/renderer/plugin_channel_host.h" | 19 #include "content/renderer/plugin_channel_host.h" |
| 20 #include "gpu/command_buffer/common/cmd_buffer_common.h" | 20 #include "gpu/command_buffer/common/cmd_buffer_common.h" |
| 21 #include "ui/gfx/size.h" | 21 #include "ui/gfx/size.h" |
| 22 | 22 |
| 23 using gpu::Buffer; | 23 using gpu::Buffer; |
| 24 | 24 |
| 25 CommandBufferProxy::CommandBufferProxy( | 25 CommandBufferProxy::CommandBufferProxy( |
| 26 GpuChannelHost* channel, | 26 GpuChannelHost* channel, |
| 27 int route_id) | 27 int route_id) |
| 28 : num_entries_(0), | 28 : channel_(channel), |
| 29 channel_(channel), | |
| 30 route_id_(route_id), | 29 route_id_(route_id), |
| 31 flush_count_(0) { | 30 flush_count_(0) { |
| 32 } | 31 } |
| 33 | 32 |
| 34 CommandBufferProxy::~CommandBufferProxy() { | 33 CommandBufferProxy::~CommandBufferProxy() { |
| 35 // Delete all the locally cached shared memory objects, closing the handle | 34 // Delete all the locally cached shared memory objects, closing the handle |
| 36 // in this process. | 35 // in this process. |
| 37 for (TransferBufferMap::iterator it = transfer_buffers_.begin(); | 36 for (TransferBufferMap::iterator it = transfer_buffers_.begin(); |
| 38 it != transfer_buffers_.end(); | 37 it != transfer_buffers_.end(); |
| 39 ++it) { | 38 ++it) { |
| (...skipping 46 matching lines...) |
| 86 base::Closure callback = echo_tasks_.front(); | 85 base::Closure callback = echo_tasks_.front(); |
| 87 echo_tasks_.pop(); | 86 echo_tasks_.pop(); |
| 88 callback.Run(); | 87 callback.Run(); |
| 89 } | 88 } |
| 90 | 89 |
| 91 void CommandBufferProxy::SetChannelErrorCallback( | 90 void CommandBufferProxy::SetChannelErrorCallback( |
| 92 const base::Closure& callback) { | 91 const base::Closure& callback) { |
| 93 channel_error_callback_ = callback; | 92 channel_error_callback_ = callback; |
| 94 } | 93 } |
| 95 | 94 |
| 96 bool CommandBufferProxy::Initialize(int32 size) { | 95 bool CommandBufferProxy::Initialize() { |
| 97 DCHECK(!ring_buffer_.get()); | |
| 98 | |
| 99 ChildThread* child_thread = ChildThread::current(); | 96 ChildThread* child_thread = ChildThread::current(); |
| 100 if (!child_thread) | 97 if (!child_thread) |
| 101 return false; | 98 return false; |
| 102 | 99 |
| 103 base::SharedMemoryHandle handle; | |
| 104 if (!child_thread->Send(new ChildProcessHostMsg_SyncAllocateSharedMemory( | |
| 105 size, | |
| 106 &handle))) { | |
| 107 return false; | |
| 108 } | |
| 109 | |
| 110 if (!base::SharedMemory::IsHandleValid(handle)) | |
| 111 return false; | |
| 112 | |
| 113 #if defined(OS_POSIX) | |
| 114 handle.auto_close = false; | |
| 115 #endif | |
| 116 | |
| 117 // Take ownership of shared memory. This will close the handle if Send below | |
| 118 // fails. Otherwise, callee takes ownership before this variable | |
| 119 // goes out of scope. | |
| 120 base::SharedMemory shared_memory(handle, false); | |
| 121 | |
| 122 return Initialize(&shared_memory, size); | |
| 123 } | |
| 124 | |
| 125 bool CommandBufferProxy::Initialize(base::SharedMemory* buffer, int32 size) { | |
| 126 bool result; | 100 bool result; |
| 127 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, | 101 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, &result))) { |
| 128 buffer->handle(), | |
| 129 size, | |
| 130 &result))) { | |
| 131 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize."; | 102 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize."; |
| 132 return false; | 103 return false; |
| 133 } | 104 } |
| 134 | 105 |
| 135 if (!result) { | 106 if (!result) { |
| 136 LOG(ERROR) << "Failed to initialize command buffer service."; | 107 LOG(ERROR) << "Failed to initialize command buffer service."; |
| 137 return false; | 108 return false; |
| 138 } | 109 } |
| 139 | 110 |
| 140 base::SharedMemoryHandle handle; | |
| 141 if (!buffer->GiveToProcess(base::GetCurrentProcessHandle(), &handle)) { | |
| 142 LOG(ERROR) << "Failed to duplicate command buffer handle."; | |
| 143 return false; | |
| 144 } | |
| 145 | |
| 146 ring_buffer_.reset(new base::SharedMemory(handle, false)); | |
| 147 if (!ring_buffer_->Map(size)) { | |
| 148 LOG(ERROR) << "Failed to map shared memory for command buffer."; | |
| 149 ring_buffer_.reset(); | |
| 150 return false; | |
| 151 } | |
| 152 | |
| 153 num_entries_ = size / sizeof(gpu::CommandBufferEntry); | |
| 154 return true; | 111 return true; |
| 155 } | 112 } |
| 156 | 113 |
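With this change Initialize() only performs the IPC handshake; the proxy no longer allocates or maps a ring buffer in the renderer. A minimal sketch of how a client might now set up the command ring, assuming CreateTransferBuffer() and GetTransferBuffer() keep their existing gpu::CommandBuffer signatures and using SetGetBuffer(), which is added further down in this patch; the function name, size constant, and the -1 id request are illustrative only:

  // Sketch only, not part of this change: client-side setup after the
  // ring-buffer allocation was removed from Initialize().
  bool SetUpCommandRing(CommandBufferProxy* proxy) {
    if (!proxy->Initialize())
      return false;

    // Allocate shared memory through the existing transfer-buffer path and
    // tell the service to use it as the command (get) buffer.
    const int32 kCommandBufferSize = 1024 * 1024;  // illustrative size
    int32 ring_id = proxy->CreateTransferBuffer(kCommandBufferSize,
                                                -1 /* no id requested */);
    if (ring_id < 0)
      return false;
    proxy->SetGetBuffer(ring_id);

    // The mapped memory is then retrieved like any other transfer buffer.
    gpu::Buffer ring = proxy->GetTransferBuffer(ring_id);
    return ring.ptr != NULL;
  }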
| 157 Buffer CommandBufferProxy::GetRingBuffer() { | |
| 158 DCHECK(ring_buffer_.get()); | |
| 159 // Return locally cached ring buffer. | |
| 160 Buffer buffer; | |
| 161 if (ring_buffer_.get()) { | |
| 162 buffer.ptr = ring_buffer_->memory(); | |
| 163 buffer.size = num_entries_ * sizeof(gpu::CommandBufferEntry); | |
| 164 buffer.shared_memory = ring_buffer_.get(); | |
| 165 } else { | |
| 166 buffer.ptr = NULL; | |
| 167 buffer.size = 0; | |
| 168 buffer.shared_memory = NULL; | |
| 169 } | |
| 170 return buffer; | |
| 171 } | |
| 172 | |
| 173 gpu::CommandBuffer::State CommandBufferProxy::GetState() { | 114 gpu::CommandBuffer::State CommandBufferProxy::GetState() { |
| 174 // Send will flag state with lost context if IPC fails. | 115 // Send will flag state with lost context if IPC fails. |
| 175 if (last_state_.error == gpu::error::kNoError) { | 116 if (last_state_.error == gpu::error::kNoError) { |
| 176 gpu::CommandBuffer::State state; | 117 gpu::CommandBuffer::State state; |
| 177 if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state))) | 118 if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state))) |
| 178 OnUpdateState(state); | 119 OnUpdateState(state); |
| 179 } | 120 } |
| 180 | 121 |
| 181 return last_state_; | 122 return last_state_; |
| 182 } | 123 } |
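GetState() issues a synchronous IPC and caches the reply in last_state_; callers that need to wait on the GPU typically poll it. A rough sketch, assuming gpu::CommandBuffer::State carries the usual get_offset and error fields (the helper name and put_offset parameter are illustrative):

  // Sketch (assumption): spin until the service has consumed commands up to
  // put_offset, in the spirit of what gpu::CommandBufferHelper layers on top
  // of this interface.
  void WaitForGetOffset(CommandBufferProxy* proxy, int32 put_offset) {
    gpu::CommandBuffer::State state = proxy->GetState();
    while (state.get_offset != put_offset &&
           state.error == gpu::error::kNoError)
      state = proxy->GetState();
  }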
| (...skipping 24 matching lines...) |
| 207 gpu::CommandBuffer::State state; | 148 gpu::CommandBuffer::State state; |
| 208 if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_, | 149 if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_, |
| 209 &state))) | 150 &state))) |
| 210 OnUpdateState(state); | 151 OnUpdateState(state); |
| 211 } | 152 } |
| 212 } | 153 } |
| 213 | 154 |
| 214 return last_state_; | 155 return last_state_; |
| 215 } | 156 } |
| 216 | 157 |
| 158 void CommandBufferProxy::SetGetBuffer(int32 shm_id) { |
| 159 if (last_state_.error != gpu::error::kNoError) |
| 160 return; |
| 161 |
| 162 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); |
| 163 } |
| 164 |
| 217 void CommandBufferProxy::SetGetOffset(int32 get_offset) { | 165 void CommandBufferProxy::SetGetOffset(int32 get_offset) { |
| 218 // Not implemented in proxy. | 166 // Not implemented in proxy. |
| 219 NOTREACHED(); | 167 NOTREACHED(); |
| 220 } | 168 } |
| 221 | 169 |
| 222 int32 CommandBufferProxy::CreateTransferBuffer(size_t size, int32 id_request) { | 170 int32 CommandBufferProxy::CreateTransferBuffer(size_t size, int32 id_request) { |
| 223 if (last_state_.error != gpu::error::kNoError) | 171 if (last_state_.error != gpu::error::kNoError) |
| 224 return -1; | 172 return -1; |
| 225 | 173 |
| 226 ChildThread* child_thread = ChildThread::current(); | 174 ChildThread* child_thread = ChildThread::current(); |
| (...skipping 230 matching lines...) |
| 457 delete msg; | 405 delete msg; |
| 458 return false; | 406 return false; |
| 459 } | 407 } |
| 460 | 408 |
| 461 void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) { | 409 void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) { |
| 462 // Handle wraparound. It works as long as we don't have more than 2B state | 410 // Handle wraparound. It works as long as we don't have more than 2B state |
| 463 // updates in flight across which reordering occurs. | 411 // updates in flight across which reordering occurs. |
| 464 if (state.generation - last_state_.generation < 0x80000000U) | 412 if (state.generation - last_state_.generation < 0x80000000U) |
| 465 last_state_ = state; | 413 last_state_ = state; |
| 466 } | 414 } |
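The unsigned subtraction above is what makes the generation check wrap-safe; a small standalone illustration with hypothetical values (not taken from this code):

  // uint32 arithmetic is modular, so a "newer" generation stays within
  // 0x80000000 of the previous one even across the 0xFFFFFFFF -> 0 boundary.
  uint32 last = 0xFFFFFFF0u;  // generation just before wrapping
  uint32 next = 0x00000010u;  // generation just after wrapping
  // next - last == 0x20, so the update is accepted as newer...
  DCHECK(next - last < 0x80000000U);
  // ...while a reordered (stale) update fails the same test and is dropped.
  DCHECK(!(last - next < 0x80000000U));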