| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/renderer/gpu/command_buffer_proxy.h" | |
| 6 | |
| 7 #include "base/callback.h" | |
| 8 #include "base/debug/trace_event.h" | |
| 9 #include "base/logging.h" | |
| 10 #include "base/process_util.h" | |
| 11 #include "base/shared_memory.h" | |
| 12 #include "base/stl_util.h" | |
| 13 #include "content/common/child_process_messages.h" | |
| 14 #include "content/common/child_thread.h" | |
| 15 #include "content/common/gpu/gpu_messages.h" | |
| 16 #include "content/common/plugin_messages.h" | |
| 17 #include "content/common/view_messages.h" | |
| 18 #include "content/renderer/gpu/gpu_channel_host.h" | |
| 19 #include "content/renderer/plugin_channel_host.h" | |
| 20 #include "gpu/command_buffer/common/cmd_buffer_common.h" | |
| 21 #include "ui/gfx/size.h" | |
| 22 | |
| 23 using gpu::Buffer; | |
| 24 | |
// Constructs a proxy for a command buffer living in the GPU process, bound
// to |channel| and the GPU-side route |route_id|. The proxy does not own
// |channel|; the pointer is cleared on context loss (see OnDestroyed).
CommandBufferProxy::CommandBufferProxy(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0) {
}
| 32 | |
| 33 CommandBufferProxy::~CommandBufferProxy() { | |
| 34 // Delete all the locally cached shared memory objects, closing the handle | |
| 35 // in this process. | |
| 36 for (TransferBufferMap::iterator it = transfer_buffers_.begin(); | |
| 37 it != transfer_buffers_.end(); | |
| 38 ++it) { | |
| 39 delete it->second.shared_memory; | |
| 40 it->second.shared_memory = NULL; | |
| 41 } | |
| 42 } | |
| 43 | |
// Dispatches IPC messages routed to this command buffer. Returns true when
// the message was recognized and handled; unhandled messages trip the DCHECK
// below because only command-buffer messages should be routed here.
bool CommandBufferProxy::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxy, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateState, OnUpdateState);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_NotifyRepaint,
                        OnNotifyRepaint);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Every message routed here is expected to be handled by the map above.
  DCHECK(handled);
  return handled;
}
| 59 | |
| 60 void CommandBufferProxy::OnChannelError() { | |
| 61 for (Decoders::iterator it = video_decoder_hosts_.begin(); | |
| 62 it != video_decoder_hosts_.end(); ++it) { | |
| 63 it->second->OnChannelError(); | |
| 64 } | |
| 65 OnDestroyed(gpu::error::kUnknown); | |
| 66 } | |
| 67 | |
| 68 void CommandBufferProxy::OnDestroyed(gpu::error::ContextLostReason reason) { | |
| 69 // Prevent any further messages from being sent. | |
| 70 channel_ = NULL; | |
| 71 | |
| 72 // When the client sees that the context is lost, they should delete this | |
| 73 // CommandBufferProxy and create a new one. | |
| 74 last_state_.error = gpu::error::kLostContext; | |
| 75 last_state_.context_lost_reason = reason; | |
| 76 | |
| 77 if (!channel_error_callback_.is_null()) { | |
| 78 channel_error_callback_.Run(); | |
| 79 // Avoid calling the error callback more than once. | |
| 80 channel_error_callback_.Reset(); | |
| 81 } | |
| 82 } | |
| 83 | |
| 84 void CommandBufferProxy::OnEchoAck() { | |
| 85 DCHECK(!echo_tasks_.empty()); | |
| 86 base::Closure callback = echo_tasks_.front(); | |
| 87 echo_tasks_.pop(); | |
| 88 callback.Run(); | |
| 89 } | |
| 90 | |
| 91 void CommandBufferProxy::OnConsoleMessage( | |
| 92 const GPUCommandBufferConsoleMessage& message) { | |
| 93 // TODO(gman): Pass this on to the console. | |
| 94 DLOG(INFO) << "CONSOLE_MESSAGE: " | |
| 95 << message.id << " : " << message.message; | |
| 96 } | |
| 97 | |
// Registers |callback| to run (at most once) when the command buffer is
// destroyed or the GPU channel reports an error; see OnDestroyed.
void CommandBufferProxy::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}
| 102 | |
| 103 bool CommandBufferProxy::Initialize() { | |
| 104 if (!channel_->factory()->IsMainThread()) | |
| 105 return false; | |
| 106 | |
| 107 bool result; | |
| 108 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, &result))) { | |
| 109 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize."; | |
| 110 return false; | |
| 111 } | |
| 112 | |
| 113 if (!result) { | |
| 114 LOG(ERROR) << "Failed to initialize command buffer service."; | |
| 115 return false; | |
| 116 } | |
| 117 | |
| 118 return true; | |
| 119 } | |
| 120 | |
| 121 gpu::CommandBuffer::State CommandBufferProxy::GetState() { | |
| 122 // Send will flag state with lost context if IPC fails. | |
| 123 if (last_state_.error == gpu::error::kNoError) { | |
| 124 gpu::CommandBuffer::State state; | |
| 125 if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state))) | |
| 126 OnUpdateState(state); | |
| 127 } | |
| 128 | |
| 129 return last_state_; | |
| 130 } | |
| 131 | |
// Returns the most recently cached state without any IPC round trip.
gpu::CommandBuffer::State CommandBufferProxy::GetLastState() {
  return last_state_;
}
| 135 | |
| 136 void CommandBufferProxy::Flush(int32 put_offset) { | |
| 137 if (last_state_.error != gpu::error::kNoError) | |
| 138 return; | |
| 139 | |
| 140 TRACE_EVENT1("gpu", "CommandBufferProxy::Flush", "put_offset", put_offset); | |
| 141 | |
| 142 Send(new GpuCommandBufferMsg_AsyncFlush(route_id_, | |
| 143 put_offset, | |
| 144 ++flush_count_)); | |
| 145 } | |
| 146 | |
| 147 gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset, | |
| 148 int32 last_known_get) { | |
| 149 TRACE_EVENT1("gpu", "CommandBufferProxy::FlushSync", "put_offset", | |
| 150 put_offset); | |
| 151 Flush(put_offset); | |
| 152 if (last_known_get == last_state_.get_offset) { | |
| 153 // Send will flag state with lost context if IPC fails. | |
| 154 if (last_state_.error == gpu::error::kNoError) { | |
| 155 gpu::CommandBuffer::State state; | |
| 156 if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_, | |
| 157 &state))) | |
| 158 OnUpdateState(state); | |
| 159 } | |
| 160 } | |
| 161 | |
| 162 return last_state_; | |
| 163 } | |
| 164 | |
| 165 void CommandBufferProxy::SetGetBuffer(int32 shm_id) { | |
| 166 if (last_state_.error != gpu::error::kNoError) | |
| 167 return; | |
| 168 | |
| 169 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); | |
| 170 } | |
| 171 | |
// The get offset is owned by the service side; the proxy never sets it.
void CommandBufferProxy::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}
| 176 | |
| 177 int32 CommandBufferProxy::CreateTransferBuffer(size_t size, int32 id_request) { | |
| 178 if (last_state_.error != gpu::error::kNoError) | |
| 179 return -1; | |
| 180 | |
| 181 // Take ownership of shared memory. This will close the handle if Send below | |
| 182 // fails. Otherwise, callee takes ownership before this variable | |
| 183 // goes out of scope by duping the handle. | |
| 184 scoped_ptr<base::SharedMemory> shm( | |
| 185 channel_->factory()->AllocateSharedMemory(size)); | |
| 186 if (!shm.get()) | |
| 187 return -1; | |
| 188 | |
| 189 base::SharedMemoryHandle handle = shm->handle(); | |
| 190 #if defined(OS_POSIX) | |
| 191 DCHECK(!handle.auto_close); | |
| 192 #endif | |
| 193 | |
| 194 int32 id; | |
| 195 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, | |
| 196 handle, | |
| 197 size, | |
| 198 id_request, | |
| 199 &id))) { | |
| 200 return -1; | |
| 201 } | |
| 202 | |
| 203 return id; | |
| 204 } | |
| 205 | |
| 206 int32 CommandBufferProxy::RegisterTransferBuffer( | |
| 207 base::SharedMemory* shared_memory, | |
| 208 size_t size, | |
| 209 int32 id_request) { | |
| 210 if (last_state_.error != gpu::error::kNoError) | |
| 211 return -1; | |
| 212 | |
| 213 int32 id; | |
| 214 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer( | |
| 215 route_id_, | |
| 216 shared_memory->handle(), // Returns FileDescriptor with auto_close off. | |
| 217 size, | |
| 218 id_request, | |
| 219 &id))) { | |
| 220 return -1; | |
| 221 } | |
| 222 | |
| 223 return id; | |
| 224 } | |
| 225 | |
| 226 void CommandBufferProxy::DestroyTransferBuffer(int32 id) { | |
| 227 if (last_state_.error != gpu::error::kNoError) | |
| 228 return; | |
| 229 | |
| 230 // Remove the transfer buffer from the client side cache. | |
| 231 TransferBufferMap::iterator it = transfer_buffers_.find(id); | |
| 232 if (it != transfer_buffers_.end()) { | |
| 233 delete it->second.shared_memory; | |
| 234 transfer_buffers_.erase(it); | |
| 235 } | |
| 236 | |
| 237 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); | |
| 238 } | |
| 239 | |
| 240 Buffer CommandBufferProxy::GetTransferBuffer(int32 id) { | |
| 241 if (last_state_.error != gpu::error::kNoError) | |
| 242 return Buffer(); | |
| 243 | |
| 244 // Check local cache to see if there is already a client side shared memory | |
| 245 // object for this id. | |
| 246 TransferBufferMap::iterator it = transfer_buffers_.find(id); | |
| 247 if (it != transfer_buffers_.end()) { | |
| 248 return it->second; | |
| 249 } | |
| 250 | |
| 251 // Assuming we are in the renderer process, the service is responsible for | |
| 252 // duplicating the handle. This might not be true for NaCl. | |
| 253 base::SharedMemoryHandle handle; | |
| 254 uint32 size; | |
| 255 if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_, | |
| 256 id, | |
| 257 &handle, | |
| 258 &size))) { | |
| 259 return Buffer(); | |
| 260 } | |
| 261 | |
| 262 // Cache the transfer buffer shared memory object client side. | |
| 263 base::SharedMemory* shared_memory = new base::SharedMemory(handle, false); | |
| 264 | |
| 265 // Map the shared memory on demand. | |
| 266 if (!shared_memory->memory()) { | |
| 267 if (!shared_memory->Map(size)) { | |
| 268 delete shared_memory; | |
| 269 return Buffer(); | |
| 270 } | |
| 271 } | |
| 272 | |
| 273 Buffer buffer; | |
| 274 buffer.ptr = shared_memory->memory(); | |
| 275 buffer.size = size; | |
| 276 buffer.shared_memory = shared_memory; | |
| 277 transfer_buffers_[id] = buffer; | |
| 278 | |
| 279 return buffer; | |
| 280 } | |
| 281 | |
// The token is written by the service side; the proxy never sets it.
void CommandBufferProxy::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}
| 286 | |
| 287 void CommandBufferProxy::OnNotifyRepaint() { | |
| 288 if (!notify_repaint_task_.is_null()) | |
| 289 MessageLoop::current()->PostNonNestableTask( | |
| 290 FROM_HERE, notify_repaint_task_); | |
| 291 notify_repaint_task_.Reset(); | |
| 292 } | |
| 293 | |
// Parse errors originate on the service side; the proxy never sets them.
void CommandBufferProxy::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}
| 299 | |
// The context-lost reason is reported by the service (see OnDestroyed); the
// proxy never sets it directly.
void CommandBufferProxy::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  // Not implemented in proxy.
  NOTREACHED();
}
| 305 | |
| 306 bool CommandBufferProxy::Echo(const base::Closure& callback) { | |
| 307 if (last_state_.error != gpu::error::kNoError) { | |
| 308 return false; | |
| 309 } | |
| 310 | |
| 311 if (!Send(new GpuChannelMsg_Echo(GpuCommandBufferMsg_EchoAck(route_id_)))) { | |
| 312 return false; | |
| 313 } | |
| 314 | |
| 315 echo_tasks_.push(callback); | |
| 316 | |
| 317 return true; | |
| 318 } | |
| 319 | |
| 320 bool CommandBufferProxy::SetSurfaceVisible(bool visible) { | |
| 321 if (last_state_.error != gpu::error::kNoError) { | |
| 322 return false; | |
| 323 } | |
| 324 | |
| 325 return Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible)); | |
| 326 } | |
| 327 | |
| 328 | |
| 329 bool CommandBufferProxy::SetParent(CommandBufferProxy* parent_command_buffer, | |
| 330 uint32 parent_texture_id) { | |
| 331 if (last_state_.error != gpu::error::kNoError) | |
| 332 return false; | |
| 333 | |
| 334 bool result; | |
| 335 if (parent_command_buffer) { | |
| 336 if (!Send(new GpuCommandBufferMsg_SetParent( | |
| 337 route_id_, | |
| 338 parent_command_buffer->route_id_, | |
| 339 parent_texture_id, | |
| 340 &result))) { | |
| 341 return false; | |
| 342 } | |
| 343 } else { | |
| 344 if (!Send(new GpuCommandBufferMsg_SetParent( | |
| 345 route_id_, | |
| 346 MSG_ROUTING_NONE, | |
| 347 0, | |
| 348 &result))) { | |
| 349 return false; | |
| 350 } | |
| 351 } | |
| 352 | |
| 353 return result; | |
| 354 } | |
| 355 | |
// Stores |task| to be posted the next time the GPU process requests a
// repaint (see OnNotifyRepaint); it fires at most once per call here.
void CommandBufferProxy::SetNotifyRepaintTask(const base::Closure& task) {
  notify_repaint_task_ = task;
}
| 359 | |
| 360 scoped_refptr<GpuVideoDecodeAcceleratorHost> | |
| 361 CommandBufferProxy::CreateVideoDecoder( | |
| 362 media::VideoDecodeAccelerator::Profile profile, | |
| 363 media::VideoDecodeAccelerator::Client* client) { | |
| 364 int decoder_route_id; | |
| 365 if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile, | |
| 366 &decoder_route_id))) { | |
| 367 LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed"; | |
| 368 return NULL; | |
| 369 } | |
| 370 | |
| 371 scoped_refptr<GpuVideoDecodeAcceleratorHost> decoder_host = | |
| 372 new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client); | |
| 373 bool inserted = video_decoder_hosts_.insert(std::make_pair( | |
| 374 decoder_route_id, decoder_host)).second; | |
| 375 DCHECK(inserted); | |
| 376 | |
| 377 channel_->AddRoute(decoder_route_id, decoder_host->AsWeakPtr()); | |
| 378 | |
| 379 return decoder_host; | |
| 380 } | |
| 381 | |
// Returns the cached error without any IPC round trip.
gpu::error::Error CommandBufferProxy::GetLastError() {
  return last_state_.error;
}
| 385 | |
| 386 bool CommandBufferProxy::Send(IPC::Message* msg) { | |
| 387 // Caller should not intentionally send a message if the context is lost. | |
| 388 DCHECK(last_state_.error == gpu::error::kNoError); | |
| 389 | |
| 390 if (channel_) { | |
| 391 if (channel_->Send(msg)) { | |
| 392 return true; | |
| 393 } else { | |
| 394 // Flag the command buffer as lost. Defer deleting the channel until | |
| 395 // OnChannelError is called after returning to the message loop in case | |
| 396 // it is referenced elsewhere. | |
| 397 last_state_.error = gpu::error::kLostContext; | |
| 398 return false; | |
| 399 } | |
| 400 } | |
| 401 | |
| 402 // Callee takes ownership of message, regardless of whether Send is | |
| 403 // successful. See IPC::Message::Sender. | |
| 404 delete msg; | |
| 405 return false; | |
| 406 } | |
| 407 | |
// Accepts |state| only if it is not older than the cached state. The
// unsigned subtraction makes a late-arriving older generation land in
// [0x80000000, 0xFFFFFFFF] and be ignored.
void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}
| OLD | NEW |