| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h" | 5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h" |
| 6 | 6 |
| 7 #include <vector> | 7 #include <vector> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| 11 #include "base/stl_util-inl.h" | 11 #include "base/stl_util-inl.h" |
| 12 #include "gpu/command_buffer/common/command_buffer.h" | 12 #include "gpu/command_buffer/common/command_buffer.h" |
| 13 #include "ipc/ipc_message_macros.h" | 13 #include "ipc/ipc_message_macros.h" |
| 14 #include "ipc/ipc_message_utils.h" | 14 #include "ipc/ipc_message_utils.h" |
| 15 #include "content/common/gpu/gpu_channel.h" | 15 #include "content/common/gpu/gpu_channel.h" |
| 16 #include "content/common/gpu/gpu_command_buffer_stub.h" | 16 #include "content/common/gpu/gpu_command_buffer_stub.h" |
| 17 #include "content/common/gpu/gpu_messages.h" | 17 #include "content/common/gpu/gpu_messages.h" |
| 18 #include "content/common/gpu/media/gpu_video_service.h" | 18 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) |
| 19 #include "content/common/gpu/media/omx_video_decode_accelerator.h" |
| 20 #include "ui/gfx/gl/gl_surface_egl.h" |
| 21 #endif |
| 19 #include "ui/gfx/size.h" | 22 #include "ui/gfx/size.h" |
| 20 | 23 |
| 21 GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator( | 24 GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator( |
| 22 IPC::Message::Sender* sender, | 25 IPC::Message::Sender* sender, |
| 23 int32 host_route_id, | 26 int32 host_route_id, |
| 24 int32 decoder_route_id, | 27 int32 decoder_route_id, |
| 25 GpuCommandBufferStub* stub) | 28 GpuCommandBufferStub* stub) |
| 26 : sender_(sender), | 29 : sender_(sender), |
| 27 host_route_id_(host_route_id), | 30 host_route_id_(host_route_id), |
| 28 decoder_route_id_(decoder_route_id), | 31 decoder_route_id_(decoder_route_id), |
| 29 stub_(stub), | 32 stub_(stub), |
| 30 video_decode_accelerator_(NULL) { | 33 video_decode_accelerator_(NULL) { |
| 34 // stub_ owns and will always outlive this object. |
| 31 stub_->AddSetTokenCallback(base::Bind( | 35 stub_->AddSetTokenCallback(base::Bind( |
| 32 &GpuVideoDecodeAccelerator::OnSetToken, this)); | 36 &GpuVideoDecodeAccelerator::OnSetToken, base::Unretained(this))); |
| 33 } | 37 } |
| 34 | 38 |
| 35 GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() { | 39 GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() { |
| 36 STLDeleteElements(&deferred_messages_); | 40 STLDeleteElements(&deferred_messages_); |
| 41 // TODO(fischman/vrk): We need to synchronously wait for the OMX decoder |
| 42 // to finish shutting down. |
| 37 } | 43 } |
| 38 | 44 |
| 39 void GpuVideoDecodeAccelerator::OnSetToken(int32 token) { | 45 void GpuVideoDecodeAccelerator::OnSetToken(int32 token) { |
| 40 // Note: this always retries all deferred messages on every token arrival. | 46 // Note: this always retries all deferred messages on every token arrival. |
| 41 // There's an optimization to be done here by only trying messages which are | 47 // There's an optimization to be done here by only trying messages which are |
| 42 // waiting for tokens which are earlier than |token|. | 48 // waiting for tokens which are earlier than |token|. |
| 43 std::vector<IPC::Message*> deferred_messages_copy; | 49 std::vector<IPC::Message*> deferred_messages_copy; |
| 44 std::swap(deferred_messages_copy, deferred_messages_); | 50 std::swap(deferred_messages_copy, deferred_messages_); |
| 45 for (size_t i = 0; i < deferred_messages_copy.size(); ++i) | 51 for (size_t i = 0; i < deferred_messages_copy.size(); ++i) |
| 46 OnMessageReceived(*deferred_messages_copy[i]); | 52 OnMessageReceived(*deferred_messages_copy[i]); |
| 47 STLDeleteElements(&deferred_messages_copy); | 53 STLDeleteElements(&deferred_messages_copy); |
| 48 } | 54 } |
| 49 | 55 |
| 50 bool GpuVideoDecodeAccelerator::DeferMessageIfNeeded( | 56 bool GpuVideoDecodeAccelerator::DeferMessageIfNeeded( |
| 51 const IPC::Message& msg, bool* deferred) { | 57 const IPC::Message& msg, bool* deferred) { |
| 52 // Only consider deferring for message types that need it. | 58 // Only consider deferring for message types that need it. |
| 53 switch (msg.type()) { | 59 switch (msg.type()) { |
| 54 case AcceleratedVideoDecoderMsg_GetConfigs::ID: | 60 case AcceleratedVideoDecoderMsg_GetConfigs::ID: |
| 55 case AcceleratedVideoDecoderMsg_Initialize::ID: | 61 case AcceleratedVideoDecoderMsg_Initialize::ID: |
| 56 case AcceleratedVideoDecoderMsg_Decode::ID: | 62 case AcceleratedVideoDecoderMsg_Decode::ID: |
| 57 case AcceleratedVideoDecoderMsg_AssignTextures::ID: | 63 case AcceleratedVideoDecoderMsg_AssignGLESBuffers::ID: |
| 58 case AcceleratedVideoDecoderMsg_AssignSysmemBuffers::ID: | 64 case AcceleratedVideoDecoderMsg_AssignSysmemBuffers::ID: |
| 59 case AcceleratedVideoDecoderMsg_ReusePictureBuffer::ID: | 65 case AcceleratedVideoDecoderMsg_ReusePictureBuffer::ID: |
| 60 case AcceleratedVideoDecoderMsg_Flush::ID: | 66 case AcceleratedVideoDecoderMsg_Flush::ID: |
| 61 case AcceleratedVideoDecoderMsg_Abort::ID: | 67 case AcceleratedVideoDecoderMsg_Abort::ID: |
| 62 break; | 68 break; |
| 63 default: | 69 default: |
| 64 return false; | 70 return false; |
| 65 } | 71 } |
| 66 | 72 |
| 67 gpu::ReadWriteTokens tokens; | 73 gpu::ReadWriteTokens tokens; |
| (...skipping 14 matching lines...) |
| 82 if (!DeferMessageIfNeeded(msg, &deferred)) | 88 if (!DeferMessageIfNeeded(msg, &deferred)) |
| 83 return false; | 89 return false; |
| 84 if (deferred) | 90 if (deferred) |
| 85 return true; | 91 return true; |
| 86 | 92 |
| 87 bool handled = true; | 93 bool handled = true; |
| 88 IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg) | 94 IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg) |
| 89 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_GetConfigs, OnGetConfigs) | 95 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_GetConfigs, OnGetConfigs) |
| 90 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Initialize, OnInitialize) | 96 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Initialize, OnInitialize) |
| 91 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode) | 97 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode) |
| 92 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignTextures, | 98 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignGLESBuffers, |
| 93 OnAssignTextures) | 99 OnAssignGLESBuffers) |
| 94 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignSysmemBuffers, | 100 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignSysmemBuffers, |
| 95 OnAssignSysmemBuffers) | 101 OnAssignSysmemBuffers) |
| 96 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer, | 102 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer, |
| 97 OnReusePictureBuffer) | 103 OnReusePictureBuffer) |
| 98 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush) | 104 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush) |
| 99 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Abort, OnAbort) | 105 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Abort, OnAbort) |
| 100 IPC_MESSAGE_UNHANDLED(handled = false) | 106 IPC_MESSAGE_UNHANDLED(handled = false) |
| 101 IPC_END_MESSAGE_MAP() | 107 IPC_END_MESSAGE_MAP() |
| 102 return handled; | 108 return handled; |
| 103 } | 109 } |
| (...skipping 59 matching lines...) |
| 163 // Initialize, but can't call Initialize until we have some configs! | 169 // Initialize, but can't call Initialize until we have some configs! |
| 164 if (!video_decode_accelerator_.get()) | 170 if (!video_decode_accelerator_.get()) |
| 165 return; | 171 return; |
| 166 video_decode_accelerator_->GetConfigs(requested, matched); | 172 video_decode_accelerator_->GetConfigs(requested, matched); |
| 167 } | 173 } |
| 168 | 174 |
| 169 void GpuVideoDecodeAccelerator::OnInitialize( | 175 void GpuVideoDecodeAccelerator::OnInitialize( |
| 170 const gpu::ReadWriteTokens& /* tokens */, | 176 const gpu::ReadWriteTokens& /* tokens */, |
| 171 const std::vector<uint32>& configs) { | 177 const std::vector<uint32>& configs) { |
| 172 DCHECK(!video_decode_accelerator_.get()); | 178 DCHECK(!video_decode_accelerator_.get()); |
| 173 GpuVideoService::GetInstance()->InitializeVideoDecoder(decoder_route_id_); | 179 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) |
| 174 DCHECK(video_decode_accelerator_.get()); | 180 DCHECK(stub_ && stub_->scheduler()); |
| 181 OmxVideoDecodeAccelerator* omx_decoder = |
| 182 new OmxVideoDecodeAccelerator(this, MessageLoop::current()); |
| 183 omx_decoder->SetEglState( |
| 184 gfx::GLSurfaceEGL::GetDisplay(), |
| 185 stub_->scheduler()->decoder()->GetGLContext()->GetHandle()); |
| 186 video_decode_accelerator_.reset(omx_decoder); |
| 175 video_decode_accelerator_->Initialize(configs); | 187 video_decode_accelerator_->Initialize(configs); |
| 188 #else |
| 189 NOTIMPLEMENTED() << "HW video decode acceleration not available."; |
| 190 #endif // defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) |
| 176 } | 191 } |
| 177 | 192 |
| 178 void GpuVideoDecodeAccelerator::OnDecode( | 193 void GpuVideoDecodeAccelerator::OnDecode( |
| 179 const gpu::ReadWriteTokens&, /* tokens */ | 194 const gpu::ReadWriteTokens&, /* tokens */ |
| 180 base::SharedMemoryHandle handle, int32 id, int32 size) { | 195 base::SharedMemoryHandle handle, int32 id, int32 size) { |
| 181 DCHECK(video_decode_accelerator_.get()); | 196 DCHECK(video_decode_accelerator_.get()); |
| 182 video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size)); | 197 video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size)); |
| 183 } | 198 } |
| 184 | 199 |
| 185 void GpuVideoDecodeAccelerator::AssignGLESBuffers( | 200 void GpuVideoDecodeAccelerator::OnAssignGLESBuffers( |
| 186 const std::vector<media::GLESBuffer>& buffers) { | 201 const gpu::ReadWriteTokens& /* tokens */, |
| 187 // TODO(fischman,vrk): it's wonky that we handle the AssignTextures message by | 202 const std::vector<int32>& buffer_ids, |
| 188 // handing its contents to GpuVideoService which then turns around and calls | 203 const std::vector<uint32>& texture_ids, |
| 189 // this (public) method. Instead we should make GpuVideoService vend the | 204 const std::vector<gfx::Size>& sizes) { |
| 190 // translation method we need and use it directly. | 205 DCHECK(stub_ && stub_->scheduler()); // Ensure already Initialize()'d. |
| 191 DCHECK(video_decode_accelerator_.get()); | 206 gpu::gles2::GLES2Decoder* command_decoder = stub_->scheduler()->decoder(); |
| 207 |
| 208 std::vector<media::GLESBuffer> buffers; |
| 209 for (uint32 i = 0; i < buffer_ids.size(); ++i) { |
| 210 uint32 service_texture_id; |
| 211 if (!command_decoder->GetServiceTextureId( |
| 212 texture_ids[i], &service_texture_id)) { |
| 213 // TODO(vrk): Send an error for invalid GLES buffers. |
| 214 LOG(DFATAL) << "Failed to translate texture!"; |
| 215 return; |
| 216 } |
| 217 buffers.push_back(media::GLESBuffer( |
| 218 buffer_ids[i], sizes[i], service_texture_id)); |
| 219 } |
| 192 video_decode_accelerator_->AssignGLESBuffers(buffers); | 220 video_decode_accelerator_->AssignGLESBuffers(buffers); |
| 193 } | 221 } |
| 194 | 222 |
| 195 void GpuVideoDecodeAccelerator::OnAssignTextures( | |
| 196 const gpu::ReadWriteTokens& /* tokens */, | |
| 197 const std::vector<int32>& buffer_ids, | |
| 198 const std::vector<uint32>& texture_ids, | |
| 199 const std::vector<gfx::Size>& sizes) { | |
| 200 GpuVideoService* service = GpuVideoService::GetInstance(); | |
| 201 service->AssignTexturesToDecoder( | |
| 202 decoder_route_id_, buffer_ids, texture_ids, sizes); | |
| 203 } | |
| 204 | |
| 205 void GpuVideoDecodeAccelerator::OnAssignSysmemBuffers( | 223 void GpuVideoDecodeAccelerator::OnAssignSysmemBuffers( |
| 206 const gpu::ReadWriteTokens& /* tokens */, | 224 const gpu::ReadWriteTokens& /* tokens */, |
| 207 const std::vector<int32> buffer_ids, | 225 const std::vector<int32> buffer_ids, |
| 208 const std::vector<base::SharedMemoryHandle> data, | 226 const std::vector<base::SharedMemoryHandle> data, |
| 209 const std::vector<gfx::Size> sizes) { | 227 const std::vector<gfx::Size> sizes) { |
| 210 // TODO(vrk): Implement. | 228 // TODO(vrk): Implement. |
| 211 NOTIMPLEMENTED(); | 229 NOTIMPLEMENTED(); |
| 212 } | 230 } |
| 213 | 231 |
| 214 void GpuVideoDecodeAccelerator::OnReusePictureBuffer( | 232 void GpuVideoDecodeAccelerator::OnReusePictureBuffer( |
| (...skipping 37 matching lines...) |
| 252 | 270 |
| 253 void GpuVideoDecodeAccelerator::NotifyAbortDone() { | 271 void GpuVideoDecodeAccelerator::NotifyAbortDone() { |
| 254 if (!Send(new AcceleratedVideoDecoderHostMsg_AbortDone(host_route_id_))) | 272 if (!Send(new AcceleratedVideoDecoderHostMsg_AbortDone(host_route_id_))) |
| 255 LOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_AbortDone) failed"; | 273 LOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_AbortDone) failed"; |
| 256 } | 274 } |
| 257 | 275 |
| 258 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) { | 276 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) { |
| 259 DCHECK(sender_); | 277 DCHECK(sender_); |
| 260 return sender_->Send(message); | 278 return sender_->Send(message); |
| 261 } | 279 } |
| OLD | NEW |