Chromium Code Reviews| Index: content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
| diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..62e9a4ec71458079546b6f5237ef89dc0156f746 |
| --- /dev/null |
| +++ b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
| @@ -0,0 +1,241 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" |
| + |
| +#include <stdint.h> |
| + |
| +#include "base/bind.h" |
| +#include "base/logging.h" |
| +#include "base/memory/shared_memory.h" |
| +#include "base/single_thread_task_runner.h" |
| +#include "base/trace_event/trace_event.h" |
| +#include "content/common/gpu/gpu_channel.h" |
| +#include "content/common/gpu/gpu_messages.h" |
| +#include "ipc/ipc_message_macros.h" |
| +#include "ipc/message_filter.h" |
| +#include "media/filters/jpeg_parser.h" |
| +#include "ui/gfx/geometry/size.h" |
| + |
| +#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) |
| +#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h" |
| +#endif |
| + |
| +namespace base { |
| + |
| +void DefaultDeleter<content::GpuJpegDecodeAccelerator>::operator()( |
| + void* jpeg_decode_accelerator) const { |
| + static_cast<content::GpuJpegDecodeAccelerator*>(jpeg_decode_accelerator) |
| + ->Destroy(); |
| +} |
| + |
| +} // namespace base |
| + |
| +namespace content { |
| + |
| +class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter { |
|
piman
2015/05/18 22:46:19
I would like to avoid having a separate filter per
kcwu
2015/05/22 19:46:51
I'm trying to implement your suggestion but not ye
piman
2015/05/22 22:27:48
Agreed on that.
kcwu
2015/05/25 18:57:16
Done.
|
| + public: |
| + MessageFilter(GpuJpegDecodeAccelerator* owner, int32 host_route_id) |
| + : owner_(owner), host_route_id_(host_route_id) {} |
| + |
| + void OnChannelError() override { sender_ = NULL; } |
| + |
| + void OnChannelClosing() override { sender_ = NULL; } |
| + |
| + void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; } |
| + |
| + void OnFilterRemoved() override { |
| + owner_->OnFilterRemoved(); |
| + } |
| + |
| + bool OnMessageReceived(const IPC::Message& msg) override { |
| + if (msg.routing_id() != host_route_id_) |
| + return false; |
| + |
| + IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg) |
| + IPC_MESSAGE_FORWARD(AcceleratedJpegDecoderMsg_Decode, owner_, |
| + GpuJpegDecodeAccelerator::OnDecode) |
| + IPC_MESSAGE_UNHANDLED(return false;) |
| + IPC_END_MESSAGE_MAP() |
| + return true; |
| + } |
| + |
| + bool SendOnIOThread(IPC::Message* message) { |
| + DCHECK(!message->is_sync()); |
| + if (!sender_) { |
| + delete message; |
| + return false; |
| + } |
| + return sender_->Send(message); |
| + } |
| + |
| + protected: |
| + virtual ~MessageFilter() {} |
| + |
| + private: |
| + GpuJpegDecodeAccelerator* owner_; |
| + int32 host_route_id_; |
| + // The sender to which this filter was added. |
| + IPC::Sender* sender_; |
| +}; |
| + |
// Constructed on the child (main GPU) thread; |child_task_runner_| captures
// that thread's task runner so later calls can be thread-checked.
GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator(
    GpuChannel* channel,
    int32 host_route_id,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : channel_(channel),
      host_route_id_(host_route_id),
      // WaitableEvent(manual_reset=true, initially_signaled=false): Destroy()
      // waits on this until the filter's removal is signaled on the IO thread.
      filter_removed_(true, false),
      io_task_runner_(io_task_runner) {
  child_task_runner_ = base::ThreadTaskRunnerHandle::Get();
}
| + |
// Reached only via the "delete this" at the end of Destroy(); all teardown
// work has already happened there.
GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() {
}
| + |
// Child-thread message dispatch. Only Destroy is handled here; Decode is
// intercepted earlier, on the IO thread, by MessageFilter.
bool GpuJpegDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuJpegDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
| + |
// Creates the platform decoder, registers the IPC route, and installs the
// IO-thread message filter. Returns false if hardware decode is unavailable
// on this platform or any registration step fails. Must be called on the
// child thread.
bool GpuJpegDecodeAccelerator::Initialize() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK(!jpeg_decode_accelerator_.get());

// When adding more platforms, GpuJpegDecodeAcceleratorAdapter::Supported need
// update as well.
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
  // VA-API backed decoder; decode callbacks are delivered on |io_task_runner_|.
  jpeg_decode_accelerator_.reset(
      new VaapiJpegDecodeAccelerator(io_task_runner_));
#else
  DVLOG(1) << "HW JPEG decode acceleration not available.";
  return false;
#endif

  if (!channel_->AddRoute(host_route_id_, this)) {
    LOG(ERROR) << "GpuJpegDecodeAccelerator::Initialize(): "
                  "failed to add route";
    return false;
  }

  // The filter handles Decode messages on the IO thread; AddFilter triggers
  // MessageFilter::OnFilterAdded, which stores the sender.
  filter_ = new MessageFilter(this, host_route_id_);
  channel_->AddFilter(filter_.get());

  return jpeg_decode_accelerator_->Initialize(this);
}
| + |
| +void GpuJpegDecodeAccelerator::NotifyError( |
| + int32_t buffer_id, |
| + media::JpegDecodeAccelerator::Error error) { |
| + Send(new AcceleratedJpegDecoderHostMsg_NotifyError(host_route_id_, buffer_id, |
| + error)); |
| +} |
| + |
| +void GpuJpegDecodeAccelerator::VideoFrameReady(int32_t bitstream_buffer_id) { |
| + // This is called from JDA's decode thread. |
| + Send(new AcceleratedJpegDecoderHostMsg_VideoFrameReady(host_route_id_, |
| + bitstream_buffer_id)); |
| +} |
| + |
| +void DecodeFinished(scoped_ptr<base::SharedMemory> shm) { |
| + // Do nothing. Because VideoFrame is backed by |shm|, the purpose of this |
| + // function is to just keep reference of |shm| to make sure it lives util |
| + // decode finishes. |
| +} |
| + |
// Handles a Decode request. Called on the IO thread (forwarded here by
// MessageFilter). Maps the caller-supplied output shared memory, wraps it in
// a VideoFrame, and hands both input and output to the platform decoder.
// Errors are reported to the host via NotifyError().
void GpuJpegDecodeAccelerator::OnDecode(
    const AcceleratedJpegDecoderMsg_Decode_Params& params) {
  DCHECK(io_task_runner_->BelongsToCurrentThread());
  DCHECK(jpeg_decode_accelerator_.get());
  TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::OnDecode");

  // Negative buffer ids are reserved/invalid; reject before touching memory.
  if (params.input_buffer_id < 0) {
    LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id
               << " out of range";
    NotifyError(params.input_buffer_id,
                media::JpegDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  media::BitstreamBuffer input_buffer(params.input_buffer_id,
                                      params.input_buffer_handle,
                                      params.input_buffer_size);

  // Map the output buffer read-write (second argument: read_only == false).
  scoped_ptr<base::SharedMemory> output_shm(
      new base::SharedMemory(params.output_video_frame_handle, false));
  if (!output_shm->Map(params.output_buffer_size)) {
    LOG(ERROR) << "Could not map output shared memory for input buffer id "
               << params.input_buffer_id;
    NotifyError(params.input_buffer_id,
                media::JpegDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }

  uint8* shm_memory = reinterpret_cast<uint8*>(output_shm->memory());
  // Wrap the mapped memory as an I420 frame. Ownership of |output_shm| moves
  // into the DecodeFinished callback so the mapping outlives the decode.
  scoped_refptr<media::VideoFrame> frame =
      media::VideoFrame::WrapExternalPackedMemory(
          media::VideoFrame::I420,
          params.coded_size,
          gfx::Rect(params.coded_size),
          params.coded_size,
          shm_memory,
          params.output_buffer_size,
          params.output_video_frame_handle,
          0,
          base::TimeDelta(),
          base::Bind(DecodeFinished, base::Passed(&output_shm)));

  // Wrapping fails if the buffer is too small for the coded size; report it
  // rather than crashing in the decoder.
  if (!frame.get()) {
    LOG(ERROR) << "Could not create VideoFrame for input buffer id "
               << params.input_buffer_id;
    NotifyError(params.input_buffer_id,
                media::JpegDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }

  jpeg_decode_accelerator_->Decode(input_buffer, frame);
}
| + |
| +void GpuJpegDecodeAccelerator::OnDestroy() { |
| + DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| + DCHECK(jpeg_decode_accelerator_.get()); |
| + Destroy(); |
| +} |
| + |
// Called (via MessageFilter::OnFilterRemoved) on the IO thread once the
// filter has been detached from the channel. Signaling |filter_removed_|
// unblocks the Wait() in Destroy() on the child thread.
void GpuJpegDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  filter_removed_.Signal();
}
| + |
// Tears down the decoder and deletes |this|. Must run on the child thread.
// Blocks until the IO-thread filter is removed so no Decode message can race
// with the decoder's destruction.
void GpuJpegDecodeAccelerator::Destroy() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  // We cannot destroy the JDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with JDA gone.
  // We cannot simply check for existence of JDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the JDA after it arrives and before returning.
  if (filter_.get()) {
    channel_->RemoveFilter(filter_.get());
    // Blocks until OnFilterRemoved() signals from the IO thread.
    filter_removed_.Wait();
  }

  channel_->RemoveRoute(host_route_id_);
  channel_->ReleaseJpegDecoder(host_route_id_);
  jpeg_decode_accelerator_.reset();

  // Matches the Destroy()-based deleter in namespace base above; nothing may
  // touch members after this line.
  delete this;
}
| + |
| +bool GpuJpegDecodeAccelerator::Send(IPC::Message* message) { |
| + if (io_task_runner_->BelongsToCurrentThread()) |
| + return filter_->SendOnIOThread(message); |
| + return channel_->Send(message); |
| +} |
| + |
| +} // namespace content |