Index: content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
new file mode 100644
index 0000000000000000000000000000000000000000..94cd88e0cadfca066f8e69341a21cfc7f62c684d
--- /dev/null
+++ b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc
@@ -0,0 +1,257 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
+
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/trace_event/trace_event.h"
+#include "content/common/gpu/gpu_channel.h"
+#include "content/common/gpu/gpu_messages.h"
+#include "ipc/ipc_message_macros.h"
+#include "ipc/message_filter.h"
+#include "media/filters/jpeg_parser.h"
+#include "ui/gfx/geometry/size.h"
+
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h"
+#endif
+
+namespace base {
+
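+// A scoped_ptr holding a GpuJpegDecodeAccelerator must destroy it via
+// Destroy() rather than the destructor, hence this deleter specialization.
+// The parameter is void*, presumably so the deleter can be declared where
+// the class is only forward-declared.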
+void DefaultDeleter<content::GpuJpegDecodeAccelerator>::operator()(
+    void* jpeg_decode_accelerator) const {
+  static_cast<content::GpuJpegDecodeAccelerator*>(jpeg_decode_accelerator)
+      ->Destroy();
+}
+
+}  // namespace base
+
+namespace content {
+
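+// Handles AcceleratedJpegDecoderMsg_Decode on the IO thread, so decode
+// requests avoid a hop to the child thread; Destroy is still routed to the
+// child thread via GpuJpegDecodeAccelerator::OnMessageReceived().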
+class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
+ public:
+  MessageFilter(GpuJpegDecodeAccelerator* owner, int32 host_route_id)
+      : owner_(owner), host_route_id_(host_route_id), sender_(NULL) {}
+
+  void OnChannelError() override { sender_ = NULL; }
+
+  void OnChannelClosing() override { sender_ = NULL; }
+
+  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }
+
+  void OnFilterRemoved() override {
+    owner_->OnFilterRemoved();
+  }
+
+  bool OnMessageReceived(const IPC::Message& msg) override {
+    if (msg.routing_id() != host_route_id_)
+      return false;
+
+    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
+      IPC_MESSAGE_FORWARD(AcceleratedJpegDecoderMsg_Decode, owner_,
+                          GpuJpegDecodeAccelerator::OnDecode)
+      IPC_MESSAGE_UNHANDLED(return false;)
+    IPC_END_MESSAGE_MAP()
+    return true;
+  }
+
+  bool SendOnIOThread(IPC::Message* message) {
+    DCHECK(!message->is_sync());
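+    // Send() takes ownership of |message|, so if the channel is gone the
+    // message must be deleted here to avoid leaking it.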
+    if (!sender_) {
+      delete message;
+      return false;
+    }
+    return sender_->Send(message);
+  }
+
+ protected:
+  virtual ~MessageFilter() {}
+
+ private:
+  GpuJpegDecodeAccelerator* owner_;
+  int32 host_route_id_;
+  // The sender to which this filter was added.
+  IPC::Sender* sender_;
+};
+
+GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator(
+    GpuChannel* channel,
+    int32 host_route_id,
+    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
+    : channel_(channel),
+      host_route_id_(host_route_id),
+      filter_removed_(true, false),
+      io_message_loop_(io_message_loop) {
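+  // Remember the child thread so later calls can assert the correct thread.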
+  child_message_loop_ = base::MessageLoopProxy::current();
+}
+
+GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() {
+}
+
+bool GpuJpegDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
+  bool handled = true;
+  IPC_BEGIN_MESSAGE_MAP(GpuJpegDecodeAccelerator, msg)
+    IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Destroy, OnDestroy)
+    IPC_MESSAGE_UNHANDLED(handled = false)
+  IPC_END_MESSAGE_MAP()
+  return handled;
+}
+
+bool GpuJpegDecodeAccelerator::Initialize() {
+  DCHECK(child_message_loop_->BelongsToCurrentThread());
+  DCHECK(!jpeg_decode_accelerator_.get());
+
+// When adding more platforms, GpuJpegDecodeAcceleratorAdapter::Supported
+// needs to be updated as well.
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+  jpeg_decode_accelerator_.reset(
+      new VaapiJpegDecodeAccelerator(io_message_loop_));
+#else
+  DVLOG(1) << "HW JPEG decode acceleration not available.";
+  return false;
+#endif
+
+  if (!channel_->AddRoute(host_route_id_, this)) {
+    LOG(ERROR) << "GpuJpegDecodeAccelerator::Initialize(): "
+                  "failed to add route";
+    return false;
+  }
+
+  filter_ = new MessageFilter(this, host_route_id_);
+  channel_->AddFilter(filter_.get());
+
+  return jpeg_decode_accelerator_->Initialize(this);
+}
+
+void GpuJpegDecodeAccelerator::NotifyError(
+    int32_t buffer_id,
+    media::JpegDecodeAccelerator::Error error) {
+  Send(new AcceleratedJpegDecoderHostMsg_NotifyError(host_route_id_, buffer_id,
+                                                     error));
+}
+
+void GpuJpegDecodeAccelerator::VideoFrameReady(int32_t bitstream_buffer_id) {
+  // This is called from JDA's decode thread.
+  Send(new AcceleratedJpegDecoderHostMsg_VideoFrameReady(host_route_id_,
+                                                         bitstream_buffer_id));
+}
+
+static void DecodeFinished(scoped_ptr<base::SharedMemory> shm) {
+  // Do nothing. Because the VideoFrame is backed by |shm|, the sole purpose
+  // of this function is to keep a reference to |shm| so that it lives until
+  // the decode finishes.
+}
+
+void GpuJpegDecodeAccelerator::OnDecode(
+    const AcceleratedJpegDecoderMsg_Decode_Params& params) {
+  DCHECK(io_message_loop_->BelongsToCurrentThread());
+  DCHECK(jpeg_decode_accelerator_.get());
+  TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::OnDecode");
+
+  if (params.input_buffer_id < 0) {
+    LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id
+               << " out of range";
+    NotifyError(params.input_buffer_id,
+                media::JpegDecodeAccelerator::INVALID_ARGUMENT);
+    return;
+  }
+
+  media::BitstreamBuffer input_buffer(params.input_buffer_id,
+                                      params.input_buffer_handle,
+                                      params.input_buffer_size);
+
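+  // Map the output shared memory writable (the second constructor argument
+  // is |read_only|).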
+  scoped_ptr<base::SharedMemory> output_shm(
+      new base::SharedMemory(params.output_video_frame_handle, false));
+  if (!output_shm->Map(params.output_buffer_size)) {
+    LOG(ERROR) << "Could not map output shared memory for input buffer id "
+               << params.input_buffer_id;
+    NotifyError(params.input_buffer_id,
+                media::JpegDecodeAccelerator::PLATFORM_FAILURE);
+    return;
+  }
+
+  uint8_t* shm_memory = reinterpret_cast<uint8_t*>(output_shm->memory());
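+  // Wrap the mapped memory in an I420 VideoFrame; the no-op destruction
+  // callback keeps |output_shm| alive until the decoder is done with it.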
+  scoped_refptr<media::VideoFrame> frame =
+      media::VideoFrame::WrapExternalPackedMemory(
+          media::VideoFrame::I420,
+          params.coded_size,
+          gfx::Rect(params.coded_size),
+          params.coded_size,
+          shm_memory,
+          params.output_buffer_size,
+          params.output_video_frame_handle,
+          0,
+          base::TimeDelta(),
+          base::Bind(DecodeFinished, base::Passed(&output_shm)));
+
+  if (!frame.get()) {
+    LOG(ERROR) << "Could not create VideoFrame for input buffer id "
+               << params.input_buffer_id;
+    NotifyError(params.input_buffer_id,
+                media::JpegDecodeAccelerator::PLATFORM_FAILURE);
+    return;
+  }
+
+  jpeg_decode_accelerator_->Decode(input_buffer, frame);
+}
+
+void GpuJpegDecodeAccelerator::OnDestroy() {
+  DCHECK(child_message_loop_->BelongsToCurrentThread());
+  DCHECK(jpeg_decode_accelerator_.get());
+  Destroy();
+}
+
+void GpuJpegDecodeAccelerator::OnFilterRemoved() {
+  // The filter is gone; unblock the Destroy() call waiting on this event.
+  filter_removed_.Signal();
+}
+
+void GpuJpegDecodeAccelerator::Destroy() {
+  DCHECK(child_message_loop_->BelongsToCurrentThread());
+  // The JDA must not be destroyed before the IO thread message filter has
+  // been removed, because incoming messages cannot be serviced once the JDA
+  // is gone. We also cannot simply check for the JDA's existence on the IO
+  // thread, because we don't want to synchronize the IO thread with the
+  // ChildThread. So we wait for the OnFilterRemoved callback here instead,
+  // and reset the JDA after it arrives and before returning.
+  if (filter_.get()) {
+    channel_->RemoveFilter(filter_.get());
+    filter_removed_.Wait();
+  }
+
+  channel_->RemoveRoute(host_route_id_);
+  channel_->ReleaseJpegDecoder(host_route_id_);
+  jpeg_decode_accelerator_.reset();
+
+  delete this;
+}
+
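+// Sends directly from the IO thread via the filter when possible; otherwise
+// sends through the channel on the child thread.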
+bool GpuJpegDecodeAccelerator::Send(IPC::Message* message) {
+  if (io_message_loop_->BelongsToCurrentThread())
+    return filter_->SendOnIOThread(message);
+  return channel_->Send(message);
+}
+
+}  // namespace content