Index: content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
diff --git a/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..7488ead66b49045e538163948e753dd7bc0ce201 |
--- /dev/null |
+++ b/content/common/gpu/media/gpu_jpeg_decode_accelerator.cc |
@@ -0,0 +1,347 @@ |
+// Copyright 2015 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" |
+ |
+#include <stdint.h> |
+ |
+#include <map> |
+ |
+#include "base/bind.h" |
+#include "base/logging.h" |
+#include "base/memory/shared_memory.h" |
+#include "base/single_thread_task_runner.h" |
+#include "base/stl_util.h" |
+#include "base/synchronization/waitable_event.h" |
+#include "base/trace_event/trace_event.h" |
+#include "content/common/gpu/gpu_channel.h" |
+#include "content/common/gpu/gpu_messages.h" |
+#include "ipc/ipc_message_macros.h" |
+#include "ipc/message_filter.h" |
+#include "media/filters/jpeg_parser.h" |
+#include "ui/gfx/geometry/size.h" |
+ |
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) |
+#include "content/common/gpu/media/vaapi_jpeg_decode_accelerator.h" |
+#endif |
+ |
+namespace { |
+ |
+void DecodeFinished(scoped_ptr<base::SharedMemory> shm) { |
+  // Do nothing. Because the VideoFrame is backed by |shm|, the sole purpose |
+  // of this function is to keep a reference to |shm| so that it lives until |
+  // the decode finishes. |
+} |
+ |
+} // namespace |
+ |
+namespace content { |
+ |
+class GpuJpegDecodeAccelerator::Client |
+    : public media::JpegDecodeAccelerator::Client { |
[piman 2015/05/28 22:01:40] nit: can you make this NonThreadSafe, and check th…
[kcwu 2015/05/29 11:11:25] Done.
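For reference, a rough sketch of the NonThreadSafe variant being suggested (illustrative only; it assumes base::NonThreadSafe from base/threading/non_thread_safe.h and is not the committed code):

    class GpuJpegDecodeAccelerator::Client
        : public media::JpegDecodeAccelerator::Client,
          public base::NonThreadSafe {
     public:
      void VideoFrameReady(int32_t bitstream_buffer_id) override {
        DCHECK(CalledOnValidThread());  // All client methods run on one thread.
        if (owner_)
          owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id,
                                     media::JpegDecodeAccelerator::NO_ERROR);
      }
      // ... the remaining methods gain the same DCHECK ...
    };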
+ public: |
+ Client(content::GpuJpegDecodeAccelerator* owner, int32 route_id) |
+ : owner_(owner->AsWeakPtr()), route_id_(route_id) {} |
+ |
+ // media::JpegDecodeAccelerator::Client implementation. |
+ void VideoFrameReady(int32_t bitstream_buffer_id) override { |
+ if (owner_) |
+ owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, |
+ media::JpegDecodeAccelerator::NO_ERROR); |
+ } |
+ |
+ void NotifyError(int32_t bitstream_buffer_id, |
+ media::JpegDecodeAccelerator::Error error) override { |
+ if (owner_) |
+ owner_->NotifyDecodeStatus(route_id_, bitstream_buffer_id, error); |
+ } |
+ |
+ void Decode(const media::BitstreamBuffer& bitstream_buffer, |
+ const scoped_refptr<media::VideoFrame>& video_frame) { |
+ DCHECK(accelerator_); |
+ accelerator_->Decode(bitstream_buffer, video_frame); |
+ } |
+ |
+ void set_accelerator(scoped_ptr<media::JpegDecodeAccelerator> accelerator) { |
+ accelerator_ = accelerator.Pass(); |
+ } |
+ |
+ private: |
+ base::WeakPtr<content::GpuJpegDecodeAccelerator> owner_; |
+ int32 route_id_; |
+ scoped_ptr<media::JpegDecodeAccelerator> accelerator_; |
+}; |
+ |
+// Creation, destruction, and RemoveClient run on the child thread. All other |
+// methods run on the IO thread. |
+class GpuJpegDecodeAccelerator::MessageFilter : public IPC::MessageFilter { |
+ public: |
+ explicit MessageFilter(GpuJpegDecodeAccelerator* owner) |
+ : owner_(owner->AsWeakPtr()), |
+ child_task_runner_(owner_->child_task_runner_), |
+        io_task_runner_(owner_->io_task_runner_), |
+        sender_(nullptr), |
+        filter_removed_(true, false) {} |
+ |
+ void OnChannelError() override { sender_ = nullptr; } |
+ |
+ void OnChannelClosing() override { sender_ = nullptr; } |
+ |
+ void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; } |
+ |
+ void OnFilterRemoved() override { |
+    // After this, |this| can safely be deleted on the child thread. |
+ filter_removed_.Signal(); |
+ } |
+ |
+ bool OnMessageReceived(const IPC::Message& msg) override { |
+ const int32 route_id = msg.routing_id(); |
+    if (client_map_.count(route_id) == 0) |
[piman 2015/05/28 22:01:39] nit: if (client_map_.find(route_id) == client_map_.end())
[kcwu 2015/05/29 11:11:24] Done.
+ return false; |
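That is, the suggested form would look roughly like this (sketch, not the committed code; find() also yields the mapped value without a second lookup, should it be needed):

    const auto it = client_map_.find(route_id);
    if (it == client_map_.end())
      return false;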
+ |
+ bool handled = true; |
+ IPC_BEGIN_MESSAGE_MAP_WITH_PARAM(MessageFilter, msg, &route_id) |
+ IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Decode, OnDecodeOnIOThread) |
+ IPC_MESSAGE_HANDLER(AcceleratedJpegDecoderMsg_Destroy, |
+ OnDestroyOnIOThread) |
+ IPC_MESSAGE_UNHANDLED(handled = false) |
+ IPC_END_MESSAGE_MAP() |
+ return handled; |
+ } |
+ |
+ bool SendOnIOThread(IPC::Message* message) { |
+ DCHECK(!message->is_sync()); |
+ if (!sender_) { |
+ delete message; |
+ return false; |
+ } |
+ return sender_->Send(message); |
+ } |
+ |
+ void AddClientOnIOThread(int32 route_id, |
+ Client* client, |
+ IPC::Message* reply_msg) { |
+ DCHECK(io_task_runner_->BelongsToCurrentThread()); |
+ DCHECK(client_map_.count(route_id) == 0); |
+ |
+ client_map_[route_id] = client; |
+ GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, true); |
+ SendOnIOThread(reply_msg); |
+ } |
+ |
+ void OnDestroyOnIOThread(const int32* route_id) { |
+ DCHECK(io_task_runner_->BelongsToCurrentThread()); |
+ const auto& it = client_map_.find(*route_id); |
+ DCHECK(it != client_map_.end()); |
+ Client* client = it->second; |
+ DCHECK(client); |
+ client_map_.erase(it); |
+ |
+ child_task_runner_->PostTask( |
+ FROM_HERE, base::Bind(&MessageFilter::DestroyClient, this, client)); |
+ } |
+ |
+ void DestroyClient(Client* client) { |
+ DCHECK(child_task_runner_->BelongsToCurrentThread()); |
+ delete client; |
+ if (owner_) |
+ owner_->ClientRemoved(); |
+ } |
+ |
+ void NotifyDecodeStatusOnIOThread(int32 route_id, |
+ int32_t buffer_id, |
+ media::JpegDecodeAccelerator::Error error) { |
+ DCHECK(io_task_runner_->BelongsToCurrentThread()); |
+ SendOnIOThread(new AcceleratedJpegDecoderHostMsg_DecodeAck( |
+ route_id, buffer_id, error)); |
+ } |
+ |
+ void OnDecodeOnIOThread( |
+ const int32* route_id, |
+ const AcceleratedJpegDecoderMsg_Decode_Params& params) { |
+ DCHECK(io_task_runner_->BelongsToCurrentThread()); |
+ DCHECK(route_id); |
+ TRACE_EVENT0("jpeg", "GpuJpegDecodeAccelerator::MessageFilter::OnDecode"); |
+ |
+ if (params.input_buffer_id < 0) { |
+ LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id |
+ << " out of range"; |
+ NotifyDecodeStatusOnIOThread( |
+ *route_id, params.input_buffer_id, |
+ media::JpegDecodeAccelerator::INVALID_ARGUMENT); |
+ return; |
+ } |
+ |
+ media::BitstreamBuffer input_buffer(params.input_buffer_id, |
+                                        params.input_buffer_handle, |
[piman 2015/05/28 22:01:40] input_buffer_handle is only released by Decode, bu…
[kcwu 2015/05/29 11:11:25] Because SharedMemory takes ownership of SharedMemoryHandle…
[piman 2015/06/01 22:33:36] The documentation in JpegDecodeAccelerator::Decode…
[kcwu 2015/06/02 15:07:26] The "ownership" is referring to the underlying memory…
+ params.input_buffer_size); |
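Regarding the handle-release discussion above, one way to guarantee the input handle is released on every path would be to adopt it into a scoped object up front (hypothetical sketch, assuming base::SharedMemory takes ownership of the handle passed to its constructor; this is not what the patch does):

    // Adopt the handle so the early returns below cannot leak it.
    scoped_ptr<base::SharedMemory> input_shm(
        new base::SharedMemory(params.input_buffer_handle, true /* read_only */));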
+ |
+ scoped_ptr<base::SharedMemory> output_shm( |
+        new base::SharedMemory(params.output_video_frame_handle, false)); |
[piman 2015/05/28 22:01:40] You probably want to do this before the first early return.
[kcwu 2015/05/29 11:11:24] Done.
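The suggested reordering, sketched (illustrative, not the final patch): construct |output_shm| before the input_buffer_id check so the handle is released even when that early return fires.

    scoped_ptr<base::SharedMemory> output_shm(
        new base::SharedMemory(params.output_video_frame_handle, false));
    if (params.input_buffer_id < 0) {
      LOG(ERROR) << "BitstreamBuffer id " << params.input_buffer_id
                 << " out of range";
      NotifyDecodeStatusOnIOThread(
          *route_id, params.input_buffer_id,
          media::JpegDecodeAccelerator::INVALID_ARGUMENT);
      return;  // |output_shm| goes out of scope and releases the handle.
    }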
+ if (!output_shm->Map(params.output_buffer_size)) { |
+ LOG(ERROR) << "Could not map output shared memory for input buffer id " |
+ << params.input_buffer_id; |
+ NotifyDecodeStatusOnIOThread( |
+ *route_id, params.input_buffer_id, |
+ media::JpegDecodeAccelerator::PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+    uint8* shm_memory = reinterpret_cast<uint8*>(output_shm->memory()); |
[piman 2015/05/28 22:01:40] nit: uint8_t instead of uint8, also you can use st…
[kcwu 2015/05/29 11:11:24] This aligns to VideoFrame::WrapExternalPackedMemory…
[piman 2015/06/01 22:33:35] Please use uint8_t in new code. uint8 is deprecated.
[kcwu 2015/06/02 15:07:26] Done.
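The nit in code form (sketch; <stdint.h> is already included at the top of the file, and memory() returns void*, so static_cast suffices):

    uint8_t* shm_memory = static_cast<uint8_t*>(output_shm->memory());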
+ scoped_refptr<media::VideoFrame> frame = |
+ media::VideoFrame::WrapExternalPackedMemory( |
+ media::VideoFrame::I420, |
+ params.coded_size, |
+ gfx::Rect(params.coded_size), |
+ params.coded_size, |
+ shm_memory, |
+ params.output_buffer_size, |
+ params.output_video_frame_handle, |
+ 0, |
+ base::TimeDelta(), |
+ base::Bind(DecodeFinished, base::Passed(&output_shm))); |
+ |
+ if (!frame.get()) { |
+ LOG(ERROR) << "Could not create VideoFrame for input buffer id " |
+ << params.input_buffer_id; |
+ NotifyDecodeStatusOnIOThread( |
+ *route_id, params.input_buffer_id, |
+ media::JpegDecodeAccelerator::PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ DCHECK_GT(client_map_.count(*route_id), 0u); |
+ Client* client = client_map_[*route_id]; |
+ client->Decode(input_buffer, frame); |
+ } |
+ |
+ void WaitForRemoved() { filter_removed_.Wait(); } |
+ |
+ protected: |
+ ~MessageFilter() override { |
+ if (client_map_.empty()) |
+ return; |
+ |
+ if (child_task_runner_->BelongsToCurrentThread()) { |
+ STLDeleteValues(&client_map_); |
+ } else { |
+      // Make sure the |Client| objects are deleted on the child thread. |
+ scoped_ptr<std::map<int32, Client*>> client_map( |
+ new std::map<int32, Client*>()); |
+      std::swap(client_map_, *client_map); |
[piman 2015/05/28 22:01:39] nit: client_map->swap(client_map_);
[kcwu 2015/05/29 11:11:25] Done.
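In context, the suggested spelling (sketch; the same constant-time exchange of contents, just calling swap() on the heap-allocated map):

    scoped_ptr<std::map<int32, Client*>> client_map(
        new std::map<int32, Client*>());
    client_map->swap(client_map_);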
+ |
+ child_task_runner_->PostTask( |
+ FROM_HERE, |
+ base::Bind(&DeleteClientMapOnChildThread, base::Passed(&client_map))); |
+ } |
+ } |
+ |
+ private: |
+  // Must be static because this method runs after the destructor. |
+ static void DeleteClientMapOnChildThread( |
+ scoped_ptr<std::map<int32, Client*>> client_map) { |
+ STLDeleteValues(client_map.get()); |
+ } |
+ |
+ base::WeakPtr<GpuJpegDecodeAccelerator> owner_; |
+ |
+ // GPU child task runner. |
+ scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_; |
+ |
+ // GPU IO task runner. |
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; |
+ |
+ // The sender to which this filter was added. |
+ IPC::Sender* sender_; |
+ |
+  // A map from route id to Client. Except in the destructor (which may run |
+  // on the child thread), |client_map_| should only be accessed on the IO |
+  // thread. |
+  std::map<int32, Client*> client_map_; |
[piman 2015/05/28 22:01:39] nit: hash_map. You can also make a typedef to avoid…
[kcwu 2015/05/29 11:11:24] Done.
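One possible shape for that suggestion (illustrative sketch, assuming base::hash_map from base/containers/hash_tables.h; not the committed code):

    typedef base::hash_map<int32, Client*> ClientMap;
    ClientMap client_map_;

The same ClientMap typedef could then replace the std::map spelled out in the destructor and in DeleteClientMapOnChildThread.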
+ |
+  // Used to wait for |this| to be removed from the IPC channel before we can |
+  // safely delete |this|. |
+ base::WaitableEvent filter_removed_; |
+}; |
+ |
+GpuJpegDecodeAccelerator::GpuJpegDecodeAccelerator( |
+ GpuChannel* channel, |
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) |
+ : channel_(channel), |
+ child_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
+ io_task_runner_(io_task_runner), |
+ client_number_(0) { |
+} |
+ |
+GpuJpegDecodeAccelerator::~GpuJpegDecodeAccelerator() { |
+ DCHECK(CalledOnValidThread()); |
+ if (filter_) { |
+ channel_->RemoveFilter(filter_.get()); |
+    filter_->WaitForRemoved(); |
[piman 2015/05/28 22:01:39] As discussed, you don't need this.
[kcwu 2015/05/29 11:11:24] Done.
+ } |
+} |
+ |
+bool GpuJpegDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) { |
+  // Messages are actually handled by the filter on the IO thread. |
+ return false; |
+} |
+ |
+void GpuJpegDecodeAccelerator::AddClient(int32 route_id, |
+ IPC::Message* reply_msg) { |
+ DCHECK(CalledOnValidThread()); |
+ scoped_ptr<media::JpegDecodeAccelerator> accelerator; |
+ |
+// When adding more platforms, GpuJpegDecoder::Supported needs to be updated |
+// as well. |
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) |
+ accelerator.reset(new VaapiJpegDecodeAccelerator(io_task_runner_)); |
+#else |
+ DVLOG(1) << "HW JPEG decode acceleration not available."; |
+#endif |
+ |
+ scoped_ptr<Client> client(new Client(this, route_id)); |
+ if (!accelerator.get() || !accelerator->Initialize(client.get())) { |
+ DLOG(ERROR) << "JPEG accelerator Initialize failed"; |
+ GpuMsg_CreateJpegDecoder::WriteReplyParams(reply_msg, false); |
+ Send(reply_msg); |
+ return; |
+ } |
+ client->set_accelerator(accelerator.Pass()); |
+ |
+ if (!filter_) { |
+ DCHECK_EQ(client_number_, 0); |
+ filter_ = new MessageFilter(this); |
+ // This should be before AddClientOnIOThread. |
+ channel_->AddFilter(filter_.get()); |
+ } |
+ client_number_++; |
+ |
+ io_task_runner_->PostTask( |
+ FROM_HERE, base::Bind(&MessageFilter::AddClientOnIOThread, filter_, |
+                          route_id, client.release(), reply_msg)); |
[piman 2015/05/28 22:01:39] can you use Passed(client) instead?
[kcwu 2015/05/29 11:11:25] Does io_task_runner guarantee the task of RefCount…
[piman 2015/06/01 22:33:36] Fair enough, the message loop would be destroyed o…
[kcwu 2015/06/02 15:07:25] Done.
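The Passed() variant being discussed would look roughly like this (sketch, not the committed code; AddClientOnIOThread would then take a scoped_ptr<Client>, and if the posted task were dropped the Client would be destroyed wherever the task is deleted rather than leaked):

    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&MessageFilter::AddClientOnIOThread, filter_, route_id,
                   base::Passed(&client), reply_msg));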
+} |
+ |
+void GpuJpegDecodeAccelerator::NotifyDecodeStatus( |
+ int32 route_id, |
+ int32_t buffer_id, |
+ media::JpegDecodeAccelerator::Error error) { |
+ DCHECK(CalledOnValidThread()); |
+ Send(new AcceleratedJpegDecoderHostMsg_DecodeAck(route_id, buffer_id, error)); |
+} |
+ |
+void GpuJpegDecodeAccelerator::ClientRemoved() { |
+ DCHECK(CalledOnValidThread()); |
+ DCHECK_GT(client_number_, 0); |
+ client_number_--; |
+ if (client_number_ == 0) { |
+ channel_->RemoveFilter(filter_.get()); |
+ filter_ = nullptr; |
+ } |
+} |
+ |
+bool GpuJpegDecodeAccelerator::Send(IPC::Message* message) { |
+ DCHECK(CalledOnValidThread()); |
+ return channel_->Send(message); |
+} |
+ |
+} // namespace content |