Index: content/common/gpu/media/dxva_video_decode_accelerator.cc |
=================================================================== |
--- content/common/gpu/media/dxva_video_decode_accelerator.cc (revision 0) |
+++ content/common/gpu/media/dxva_video_decode_accelerator.cc (revision 0) |
@@ -0,0 +1,799 @@ |
+// Copyright (c) 2011 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/common/gpu/media/dxva_video_decode_accelerator.h" |
+ |
+#if !defined(OS_WIN) |
+#error This file should only be built on Windows. |
+#endif // !defined(OS_WIN) |
+ |
+#include <ks.h> |
+#include <codecapi.h> |
+#include <d3dx9tex.h> |
+#include <mfapi.h> |
+#include <mferror.h> |
+#include <wmcodecdsp.h> |
+ |
+#include "base/bind.h" |
+#include "base/callback.h" |
+#include "base/debug/trace_event.h" |
+#include "base/logging.h" |
+#include "base/memory/scoped_handle.h" |
+#include "base/memory/scoped_ptr.h" |
+#include "base/message_loop.h" |
+#include "base/shared_memory.h" |
+#include "media/video/video_decode_accelerator.h" |
+#include "third_party/angle/include/GLES2/gl2.h" |
+#include "third_party/angle/include/GLES2/gl2ext.h" |
+ |
+static const int kNumPictureBuffers = 5; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Doco origin of this magic 5?
ananta
2011/12/17 00:40:25
Done.
|
+ |
+#define RETURN_ON_HR_FAILURE(result, log, ret_val) \ |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/result/hr/
(it's funny that it works anyway; yay
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I'm surprised this doesn't NotifyError.
ananta
2011/12/17 00:40:25
Done.
|
+ do { \ |
+ if (FAILED(hr)) { \ |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
You also have lots of occurrences of error-checkin
ananta
2011/12/17 00:40:25
Added two variants of these macros. One which noti
|
+ DVLOG(1) << log; \ |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Not worth emitting result itself?
ananta
2011/12/17 00:40:25
The callers now pass in the formatted string if ne
|
+ return ret_val; \ |
+ } \ |
+ } while (0) |
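ananta's reply above mentions adding two variants of this macro, one of which also notifies the client of the error. A hedged sketch of what that error-notifying variant could look like (the name, the extra error-code argument, and the HRESULT logging are assumptions about the later patch set, not the landed code):

// Sketch only: assumed name and arguments. This form can only be used inside
// DXVAVideoDecodeAccelerator member functions because it calls NotifyError().
#define RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, log, error_code, ret_val)  \
  do {                                                                 \
    if (FAILED(hr)) {                                                  \
      DVLOG(1) << log << ", HRESULT: 0x" << std::hex << hr;            \
      NotifyError(error_code);                                         \
      return ret_val;                                                  \
    }                                                                  \
  } while (0)

Logging the failing HRESULT also covers the "Not worth emitting result itself?" remark above.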
+ |
+static IMFSample* CreateEmptySample() { |
+ HRESULT hr = E_FAIL; |
+ base::win::ScopedComPtr<IMFSample> sample; |
+ hr = MFCreateSample(sample.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Unable to create an empty sample", NULL); |
+ return sample.Detach(); |
+} |
+ |
+// Creates a Media Foundation sample with one buffer of length |buffer_length| |
+// on an |align|-byte boundary. Alignment must be a power of 2 or 0. |
+static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) { |
+ CHECK_GT(buffer_length, 0); |
+ base::win::ScopedComPtr<IMFSample> sample; |
+ sample.Attach(CreateEmptySample()); |
+ if (!sample.get()) |
+ return NULL; |
+ base::win::ScopedComPtr<IMFMediaBuffer> buffer; |
+ HRESULT hr = E_FAIL; |
+ if (align == 0) { |
+ // Note that MFCreateMemoryBuffer is the same as MFCreateAlignedMemoryBuffer |
+ // with the align argument being 0. |
+ hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive()); |
+ } else { |
+ hr = MFCreateAlignedMemoryBuffer(buffer_length, |
+ align - 1, |
+ buffer.Receive()); |
+ } |
+ RETURN_ON_HR_FAILURE(hr, "Unable to create an empty buffer", NULL); |
+ |
+ hr = sample->AddBuffer(buffer.get()); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to add empty buffer to sample", NULL); |
+ |
+ return sample.Detach(); |
+} |
+ |
+// Creates a Media Foundation sample with one buffer containing a copy of the |
+// given Annex B stream data. |
+// The sample time and duration are not set on the returned sample. |
+// |min_size| specifies the minimum size of the buffer (might be required by |
+// the decoder for input). If no alignment is required, provide 0. |
+static IMFSample* CreateInputSample(const uint8* stream, int size, |
+ int min_size, int alignment) { |
+ CHECK(stream); |
+ CHECK_GT(size, 0); |
+ base::win::ScopedComPtr<IMFSample> sample; |
+ sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size), |
+ alignment)); |
+ if (!sample.get()) { |
+ NOTREACHED() << "Failed to create empty buffer for input"; |
+ return NULL; |
+ } |
+ HRESULT hr = E_FAIL; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Why not declare & initialize to the real result yo
ananta
2011/12/17 00:40:25
Done.
|
+ |
+ base::win::ScopedComPtr<IMFMediaBuffer> buffer; |
+ hr = sample->GetBufferByIndex(0, buffer.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer in sample", NULL); |
+ |
+ DWORD max_length = 0; |
+ DWORD current_length = 0; |
+ uint8* destination = NULL; |
+ |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
remove newline
ananta
2011/12/17 00:40:25
Done.
|
+ hr = buffer->Lock(&destination, &max_length, &current_length); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL); |
+ |
+ CHECK_EQ(current_length, 0u); |
+ CHECK_GE(static_cast<int>(max_length), size); |
+ memcpy(destination, stream, size); |
+ CHECK(SUCCEEDED(buffer->Unlock())); |
+ |
+ hr = buffer->SetCurrentLength(size); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to set current length", NULL); |
+ |
+ hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to mark sample as key sample", NULL); |
+ |
+ return sample.Detach(); |
+} |
+ |
+static IMFSample* CreateSampleFromInputBuffer( |
+ const media::BitstreamBuffer& bitstream_buffer, |
+ base::ProcessHandle renderer_process, |
+ DWORD stream_size, |
+ DWORD alignment) { |
+ HANDLE shared_memory_handle = NULL; |
+ if (!::DuplicateHandle(renderer_process, |
+ bitstream_buffer.handle(), |
+ ::GetCurrentProcess(), |
+ &shared_memory_handle, |
+ 0, |
+ FALSE, |
+ DUPLICATE_SAME_ACCESS)) { |
+ NOTREACHED() << "Failed to open duplicate shared mem handle"; |
+ return NULL; |
+ } |
+ |
+ base::SharedMemory shm(shared_memory_handle, true); |
+ if (!shm.Map(bitstream_buffer.size())) { |
+ NOTREACHED() << "Failed in SharedMemory::Map()"; |
+ return NULL; |
+ } |
+ |
+ return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()), |
+ bitstream_buffer.size(), |
+ stream_size, |
+ alignment); |
+} |
+ |
+DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer( |
+ bool is_available, const media::PictureBuffer& buffer) |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Does the ctor ever get called with an is_available
ananta
2011/12/17 00:40:25
It does with your suggestion of using insert to ch
Ami GONE FROM CHROMIUM
2011/12/19 22:53:44
Where? All I'm seeing is "true", never "false".
|
+ : available(is_available), |
+ picture_buffer(buffer) { |
+} |
+ |
+DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer() |
+ : available(false), |
+ picture_buffer(0, gfx::Size(), 0) { |
+} |
+ |
+DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo( |
+ int32 buffer_id, IDirect3DSurface9* surface) |
+ : input_buffer_id(buffer_id), |
+ dest_surface(surface) { |
+} |
+ |
+DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator( |
+ media::VideoDecodeAccelerator::Client* client, |
+ base::ProcessHandle renderer_process) |
+ : client_(client), |
+ state_(kUninitialized), |
+ pictures_requested_(false), |
+ renderer_process_(renderer_process), |
+ dev_manager_reset_token_(0), |
+ last_input_buffer_id_(-1), |
+ inputs_before_decode_(0) { |
+ method_factory_.reset( |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Unused, which is unfortunate (see my comment about
ananta
2011/12/17 01:29:07
Removed
|
+ new ScopedRunnableMethodFactory<media::VideoDecodeAccelerator::Client>( |
+ client_)); |
+} |
+ |
+DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() { |
+ client_ = NULL; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::Initialize(Profile profile) { |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/ profile// if unused
ananta
2011/12/17 00:40:25
Done.
|
+ DCHECK(CalledOnValidThread()); |
+ |
+ if (state_ != kUninitialized) { |
+ NOTREACHED() << "Initialize: invalid state: " << state_; |
+ return false; |
+ } |
+ |
+ HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL); |
+ if (FAILED(hr)) { |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
use macro?
(here and elsewhere; please do a sweep)
ananta
2011/12/17 00:40:25
Done.
|
+ NOTREACHED() << "MFStartup failed. Error:" << std::hex << std::showbase |
+ << hr; |
+ return false; |
+ } |
+ if (!CreateD3DDevManager()) |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
l.197-208 can be:
if (!CreateD3DDevManager() || !I
ananta
2011/12/17 00:40:25
Changed with the new macros.
|
+ return false; |
+ |
+ if (!InitDecoder()) |
+ return false; |
+ |
+ if (!GetStreamsInfoAndBufferReqs()) |
+ return false; |
+ |
+ if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0)) |
+ return false; |
+ |
+ state_ = kNormal; |
+ MessageLoop::current()->PostTask(FROM_HERE, |
+ base::Bind(&media::VideoDecodeAccelerator::Client::NotifyInitializeDone, |
+ base::Unretained(client_))); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
base::Unretained() should make you wonder about li
ananta
2011/12/17 01:29:07
Done.
|
+ return true; |
+} |
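The suggestion above about l.197-208 reads like a request to collapse the four early-return checks into one short-circuiting condition. A sketch of that shape, behaviorally equivalent to the code as written:

// Same four initialization steps; || short-circuits, so the first failing
// step aborts Initialize().
if (!CreateD3DDevManager() ||
    !InitDecoder() ||
    !GetStreamsInfoAndBufferReqs() ||
    !SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0)) {
  return false;
}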
+ |
+void DXVAVideoDecodeAccelerator::Decode( |
+ const media::BitstreamBuffer& bitstream_buffer) { |
+ DCHECK(CalledOnValidThread()); |
+ if (state_ != kNormal && state_ != kStopped) { |
+ NOTREACHED() << "ConsumeVideoSample: invalid state"; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
NOTREACHED() should be reserved for detecting prog
ananta
2011/12/17 00:40:25
Done.
|
+ return; |
+ } |
+ |
+ base::win::ScopedComPtr<IMFSample> sample; |
+ sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer, |
+ renderer_process_, |
+ input_stream_info_.cbSize, |
+ input_stream_info_.cbAlignment)); |
+ if (!sample.get()) { |
+ NOTREACHED() << "Failed to create an input sample"; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ if (!inputs_before_decode_) { |
+ TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); |
+ } |
+ inputs_before_decode_++; |
+ |
+ if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0)) { |
+ NOTREACHED() << "Failed to send start of stream message to MFT"; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) { |
+ NOTREACHED() << "Failed to process input"; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) { |
+ if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0)) { |
+ NOTREACHED() << "Failed to send eos message to MFT"; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } else { |
+ state_ = kEosDrain; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I still don't understand this.
What happens if th
ananta
2011/12/17 01:29:07
Every decode call is associated with a ProcessInpu
|
+ } |
+ } |
+ |
+ last_input_buffer_id_ = bitstream_buffer.id(); |
+ DoDecode(); |
+ if (state_ != kStopped && state_ != kNormal) { |
+ NOTREACHED() << "Failed to process output. Unexpected decoder state: " |
+ << state_; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ // The Microsoft Media foundation decoder internally buffers upto 30 fps |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/upto/up to/
s/30fps/30 frames/
ananta
2011/12/17 00:40:25
Done.
|
+ // before returning a decoded frame. We need to inform the client that this |
+ // input buffer is processed as it may stop sending us further input. |
+ // Note: This may break clients which expect every input buffer to be |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Wouldn't it be better to not do this (retain the s
ananta
2011/12/17 00:40:25
As per our discussion leaving this as is for now.
Ami GONE FROM CHROMIUM
2011/12/19 22:53:44
Please add a TODO for this and point to a crbug.
ananta
2011/12/20 02:27:22
Done.
|
+ // associated with a decoded output buffer. |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer, |
+ base::Unretained(client_), bitstream_buffer.id())); |
+} |
+ |
+void DXVAVideoDecodeAccelerator::AssignPictureBuffers( |
+ const std::vector<media::PictureBuffer>& buffers) { |
+ DCHECK(CalledOnValidThread()); |
+ // Copy the picture buffers provided by the client to the available list, |
+ // and mark these buffers as available for use. |
+ for (size_t buffer_index = 0; buffer_index < buffers.size(); |
+ ++buffer_index) { |
+ DXVAPictureBuffer picture_buffer(true, buffers[buffer_index]); |
+ |
+ DCHECK(output_picture_buffers_.find(buffers[buffer_index].id()) == |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I think you missed my point. By calling insert()
ananta
2011/12/17 01:29:07
Done.
|
+ output_picture_buffers_.end()); |
+ output_picture_buffers_[buffers[buffer_index].id()] = picture_buffer; |
+ } |
+ ProcessPendingSamples(); |
+} |
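Ami's point about insert() above is presumably that std::map::insert() both adds the entry and reports, via the bool member of the returned pair, whether the key was already present, so the duplicate check and the assignment collapse into one call. A minimal sketch of that loop body:

// Sketch: insert() refuses to overwrite an existing key, so .second being
// false is exactly the duplicate-id case the DCHECK above guards against.
bool inserted = output_picture_buffers_.insert(std::make_pair(
    buffers[buffer_index].id(),
    DXVAPictureBuffer(true, buffers[buffer_index]))).second;
DCHECK(inserted);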
+ |
+void DXVAVideoDecodeAccelerator::ReusePictureBuffer( |
+ int32 picture_buffer_id) { |
+ DCHECK(CalledOnValidThread()); |
+ |
+ OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id); |
+ if (it == output_picture_buffers_.end()) { |
+ DVLOG(1) << "Invalid picture id"; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
NotifyError
ananta
2011/12/17 00:40:25
Done.
|
+ return; |
+ } |
+ it->second.available = true; |
+ ProcessPendingSamples(); |
+} |
+ |
+void DXVAVideoDecodeAccelerator::Flush() { |
+ DCHECK(CalledOnValidThread()); |
+ |
+ VLOG(1) << "DXVAVideoDecodeAccelerator::Flush"; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I think you missed my comment to s/VLOG/DVLOG/ eve
ananta
2011/12/17 00:40:25
Done.
|
+ |
+ if (state_ != kNormal && state_ != kStopped) { |
+ NOTREACHED() << "ConsumeVideoSample: invalid state:" << state_; |
+ NotifyError(ILLEGAL_STATE); |
+ return; |
+ } |
+ |
+ DCHECK(state_ != kEosDrain); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Redundant to the if above.
ananta
2011/12/17 00:40:25
Done.
|
+ state_ = kEosDrain; |
+ |
+ if (!SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0)) { |
+ VLOG(1) << "Failed to send drain message"; |
+ state_ = kStopped; |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ // As per MSDN docs after the client sends this message, it calls |
+ // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the |
+ // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets |
+ // the state to kStopped when the decoder returns |
+ // MF_E_TRANSFORM_NEED_MORE_INPUT. |
+ while (state_ != kStopped) { |
+ DoDecode(); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Given DoDecode() is synchronous does this loop bod
ananta
2011/12/17 00:40:25
What we are doing is as per msdn documentation. I
|
+ } |
+ |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::NotifyFlushDone, |
+ base::Unretained(client_))); |
+ |
+ state_ = kNormal;} |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
missing newline before brace.
ananta
2011/12/17 00:40:25
Done.
|
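For reference, the MSDN procedure described in the comment inside Flush() is the generic MFT drain pattern: send MFT_MESSAGE_COMMAND_DRAIN, then call ProcessOutput() until it returns MF_E_TRANSFORM_NEED_MORE_INPUT. A simplified sketch, written against a bare IMFTransform* |decoder| rather than the state machine used above:

// Generic drain loop; the class above folds this into SendMFTMessage() plus
// DoDecode() driving state_ to kStopped.
decoder->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, 0);
for (;;) {
  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
  DWORD status = 0;
  HRESULT hr = decoder->ProcessOutput(0, 1, &output_data_buffer, &status);
  if (output_data_buffer.pEvents)
    output_data_buffer.pEvents->Release();
  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
    break;  // Fully drained.
  if (FAILED(hr))
    break;  // A real implementation would report this as an error.
  // ... consume and Release() output_data_buffer.pSample ...
}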
+ |
+void DXVAVideoDecodeAccelerator::Reset() { |
+ DCHECK(CalledOnValidThread()); |
+ |
+ VLOG(1) << "DXVAVideoDecodeAccelerator::Reset"; |
+ |
+ bool success = false; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
unused?
ananta
2011/12/17 00:40:25
Done.
|
+ |
+ if (state_ != kNormal && state_ != kStopped) { |
+ NOTREACHED() << "Reset: invalid state"; |
+ NotifyError(ILLEGAL_STATE); |
+ return; |
+ } |
+ |
+ DCHECK(state_ != kResetting); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
redundant
ananta
2011/12/17 00:40:25
Done.
|
+ state_ = kResetting; |
+ |
+ if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0)) { |
+ VLOG(1) << "DXVAVideoDecodeAccelerator::Flush failed to send message"; |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/Flush/Reset/
ananta
2011/12/17 00:40:25
Done.
|
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::NotifyResetDone, |
+ base::Unretained(client_))); |
+ |
+ ClearState(); |
+ state_ = DXVAVideoDecodeAccelerator::kNormal; |
+} |
+ |
+void DXVAVideoDecodeAccelerator::Destroy() { |
+ DCHECK(CalledOnValidThread()); |
+ |
+ OutputBuffers::iterator index; |
+ for (index = output_picture_buffers_.begin(); |
+ index != output_picture_buffers_.end(); |
+ ++index) { |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::DismissPictureBuffer, |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
This violates Destroy's API http://codesearch.goog
ananta
2011/12/17 00:40:25
Done.
|
+ base::Unretained(client_), index->second.picture_buffer.id())); |
+ } |
+ ClearState(); |
+ decoder_.Release(); |
+ device_.Release(); |
+ device_manager_.Release(); |
+ MFShutdown(); |
+ state_ = kUninitialized; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() { |
+ base::win::ScopedComPtr<IDirect3D9Ex> d3d9; |
+ |
+ Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive()); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Just to make sure you didn't miss them, apatrick m
ananta
2011/12/17 00:40:25
Yeah. We use the Ex versions of the APIs for that
|
+ if (d3d9.get() == NULL) { |
+ NOTREACHED() << "Failed to create D3D9"; |
+ return false; |
+ } |
+ |
+ D3DPRESENT_PARAMETERS present_params = {0}; |
+ present_params.BackBufferWidth = 1; |
+ present_params.BackBufferHeight = 1; |
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN; |
+ present_params.BackBufferCount = 1; |
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD; |
+ present_params.hDeviceWindow = GetShellWindow(); |
+ present_params.Windowed = TRUE; |
+ present_params.Flags = D3DPRESENTFLAG_VIDEO; |
+ present_params.FullScreen_RefreshRateInHz = 0; |
+ present_params.PresentationInterval = 0; |
+ |
+ HRESULT hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT, |
+ D3DDEVTYPE_HAL, |
+ GetShellWindow(), |
+ D3DCREATE_SOFTWARE_VERTEXPROCESSING, |
+ &present_params, |
+ NULL, |
+ device_.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to create D3D Device", false); |
+ |
+ hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_, |
+ device_manager_.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Couldn't create D3D Device manager", false); |
+ |
+ hr = device_manager_->ResetDevice(device_.get(), |
+ dev_manager_reset_token_); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to set device to device manager", false); |
+ return true; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::InitDecoder() { |
+ HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT), |
+ NULL, |
+ CLSCTX_INPROC_SERVER, |
+ __uuidof(IMFTransform), |
+ reinterpret_cast<void**>(decoder_.Receive())); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to CoCreate decoder.", false); |
+ |
+ if (!CheckDecoderDxvaSupport()) |
+ return false; |
+ |
+ hr = decoder_->ProcessMessage( |
+ MFT_MESSAGE_SET_D3D_MANAGER, |
+ reinterpret_cast<ULONG_PTR>(device_manager_.get())); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D9 device to decoder.", false); |
+ |
+ return SetDecoderMediaTypes(); |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { |
+ base::win::ScopedComPtr<IMFAttributes> attributes; |
+ HRESULT hr = decoder_->GetAttributes(attributes.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to pass decoder attributes.", false); |
+ |
+ UINT32 dxva = 0; |
+ hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get DXVA aware attribute.", false); |
+ |
+ hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to force codec to use H/W DXVA.", false); |
+ return true; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() { |
+ if (!SetDecoderInputMediaType()) |
+ return false; |
+ return SetDecoderOutputMediaType(MFVideoFormat_NV12); |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { |
+ base::win::ScopedComPtr<IMFMediaType> media_type; |
+ HRESULT hr = MFCreateMediaType(media_type.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to create empty media type object", false); |
+ |
+ hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); |
+ RETURN_ON_HR_FAILURE(hr, "SetGUID for major type failed", false); |
+ |
+ hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); |
+ RETURN_ON_HR_FAILURE(hr, "SetGUID for subtype failed", false); |
+ |
+ hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags |
+ RETURN_ON_HR_FAILURE(hr, "Failed to set decoder's input type", false); |
+ return true; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType( |
+ const GUID& subtype) { |
+ base::win::ScopedComPtr<IMFMediaType> out_media_type; |
+ |
+ for (uint32 i = 0; |
+ SUCCEEDED(decoder_->GetOutputAvailableType(0, i, |
+ out_media_type.Receive())); |
+ ++i) { |
+ GUID out_subtype = {0}; |
+ HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get output media type guid", false); |
+ |
+ if (out_subtype == subtype) { |
+ hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags |
+ RETURN_ON_HR_FAILURE(hr, "Failed to set output media type", false); |
+ return true; |
+ } |
+ out_media_type.Release(); |
+ } |
+ return false; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, |
+ int32 param) { |
+ HRESULT hr = decoder_->ProcessMessage(msg, param); |
+ return SUCCEEDED(hr); |
+} |
+ |
+// Gets the minimum buffer sizes for input and output samples. The MFT will not |
+// allocate buffers for input or output, so we have to do it ourselves and make |
+// sure they're the correct size. We only provide decoding if DXVA is enabled. |
+bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() { |
+ HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false); |
+ |
+ hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get output stream info", false); |
+ |
+ VLOG(1) << "Input stream info: "; |
+ VLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency; |
+ // There should be three flags: one requiring that a whole frame be in a |
+ // single sample, one requiring a single buffer per sample, and one that |
+ // specifies a fixed sample size (as in cbSize). |
+ CHECK_EQ(input_stream_info_.dwFlags, 0x7u); |
+ |
+ VLOG(1) << "Min buffer size: " << input_stream_info_.cbSize; |
+ VLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead; |
+ VLOG(1) << "Alignment: " << input_stream_info_.cbAlignment; |
+ |
+ VLOG(1) << "Output stream info: "; |
+ // The flags here should be the same and mean the same thing, except when |
+ // DXVA is enabled, there is an extra 0x100 flag meaning decoder will |
+ // allocate its own sample. |
+ VLOG(1) << "Flags: " |
+ << std::hex << std::showbase << output_stream_info_.dwFlags; |
+ CHECK_EQ(output_stream_info_.dwFlags, 0x107u); |
+ VLOG(1) << "Min buffer size: " << output_stream_info_.cbSize; |
+ VLOG(1) << "Alignment: " << output_stream_info_.cbAlignment; |
+ return true; |
+} |
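The hard-coded 0x7 and 0x107 in the CHECKs above are combinations of the named MFT stream-info flags from mftransform.h. A sketch spelling the same checks out with those constants:

// Same checks, with the flag bits named (values per mftransform.h).
const DWORD expected_input_flags =
    MFT_INPUT_STREAM_WHOLE_SAMPLES |             // 0x1
    MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |  // 0x2
    MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE;          // 0x4
const DWORD expected_output_flags =
    MFT_OUTPUT_STREAM_WHOLE_SAMPLES |
    MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
    MFT_OUTPUT_STREAM_FIXED_SAMPLE_SIZE |
    MFT_OUTPUT_STREAM_PROVIDES_SAMPLES;          // 0x100: decoder allocates
                                                 // its own samples (DXVA).
CHECK_EQ(input_stream_info_.dwFlags, expected_input_flags);    // == 0x7
CHECK_EQ(output_stream_info_.dwFlags, expected_output_flags);  // == 0x107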
+ |
+void DXVAVideoDecodeAccelerator::DoDecode() { |
+ if (state_ != kNormal && state_ != kEosDrain) { |
+ NOTREACHED() << "DoDecode: not in normal or drain state"; |
+ return; |
+ } |
+ |
+ MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0}; |
+ DWORD status = 0; |
+ |
+ HRESULT hr = decoder_->ProcessOutput(0, // No flags |
+ 1, // # of out streams to pull from |
+ &output_data_buffer, |
+ &status); |
+ IMFCollection* events = output_data_buffer.pEvents; |
+ if (events != NULL) { |
+ VLOG(1) << "Got events from ProcessOuput, but discarding"; |
+ events->Release(); |
+ } |
+ if (FAILED(hr)) { |
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { |
+ if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) { |
+ // No more output from the decoder. Notify EOS and stop playback. |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Comment is at odds with the following NOTREACHED.
ananta
2011/12/17 00:40:25
Not sure what we can do here. It is probably corre
|
+ NOTREACHED() << "Failed to set decoder output media type"; |
+ } |
+ // No more output from the decoder. Stop playback. |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
copy/paste from the NEED_MORE_INPUT case below? I
ananta
2011/12/17 00:40:25
A stream change needs further process input calls
|
+ state_ = kStopped; |
+ return; |
+ } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { |
+ // No more output from the decoder. Stop playback. |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Add a DCHECK_EQ(state_, kEosDrain); i.e. that we w
ananta
2011/12/17 00:40:25
Done.
|
+ state_ = kStopped; |
+ return; |
+ } else { |
+ NOTREACHED() << "Unhandled error in DoDecode()"; |
+ return; |
+ } |
+ } |
+ |
+ TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); |
+ |
+ TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", |
+ inputs_before_decode_); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Can this ever *not* be 1, given the synchronous pa
ananta
2011/12/17 00:40:25
Covered with the decoder buffering behavior.
|
+ |
+ inputs_before_decode_ = 0; |
+ |
+ if (!ProcessOutputSample(output_data_buffer.pSample)) { |
+ NotifyError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ state_ = kNormal; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) { |
+ if (!sample) { |
+ NOTREACHED() << "ProcessOutput succeeded, but did not get a sample back"; |
+ return false; |
+ } |
+ base::win::ScopedComPtr<IMFSample> output_sample; |
+ output_sample.Attach(sample); |
+ |
+ base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; |
+ HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive()); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample.", false); |
+ |
+ base::win::ScopedComPtr<IDirect3DSurface9> surface; |
+ hr = MFGetService(output_buffer, MR_BUFFER_SERVICE, |
+ IID_PPV_ARGS(surface.Receive())); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get surface from buffer.", false); |
+ |
+ D3DSURFACE_DESC surface_desc; |
+ hr = surface->GetDesc(&surface_desc); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false); |
+ |
+ TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, |
+ ""); |
+ // TODO(ananta) |
+ // The code below may not be necessary once we have an ANGLE extension which |
+ // allows us to pass the Direct 3D surface directly for rendering. |
+ |
+ // The decoded bits in the source Direct3D surface are in the YUV |
+ // format. ANGLE does not support that. As a workaround we create an |
+ // offscreen surface in the RGB format and copy the source surface |
+ // to this surface. |
+ base::win::ScopedComPtr<IDirect3DSurface9> dest_surface; |
+ hr = device_->CreateOffscreenPlainSurface(surface_desc.Width, |
+ surface_desc.Height, |
+ D3DFMT_A8R8G8B8, |
+ D3DPOOL_DEFAULT, |
+ dest_surface.Receive(), |
+ NULL); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface.", false); |
+ |
+ hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL, |
+ NULL, 0, 0); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to copy source surface to dest.", false); |
+ |
+ TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, ""); |
+ |
+ scoped_array<char> bits; |
+ if (!GetBitmapFromSurface(dest_surface, &bits)) { |
+ NOTREACHED() << "Failed to get bitmap from dest surface"; |
+ return false; |
+ } |
+ |
+ pending_output_samples_.push_back( |
+ PendingSampleInfo(last_input_buffer_id_, dest_surface)); |
+ |
+ // If we have available picture buffers to copy the output data to, use the |
+ // first one and flag it as no longer available. |
+ if (output_picture_buffers_.size()) { |
+ ProcessPendingSamples(); |
+ return true; |
+ } |
+ if (pictures_requested_) { |
+ VLOG(1) << "Waiting for picture slots from the client."; |
+ return true; |
+ } |
+ // Go ahead and request picture buffers. |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::ProvidePictureBuffers, |
+ base::Unretained(client_), kNumPictureBuffers, |
+ gfx::Size(surface_desc.Width, surface_desc.Height))); |
+ |
+ pictures_requested_ = true; |
+ return true; |
+} |
+ |
+bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer( |
+ IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer, |
+ int input_buffer_id) { |
+ DCHECK(dest_surface); |
+ |
+ D3DSURFACE_DESC surface_desc; |
+ HRESULT hr = dest_surface->GetDesc(&surface_desc); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false); |
+ |
+ scoped_array<char> bits; |
+ if (!GetBitmapFromSurface(dest_surface, &bits)) { |
+ NOTREACHED() << "Failed to read bits from D3D surface"; |
+ return false; |
+ } |
+ |
+ GLint current_texture = 0; |
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I'm surprised to see these (global-scope) gl* call
ananta
2011/12/17 00:40:25
When i step into these calls i always get a contex
ananta
2011/12/17 00:40:25
Based on a discussion with Al Patrick an open gl c
|
+ |
+ glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id()); |
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width, |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
How is this safe without a glFinish?
ananta
2011/12/17 00:40:25
Again based on a discussion with Al Patrick not ne
|
+ surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, |
+ reinterpret_cast<GLvoid*>(bits.get())); |
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
+ |
+ glBindTexture(GL_TEXTURE_2D, current_texture); |
+ |
+ media::Picture output_picture(picture_buffer.id(), input_buffer_id); |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::PictureReady, |
+ base::Unretained(client_), output_picture)); |
+ return true; |
+} |
+ |
+void DXVAVideoDecodeAccelerator::ProcessPendingSamples() { |
+ if (pending_output_samples_.empty()) |
+ return; |
+ |
+ OutputBuffers::iterator index; |
+ |
+ for (index = output_picture_buffers_.begin(); |
+ index != output_picture_buffers_.end(); |
+ ++index) { |
+ if (index->second.available) { |
+ PendingSampleInfo sample_info = pending_output_samples_.front(); |
+ |
+ CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface, |
+ index->second.picture_buffer, |
+ sample_info.input_buffer_id); |
+ index->second.available = false; |
+ pending_output_samples_.erase(pending_output_samples_.begin()); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
pop_front()
ananta
2011/12/17 00:40:25
Done.
|
+ |
+ if (pending_output_samples_.empty()) |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
If you move this to the for-loop test you can drop
ananta
2011/12/17 00:40:25
Done.
|
+ break; |
+ } |
+ } |
+} |
+ |
+void DXVAVideoDecodeAccelerator::ClearState() { |
+ last_input_buffer_id_ = -1; |
+ output_picture_buffers_.clear(); |
+ pending_output_samples_.clear(); |
+} |
+ |
+void DXVAVideoDecodeAccelerator::NotifyError( |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I think you actually want a StopOnError-equivalent
ananta
2011/12/17 00:40:25
Done.
|
+ media::VideoDecodeAccelerator::Error error) { |
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind( |
+ &media::VideoDecodeAccelerator::Client::NotifyError, |
+ base::Unretained(client_), error)); |
+} |
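Ami's StopOnError suggestion above presumably means an error path that both reports the failure to the client and parks the decoder in a terminal state rather than continuing to accept work. A hedged sketch of what the "Done" reply might correspond to (the name, the cleanup performed, and the final state are assumptions, not the landed code):

// Sketch only: assumed replacement for NotifyError() above.
void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(CalledOnValidThread());
  if (client_) {
    MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
        &media::VideoDecodeAccelerator::Client::NotifyError,
        base::Unretained(client_), error));
  }
  ClearState();
  state_ = kUninitialized;
}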
+ |
+bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface( |
+ IDirect3DSurface9* surface, |
+ scoped_array<char>* bits) { |
+ // Get the currently loaded bitmap from the DC. |
+ HDC hdc = NULL; |
+ HRESULT hr = surface->GetDC(&hdc); |
+ RETURN_ON_HR_FAILURE(hr, "Failed to get HDC for dest offscreen surface.", |
+ false); |
+ HBITMAP bitmap = |
+ reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP)); |
+ if (!bitmap) { |
+ NOTREACHED() << "Failed to get bitmap from DC"; |
+ surface->ReleaseDC(hdc); |
+ return false; |
+ } |
+ // TODO(ananta) |
+ // The code below may not be necessary once we have an ANGLE extension which |
+ // allows us to pass the Direct 3D surface directly for rendering. |
+ // The Device dependent bitmap is upside down for OpenGL. We convert the |
+ // bitmap to a DIB and render it on the texture instead. |
+ BITMAP bitmap_basic_info = {0}; |
+ if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) { |
+ NOTREACHED() << "Failed to read bitmap info"; |
+ surface->ReleaseDC(hdc); |
+ return false; |
+ } |
+ BITMAPINFO bitmap_info = {0}; |
+ bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER); |
+ bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth; |
+ bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight; |
+ bitmap_info.bmiHeader.biPlanes = 1; |
+ bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel; |
+ bitmap_info.bmiHeader.biCompression = BI_RGB; |
+ bitmap_info.bmiHeader.biSizeImage = 0; |
+ bitmap_info.bmiHeader.biClrUsed = 0; |
+ |
+ int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS); |
+ if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) { |
+ NOTREACHED() << "Failed to read bitmap size"; |
+ surface->ReleaseDC(hdc); |
+ return false; |
+ } |
+ |
+ bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]); |
+ ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(), |
+ &bitmap_info, DIB_RGB_COLORS); |
+ |
+ surface->ReleaseDC(hdc); |
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
combine into if below, or error-check otherwise?
ananta
2011/12/17 00:40:25
Done.
|
+ if (!ret) { |
+ NOTREACHED() << "Failed to retrieve bitmap bits."; |
+ return false; |
+ } |
+ return true; |
+} |