Index: content/common/gpu/media/dxva_video_decode_accelerator.cc
===================================================================
--- content/common/gpu/media/dxva_video_decode_accelerator.cc (revision 0)
+++ content/common/gpu/media/dxva_video_decode_accelerator.cc (revision 0)
@@ -0,0 +1,714 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
+
+#include <mfapi.h>
+#include <mferror.h>
+#include <wmcodecdsp.h>
+
+#include <algorithm>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/scoped_handle.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/shared_memory.h"
+#include "base/win/scoped_com_initializer.h"
+#include "content/common/gpu/media/gles2_texture_to_egl_image_translator.h"
+#include "media/base/video_frame.h"
+#include "media/video/video_decode_accelerator.h"
+
+base::LazyInstance<base::win::ScopedCOMInitializer>

apatrick_chromium 2011/11/14 20:55:07:
This happens in gpu_main.cc so it might not be necessary.

ananta 2011/12/13 01:39:15:
Removed.

+    g_init_com(base::LINKER_INITIALIZED);
+
+namespace {
+GUID ConvertVideoFrameFormatToGuid(media::VideoFrame::Format format) {
+  switch (format) {
+    case media::VideoFrame::NV12:
+      return MFVideoFormat_NV12;
+    case media::VideoFrame::YV12:
+      return MFVideoFormat_YV12;
+    default:
+      break;
+  }
+  NOTREACHED() << "Unsupported VideoFrame format";
+  return GUID_NULL;
+}
+
+IMFSample* CreateEmptySample() {
+  base::win::ScopedComPtr<IMFSample> sample;
+  HRESULT hr = MFCreateSample(sample.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Unable to create an empty sample";
+    return NULL;
+  }
+  return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer of length |buffer_length|
+// on an |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
+// If |align| is 0, then no alignment is specified.
+IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
+  CHECK_GT(buffer_length, 0);
+  base::win::ScopedComPtr<IMFSample> sample;
+  sample.Attach(CreateEmptySample());
+  if (!sample.get())
+    return NULL;
+  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+  HRESULT hr = E_FAIL;
+  if (align == 0) {
+    // Note that MFCreateMemoryBuffer is the same as
+    // MFCreateAlignedMemoryBuffer with the align argument being 0.
+    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
+  } else {
+    hr = MFCreateAlignedMemoryBuffer(buffer_length,
+                                     align - 1,
+                                     buffer.Receive());
+  }
+  if (FAILED(hr)) {
+    NOTREACHED() << "Unable to create an empty buffer";
+    return NULL;
+  }
+  hr = sample->AddBuffer(buffer.get());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to add empty buffer to sample";
+    return NULL;
+  }
+  return sample.Detach();
+}
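
For reference, a minimal usage sketch of this helper (not part of the patch): the cbSize/cbAlignment values reported by the MFT's GetInputStreamInfo() are what a caller would typically pass. MFCreateAlignedMemoryBuffer takes the alignment minus one (e.g. MF_16_BYTE_ALIGNMENT is 15), which is why the helper subtracts 1.

// Hypothetical caller; assume |info| was filled by GetInputStreamInfo(0, &info).
MFT_INPUT_STREAM_INFO info = {0};
IMFSample* sample = CreateEmptySampleWithBuffer(
    info.cbSize,        // Minimum buffer size required by the MFT.
    info.cbAlignment);  // Power-of-2 alignment, or 0 for none.
if (sample)
  sample->Release();    // The helper returns an owning reference.
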
+
+// Creates a Media Foundation sample with one buffer containing a copy of the
+// given Annex B stream data.
+// If the duration and sample time are not known, provide 0.
+// |min_size| specifies the minimum size of the buffer (might be required by
+// the decoder for input). |timestamp| and |duration| are in 100ns units.
+// |alignment| specifies the alignment of the buffer in the sample. If no
+// alignment is required, provide 0 or 1.
+static IMFSample* CreateInputSample(const uint8* stream, int size,
+                                    int64 timestamp, int64 duration,
+                                    int min_size, int alignment) {
+  CHECK(stream);
+  CHECK_GT(size, 0);
+  base::win::ScopedComPtr<IMFSample> sample;
+  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
+                                            alignment));
+  if (!sample.get()) {
+    NOTREACHED() << "Failed to create empty buffer for input";
+    return NULL;
+  }
+  HRESULT hr = E_FAIL;
+  if (duration > 0) {
+    hr = sample->SetSampleDuration(duration);
+    if (FAILED(hr)) {
+      NOTREACHED() << "Failed to set sample duration";
+      return NULL;
+    }
+  }
+  if (timestamp > 0) {
+    hr = sample->SetSampleTime(timestamp);
+    if (FAILED(hr)) {
+      NOTREACHED() << "Failed to set sample time";
+      return NULL;
+    }
+  }
+  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+  hr = sample->GetBufferByIndex(0, buffer.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to get buffer in sample";
+    return NULL;
+  }
+  DWORD max_length = 0, current_length = 0;
+  uint8* destination = NULL;
+  hr = buffer->Lock(&destination, &max_length, &current_length);
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to lock buffer";
+    return NULL;
+  }
+  CHECK_EQ(current_length, 0u);
+  CHECK_GE(static_cast<int>(max_length), size);
+  memcpy(destination, stream, size);
+  CHECK(SUCCEEDED(buffer->Unlock()));
+  hr = buffer->SetCurrentLength(size);
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to set current length to " << size;
+    return NULL;
+  }
+  LOG(INFO) << __FUNCTION__ << " wrote " << size << " bytes into input sample";
+  return sample.Detach();
+}
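
Media Foundation timestamps are in 100-nanosecond ticks, so a caller holding a base::TimeDelta would convert before calling this helper. A hedged sketch (the variable names are illustrative, not from the patch):

// 1 microsecond == 10 ticks of 100ns.
int64 mf_timestamp = presentation_time.InMicroseconds() * 10;
int64 mf_duration = frame_duration.InMicroseconds() * 10;
IMFSample* input = CreateInputSample(data, data_size,
                                     mf_timestamp, mf_duration,
                                     input_stream_info_.cbSize,
                                     input_stream_info_.cbAlignment);
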
+
+}  // namespace
+
+DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
+    media::VideoDecodeAccelerator::Client* client)
+    : client_(client),
+      message_loop_(MessageLoop::current()),
+      surface_width_(0),
+      surface_height_(0),
+      state_(kUninitialized),
+      input_buffers_at_component_(0),
+      input_stream_info_(),
+      output_stream_info_() {
+  // Initialize COM on this thread.
+  g_init_com.Get();
+}
+
+DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  client_ = NULL;
+  message_loop_ = NULL;
+}
+
+bool DXVAVideoDecodeAccelerator::Initialize(Profile profile) {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+
+  if (state_ != kUninitialized) {
+    NOTREACHED() << "Initialize: invalid state: " << state_;
+    return false;
+  }
+
+  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
+  if (FAILED(hr)) {
+    NOTREACHED() << "MFStartup failed. Error:"
+                 << std::hex << std::showbase << hr;
+    return false;
+  }
+  if (!CreateD3DDevManager())
+    return false;
+  if (!InitDecoder())
+    return false;
+  if (!GetStreamsInfoAndBufferReqs())
+    return false;
+  if (SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING)) {
+    state_ = DXVAVideoDecodeAccelerator::kNormal;
+    client_->NotifyInitializeDone();
+    return true;
+  }
+  return false;
+}
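
One loose end worth noting: MFStartup() has a matching MFShutdown() that this patch never calls. A minimal sketch of a scoped guard (ScopedMFInitializer is a hypothetical name, not part of the patch or of base/):

// Hypothetical RAII guard pairing MFStartup with MFShutdown.
class ScopedMFInitializer {
 public:
  ScopedMFInitializer()
      : ok_(SUCCEEDED(MFStartup(MF_VERSION, MFSTARTUP_FULL))) {}
  ~ScopedMFInitializer() {
    if (ok_)
      MFShutdown();
  }
  bool ok() const { return ok_; }
 private:
  bool ok_;
};
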
+
+void DXVAVideoDecodeAccelerator::Decode(
+    const media::BitstreamBuffer& bitstream_buffer) {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  if (state_ == DXVAVideoDecodeAccelerator::kUninitialized) {
+    NOTREACHED() << "Decode: invalid state";
+    return;
+  }
+  base::win::ScopedHandle source_process_handle(
+      ::OpenProcess(PROCESS_DUP_HANDLE,

apatrick_chromium 2011/11/14 20:55:07:
I think the sandbox might prevent OpenProcess from succeeding.

ananta 2011/12/13 01:39:15:
Done.

+                    FALSE,
+                    bitstream_buffer.source_process_id()));
+  if (!source_process_handle.IsValid()) {
+    NOTREACHED() << "Failed to open source process handle";
+    return;
+  }
+
+  HANDLE shared_memory_handle = NULL;
+  if (!::DuplicateHandle(source_process_handle,
+                         bitstream_buffer.handle(),
+                         ::GetCurrentProcess(),
+                         &shared_memory_handle,
+                         0,
+                         FALSE,
+                         DUPLICATE_SAME_ACCESS)) {
+    NOTREACHED() << "Failed to open duplicate shared mem handle";
+    return;
+  }
+
+  base::SharedMemory shm(shared_memory_handle, true);
+  if (!shm.Map(bitstream_buffer.size())) {
+    NOTREACHED() << "Failed in SharedMemory::Map()";
+    return;
+  }
+
+  base::win::ScopedComPtr<IMFSample> sample;
+  sample.Attach(CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
+                                  bitstream_buffer.size(),
+                                  0,
+                                  0,
+                                  input_stream_info_.cbSize,
+                                  input_stream_info_.cbAlignment));
+  if (!sample.get()) {
+    NOTREACHED() << "Failed to create an input sample";
+    return;
+  }
+  if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) {
+    NOTREACHED() << "Failed to process input";
+    return;
+  }
+  if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) {
+    // End of stream, send drain messages.
+    if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM) ||
+        !SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN)) {
+      NOTREACHED() << "Failed to send EOS / drain messages to MFT";
+    } else {
+      state_ = DXVAVideoDecodeAccelerator::kEosDrain;
+    }
+  }
+  client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id());
+  DoDecode();
+}
+
+void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
+    const std::vector<media::PictureBuffer>& buffers) {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  if (pending_output_samples_.size() != buffers.size()) {
+    NOTREACHED() << "Mismatched picture buffers and pending samples.";
+    return;
+  }
+  // Copy the picture buffers provided by the client to the available list,
+  // and mark these buffers as available for use.
+  for (size_t buffer_index = 0; buffer_index < buffers.size();
+       ++buffer_index) {
+    DXVAPictureBuffer picture_buffer;
+    picture_buffer.available = true;
+    picture_buffer.picture_buffer = buffers[buffer_index];
+
+    DCHECK(available_pictures_.find(buffers[buffer_index].id()) ==
+           available_pictures_.end());
+    available_pictures_[buffers[buffer_index].id()] = picture_buffer;
+  }
+  int buffer_index = 0;
+  PendingOutputSamples::iterator sample_index =
+      pending_output_samples_.begin();
+  HRESULT hr = E_FAIL;
+
+  while (sample_index != pending_output_samples_.end()) {
+    const base::win::ScopedComPtr<IMFSample>& sample = *sample_index;
+    base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
+    hr = sample->GetBufferByIndex(0, output_buffer.Receive());
+    if (FAILED(hr)) {
+      NOTREACHED() << "Failed to get buffer from sample";
+      ++sample_index;
+      continue;
+    }
+    base::win::ScopedComPtr<IDirect3DSurface9> surface;
+    hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+                      IID_PPV_ARGS(surface.Receive()));
+    if (FAILED(hr)) {
+      NOTREACHED() << "Failed to get surface from buffer";
+      ++sample_index;
+      continue;
+    }
+    CopyOutputSampleDataToPictureBuffer(sample,
+                                        surface,
+                                        buffers[buffer_index]);
+    ++buffer_index;
+    sample_index = pending_output_samples_.erase(sample_index);
+  }
+}
+
+void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
+    int32 picture_buffer_id) {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  DCHECK(available_pictures_.find(picture_buffer_id) !=
+         available_pictures_.end());
+}
+
+void DXVAVideoDecodeAccelerator::Flush() {
+#if 0
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  LOG(INFO) << "DXVAVideoDecodeAccelerator::Flush";
+  if (state_ != kNormal) {
+    NOTREACHED() << "Flush: invalid state";
+    return;
+  }
+  state_ = kFlushing;
+  if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
+    LOG(WARNING) << "DXVAVideoDecodeAccelerator::Flush failed to send message";
+  }
+  state_ = kNormal;
+  client_->NotifyFlushDone();
+#endif
+}
+
+void DXVAVideoDecodeAccelerator::Reset() {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  LOG(INFO) << "DXVAVideoDecodeAccelerator::Reset";
+  if (state_ != kNormal) {
+    NOTREACHED() << "Reset: invalid state";
+    return;
+  }
+
+  state_ = DXVAVideoDecodeAccelerator::kResetting;
+  if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
+    LOG(WARNING) << "DXVAVideoDecodeAccelerator::Reset failed to send message";
+  }
+  state_ = DXVAVideoDecodeAccelerator::kNormal;
+  client_->NotifyResetDone();
+}
+
+void DXVAVideoDecodeAccelerator::Destroy() {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  OutputBuffers::iterator index;
+  for (index = available_pictures_.begin();
+       index != available_pictures_.end();
+       ++index) {
+    client_->DismissPictureBuffer(index->second.picture_buffer.id());
+  }
+  available_pictures_.clear();
+  pending_output_samples_.clear();
+}
+
+void DXVAVideoDecodeAccelerator::SetEglState(EGLDisplay egl_display,
+                                             EGLContext egl_context) {
+  DCHECK_EQ(message_loop_, MessageLoop::current());
+  egl_display_ = egl_display;
+  egl_context_ = egl_context;
+}
+
+bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
+  d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));

apatrick_chromium 2011/11/14 20:55:07:
I think it will be easier to synchronize this code

+  if (d3d9_.get() == NULL) {
+    NOTREACHED() << "Failed to create D3D9";
+    return false;
+  }
+
+  D3DPRESENT_PARAMETERS present_params = {0};
+  present_params.BackBufferWidth = 0;
+  present_params.BackBufferHeight = 0;
+  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+  present_params.BackBufferCount = 1;
+  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+  present_params.hDeviceWindow = GetShellWindow();
+  present_params.Windowed = TRUE;
+  present_params.Flags = D3DPRESENTFLAG_VIDEO;
+  present_params.FullScreen_RefreshRateInHz = 0;
+  present_params.PresentationInterval = 0;
+
+  HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,

apatrick_chromium 2011/11/16 22:57:17:
Daniel, the DXVA video decoder needs a D3D device.

+                                   D3DDEVTYPE_HAL,
+                                   GetShellWindow(),
+                                   (D3DCREATE_HARDWARE_VERTEXPROCESSING |
+                                    D3DCREATE_MULTITHREADED),
+                                   &present_params,
+                                   device_.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to create D3D Device";
+    return false;
+  }
+
+  UINT dev_manager_reset_token = 0;
+  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
+                                         device_manager_.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Couldn't create D3D Device manager";
+    return false;
+  }
+
+  hr = device_manager_->ResetDevice(device_.get(),
+                                    dev_manager_reset_token);
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to set device to device manager";
+    return false;
+  }
+  return true;
+}
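
For context on what ResetDevice() buys us (not part of the patch): the reset token ties the device to the manager, and DXVA components later borrow the device through the manager's handle/lock protocol. A sketch of that consumer side, using only documented IDirect3DDeviceManager9 calls:

// How a DXVA component typically borrows the device from the manager.
HANDLE device_handle = NULL;
HRESULT hr = device_manager_->OpenDeviceHandle(&device_handle);
if (SUCCEEDED(hr)) {
  IDirect3DDevice9* device = NULL;
  // TRUE blocks until the device is available.
  hr = device_manager_->LockDevice(device_handle, &device, TRUE);
  if (SUCCEEDED(hr)) {
    // ... issue D3D calls ...
    device->Release();
    device_manager_->UnlockDevice(device_handle, FALSE);
  }
  device_manager_->CloseDeviceHandle(device_handle);
}
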
+
+bool DXVAVideoDecodeAccelerator::InitDecoder() {
+  HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),

apatrick_chromium 2011/11/14 20:55:07:
Does this work when the sandbox is enabled?

ananta 2011/12/13 01:39:15:
Yes.

+                                NULL,
+                                CLSCTX_INPROC_SERVER,
+                                __uuidof(IMFTransform),
+                                reinterpret_cast<void**>(decoder_.Receive()));
+  if (FAILED(hr) || !decoder_.get()) {
+    NOTREACHED() << "CoCreateInstance failed "
+                 << std::hex << std::showbase << hr;
+    return false;
+  }
+  if (!CheckDecoderDxvaSupport())
+    return false;
+  hr = decoder_->ProcessMessage(
+      MFT_MESSAGE_SET_D3D_MANAGER,
+      reinterpret_cast<ULONG_PTR>(device_manager_.get()));

apatrick_chromium 2011/11/16 22:57:17:
DXVA also needs a pointer to the Direct3DDeviceManager9.

+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to pass D3D9 device to decoder "
+                 << std::hex << hr;
+    return false;
+  }
+  return SetDecoderMediaTypes();
+}
+
+bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
+  base::win::ScopedComPtr<IMFAttributes> attributes;
+  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to get attributes, hr = "
+                 << std::hex << std::showbase << hr;
+    return false;
+  }
+  UINT32 dxva = 0;
+  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
+  if (FAILED(hr) || !dxva) {
+    NOTREACHED() << "Failed to get DXVA attr. Error:"
+                 << std::hex << std::showbase << hr
+                 << ". This might not be the right decoder.";
+    return false;
+  }
+  return true;
+}
+
+bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
+  if (!SetDecoderInputMediaType())
+    return false;
+  return SetDecoderOutputMediaType(ConvertVideoFrameFormatToGuid(
+      media::VideoFrame::NV12));
+}
+
+bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
+  base::win::ScopedComPtr<IMFMediaType> media_type;
+  HRESULT hr = MFCreateMediaType(media_type.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to create empty media type object";
+    return false;
+  }
+
+  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+  if (FAILED(hr)) {
+    NOTREACHED() << "SetGUID for major type failed";
+    return false;
+  }
+
+  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
+  if (FAILED(hr)) {
+    NOTREACHED() << "SetGUID for subtype failed";
+    return false;
+  }
+
+  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags.
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to set decoder's input type";
+    return false;
+  }
+  return true;
+}
+
+bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
+    const GUID& subtype) {
+  DWORD i = 0;
+  IMFMediaType* out_media_type = NULL;
+  while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
+    GUID out_subtype = {0};
+    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+    if (FAILED(hr)) {
+      NOTREACHED() << "Failed to GetGUID() on GetOutputAvailableType() " << i;
+      out_media_type->Release();
+      ++i;
+      continue;
+    }
+    if (out_subtype == subtype) {
+      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags.
+      if (SUCCEEDED(hr)) {
+        hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
+                                reinterpret_cast<UINT32*>(&surface_width_),
+                                reinterpret_cast<UINT32*>(&surface_height_));
+      }
+      if (FAILED(hr)) {
+        NOTREACHED() << "Failed to SetOutputType to |subtype| or obtain "
+                     << "width/height " << std::hex << hr;
+      } else {
+        out_media_type->Release();
+        return true;
+      }
+    }
+    ++i;
+    out_media_type->Release();
+  }
+  return false;
+}
+
+bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
+  HRESULT hr = decoder_->ProcessMessage(msg, NULL);
+  return SUCCEEDED(hr);
+}
+
+// Gets the minimum buffer sizes for input and output samples.
+// The MFT will not allocate buffers for either input or output, so we have
+// to do it ourselves and make sure they're the correct size. The exception
+// is when DXVA is enabled: the decoder then allocates its own output samples.
+bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
+  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
+  if (FAILED(hr)) {
+    LOG(ERROR) << "Failed to get input stream info";
+    return false;
+  }
+  LOG(INFO) << "Input stream info: ";
+  LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
+  // There should be three flags, one for requiring a whole frame be in a
+  // single sample, one for requiring there be one buffer only in a single
+  // sample, and one that specifies a fixed sample size (as in cbSize).
+  LOG(INFO) << "Flags: "
+            << std::hex << std::showbase << input_stream_info_.dwFlags;
+  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
+  LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
+  LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
+  LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
+
+  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
+  if (FAILED(hr)) {
+    LOG(ERROR) << "Failed to get output stream info";
+    return false;
+  }
+  LOG(INFO) << "Output stream info: ";
+  // The flags here should be the same and mean the same thing, except that
+  // when DXVA is enabled there is an extra 0x100 flag meaning the decoder
+  // will allocate its own samples.
+  LOG(INFO) << "Flags: "
+            << std::hex << std::showbase << output_stream_info_.dwFlags;
+  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
+  LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
+  LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
+  return true;
+}
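
The magic values 0x7 and 0x107 map onto named mftransform.h constants; the same checks written symbolically (equivalent, assuming the H.264 MFT reports exactly these flags):

// The three input flags checked above, by name.
CHECK_EQ(input_stream_info_.dwFlags,
         static_cast<DWORD>(MFT_INPUT_STREAM_WHOLE_SAMPLES |
                            MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
                            MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE));
// Output adds PROVIDES_SAMPLES (0x100): the DXVA decoder allocates its
// own D3D-backed output samples.
CHECK_EQ(output_stream_info_.dwFlags,
         static_cast<DWORD>(MFT_OUTPUT_STREAM_WHOLE_SAMPLES |
                            MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
                            MFT_OUTPUT_STREAM_FIXED_SAMPLE_SIZE |
                            MFT_OUTPUT_STREAM_PROVIDES_SAMPLES));
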
+
+bool DXVAVideoDecodeAccelerator::DoDecode() {
+  if (state_ != kNormal && state_ != kEosDrain) {
+    NOTREACHED() << "DoDecode: not in normal or drain state";
+    return false;
+  }
+  MFT_OUTPUT_DATA_BUFFER output_data_buffer;
+  DWORD status = 0;
+
+  // With DXVA enabled the decoder allocates its own output samples, so
+  // pSample is left NULL here.
+  memset(&output_data_buffer, 0, sizeof(output_data_buffer));
+
+  HRESULT hr = decoder_->ProcessOutput(0,  // No flags.
+                                       1,  // # of out streams to pull from.
+                                       &output_data_buffer,
+                                       &status);
+  IMFCollection* events = output_data_buffer.pEvents;
+  if (events != NULL) {
+    LOG(INFO) << "Got events from ProcessOutput, but discarding";
+    events->Release();
+  }
+
+  if (FAILED(hr)) {
+    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+      if (SetDecoderOutputMediaType(ConvertVideoFrameFormatToGuid(
+              media::VideoFrame::NV12))) {
+        return true;
+      }
+      NOTREACHED() << "Failed to set decoder output media type";
+      return false;
+    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+      if (state_ == DXVAVideoDecodeAccelerator::kEosDrain) {
+        // No more output from the decoder. Notify EOS and stop playback.
+        state_ = DXVAVideoDecodeAccelerator::kStopped;
+        return false;
+      }
+      return true;
+    } else {
+      NOTREACHED() << "Unhandled error in DoDecode()";
+      state_ = DXVAVideoDecodeAccelerator::kStopped;
+      return false;
+    }
+  }
+  if (!ProcessOutputSample(output_data_buffer.pSample)) {
+    NOTREACHED() << "Failed to process output sample";
+    return false;
+  }
+
+#if 0
+  // No distinction between the 3 planes - all 3 point to the handle of
+  // the texture. (There are actually only 2 planes since the output
+  // D3D surface is in NV12 format.)
+  VideoFrame::D3dTexture textures[VideoFrame::kMaxPlanes] = { surface.get(),
+                                                              surface.get(),
+                                                              surface.get() };
+  VideoFrame::CreateFrameD3dTexture(info_.stream_info.surface_format,
+                                    info_.stream_info.surface_width,
+                                    info_.stream_info.surface_height,
+                                    textures,
+                                    TimeDelta::FromMicroseconds(timestamp),
+                                    TimeDelta::FromMicroseconds(duration),
+                                    &frame);
+  if (!frame.get()) {
+    NOTREACHED() << "Failed to allocate video frame for d3d texture";
+    return true;
+  }
+#endif
+  return true;
+}
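
Leaving pSample NULL is only valid because this MFT reports MFT_OUTPUT_STREAM_PROVIDES_SAMPLES (the 0x100 flag checked in GetStreamsInfoAndBufferReqs()). A generic caller would branch on that flag; sketched here for illustration:

// Generic MFT output handling: allocate only if the MFT doesn't.
if (output_stream_info_.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) {
  output_data_buffer.pSample = NULL;  // The MFT allocates the sample.
} else {
  // Software path: the caller supplies a sample of at least cbSize bytes.
  output_data_buffer.pSample = CreateEmptySampleWithBuffer(
      output_stream_info_.cbSize, output_stream_info_.cbAlignment);
}
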
+
+bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
+  if (!sample) {
+    NOTREACHED() << "ProcessOutput succeeded, but did not get a sample back";
+    return false;
+  }
+  // Take ownership of the reference the decoder handed us in
+  // MFT_OUTPUT_DATA_BUFFER::pSample.
+  base::win::ScopedComPtr<IMFSample> output_sample;
+  output_sample.Attach(sample);
+
+  int64 timestamp = 0, duration = 0;
+  if (FAILED(output_sample->GetSampleTime(&timestamp)) ||
+      FAILED(output_sample->GetSampleDuration(&duration))) {
+    NOTREACHED() << "Failed to get timestamp/duration from output";
+  }
+
+  // Sanity-check that there is really something in the sample.
+  DWORD buf_count = 0;
+  HRESULT hr = output_sample->GetBufferCount(&buf_count);
+  if (FAILED(hr) || buf_count != 1) {
+    NOTREACHED() << "Failed to get buffer count, or buffer count mismatch";
+    return false;
+  }
+
+  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
+  hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to get buffer from sample";
+    return false;
+  }
+
+  base::win::ScopedComPtr<IDirect3DSurface9> surface;
+  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+                    IID_PPV_ARGS(surface.Receive()));
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to get surface from buffer";
+    return false;
+  }
+
+  // If we have available picture buffers to copy the output data into, use
+  // the first one and then flag it as no longer available for use.
+  OutputBuffers::iterator index;
+  for (index = available_pictures_.begin();
+       index != available_pictures_.end();
+       ++index) {
+    if (index->second.available) {
+      CopyOutputSampleDataToPictureBuffer(output_sample, surface.get(),
+                                          index->second.picture_buffer);
+      index->second.available = false;
+      return true;
+    }
+  }
+  D3DSURFACE_DESC surface_desc;
+  hr = surface->GetDesc(&surface_desc);
+  if (FAILED(hr)) {
+    NOTREACHED() << "Failed to get surface description";
+    return false;
+  }
+  client_->ProvidePictureBuffers(
+      1, gfx::Size(surface_desc.Width, surface_desc.Height));
+  pending_output_samples_.push_back(output_sample);
+  return true;
+}
+
+bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
+    IMFSample* sample, IDirect3DSurface9* surface,
+    media::PictureBuffer picture_buffer) {
+  DCHECK(sample);
+  DCHECK(surface);
+
+  static Gles2TextureToEglImageTranslator texture2eglImage_translator;
+  EGLImageKHR egl_image = texture2eglImage_translator.TranslateToEglImage(

apatrick_chromium 2011/11/16 22:57:17:
... and here we have a D3D surface (not a texture)

ananta 2011/12/13 01:39:15:
Removed this. Was an incorrect cut paste.

+      egl_display_, egl_context_, picture_buffer.texture_id());
+  media::Picture output_picture(picture_buffer.id(), 0);
+  client_->PictureReady(output_picture);
+  return true;
+}
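
As the review thread notes, the EGL-image call above was a bad cut-and-paste: what this function actually holds is a D3D9 surface, not a GL texture. One plausible direction (an illustrative sketch only, not what this patch or the eventual fix necessarily does) is to blit the decoded NV12 surface to an RGB render target with IDirect3DDevice9::StretchRect, which performs the color conversion when the driver supports it:

// Hedged sketch: copy/convert the decoded surface into an RGB target.
// |device_| and |surface| as above; the render target is illustrative.
base::win::ScopedComPtr<IDirect3DSurface9> target;
HRESULT hr = device_->CreateRenderTarget(surface_width_,
                                         surface_height_,
                                         D3DFMT_X8R8G8B8,
                                         D3DMULTISAMPLE_NONE,
                                         0,      // No multisample quality.
                                         FALSE,  // Not lockable.
                                         target.Receive(),
                                         NULL);
if (SUCCEEDED(hr)) {
  // StretchRect from the NV12 decode surface does the colorspace
  // conversion, provided the driver supports it.
  hr = device_->StretchRect(surface, NULL, target.get(), NULL, D3DTEXF_NONE);
}
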
+ |