Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(101)

Unified Diff: content/common/gpu/media/dxva_video_decode_accelerator.cc

Issue 8510039: Initial implementation of the DXVA 2.0 H.264 hardware decoder for pepper for Windows. The decodin... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: content/common/gpu/media/dxva_video_decode_accelerator.cc
===================================================================
--- content/common/gpu/media/dxva_video_decode_accelerator.cc (revision 0)
+++ content/common/gpu/media/dxva_video_decode_accelerator.cc (revision 0)
@@ -0,0 +1,795 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
+
+#if !defined(OS_WIN)
+#error This file should only be built on Windows.
+#endif // !defined(OS_WIN)
+
+#include <ks.h>
+#include <codecapi.h>
+#include <d3dx9tex.h>
+#include <mfapi.h>
+#include <mferror.h>
+#include <wmcodecdsp.h>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/memory/scoped_handle.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop.h"
+#include "base/process_util.h"
+#include "base/shared_memory.h"
+#include "media/video/video_decode_accelerator.h"
+#include "third_party/angle/include/GLES2/gl2.h"
+#include "third_party/angle/include/GLES2/gl2ext.h"
+
+// We only request 5 picture buffers from the client which are used to hold the
+// decoded samples. These buffers are then reused when the client tells us that
+// it is done with the buffer.
+static const int kNumPictureBuffers = 5;
+
+// Evaluates |result|; when false, logs |log| at verbose level 1 and returns
+// |ret| from the enclosing function.
+#define RETURN_ON_FAILURE(result, log, ret) \
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 A single failure in CreateEmptySample will result
ananta 2011/12/20 02:27:22 Replaced the logging with a CHECK condition instea
+  do { \
+    if (!(result)) { \
+      DVLOG(1) << log; \
+      return ret; \
+    } \
+  } while (0)
+
+// HRESULT flavor of RETURN_ON_FAILURE: fails when FAILED(result). Note there
+// is deliberately no trailing semicolon in the expansion; the use site
+// supplies it, which keeps the macro safe inside unbraced if/else statements.
+#define RETURN_ON_HR_FAILURE(result, log, ret) \
+  RETURN_ON_FAILURE(SUCCEEDED(result), log, ret)
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 If you replaced log in this call with: log << ", H
ananta 2011/12/20 02:27:22 Done.
+
+// Like RETURN_ON_FAILURE, but additionally reports |error_code| to the client
+// via StopOnError() before returning |ret|. Only usable inside member
+// functions of DXVAVideoDecodeAccelerator (it calls StopOnError unqualified).
+#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
+  do { \
+    if (!(result)) { \
+      DVLOG(1) << log; \
+      StopOnError(error_code); \
+      return ret; \
+    } \
+  } while (0)
+
+// HRESULT flavor of RETURN_AND_NOTIFY_ON_FAILURE. No trailing semicolon in
+// the expansion (see RETURN_ON_HR_FAILURE); the use site supplies it.
+#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret) \
+  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result), log, error_code, ret)
+
+// Creates a Media Foundation sample with no buffers attached. Returns NULL on
+// failure; on success the caller owns the returned reference.
+static IMFSample* CreateEmptySample() {
+  base::win::ScopedComPtr<IMFSample> sample;
+  HRESULT hr = MFCreateSample(sample.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Unable to create an empty sample", NULL);
+  return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer of length |buffer_length|
+// on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
+// Returns NULL on failure; on success the caller owns the returned reference.
+static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
+  CHECK_GT(buffer_length, 0);
+
+  base::win::ScopedComPtr<IMFSample> sample;
+  sample.Attach(CreateEmptySample());
+  RETURN_ON_FAILURE(sample.get(), "Failed to create sample", NULL);
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 Here and elsewhere, I don't think you need the .ge
ananta 2011/12/20 02:27:22 Done.
+
+  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+  HRESULT hr = E_FAIL;
+  if (align == 0) {
+    // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer
+    // with the align argument being 0.
+    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
+  } else {
+    // MFCreateAlignedMemoryBuffer takes the alignment minus one (the
+    // MF_x_BYTE_ALIGNMENT constants are defined as N - 1).
+    hr = MFCreateAlignedMemoryBuffer(buffer_length,
+                                     align - 1,
+                                     buffer.Receive());
+  }
+  RETURN_ON_HR_FAILURE(hr, "Unable to create an empty buffer", NULL);
+
+  hr = sample->AddBuffer(buffer.get());
+  RETURN_ON_HR_FAILURE(hr, "Failed to add empty buffer to sample", NULL);
+
+  return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer containing a copy of the
+// given Annex B stream data.
+// If duration and sample time are not known, provide 0.
+// |min_size| specifies the minimum size of the buffer (might be required by
+// the decoder for input). If no alignment is required, provide 0.
+// Returns NULL on failure; on success the caller owns the returned reference.
+static IMFSample* CreateInputSample(const uint8* stream, int size,
+                                    int min_size, int alignment) {
+  CHECK(stream);
+  CHECK_GT(size, 0);
+  base::win::ScopedComPtr<IMFSample> sample;
+  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
+                                            alignment));
+  RETURN_ON_FAILURE(sample.get(), "Failed to create empty buffer for input",
+                    NULL);
+
+  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer in sample", NULL);
+
+  DWORD max_length = 0;
+  DWORD current_length = 0;
+  uint8* destination = NULL;
+  hr = buffer->Lock(&destination, &max_length, &current_length);
+  RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);
+
+  // A freshly created buffer must start empty and be big enough for |size|.
+  CHECK_EQ(current_length, 0u);
+  CHECK_GE(static_cast<int>(max_length), size);
+  memcpy(destination, stream, size);
+  CHECK(SUCCEEDED(buffer->Unlock()));
+
+  hr = buffer->SetCurrentLength(size);
+  RETURN_ON_HR_FAILURE(hr, "Failed to set current length", NULL);
+
+  // Mark the sample as a clean point so the decoder may start from it.
+  hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE);
+  RETURN_ON_HR_FAILURE(hr, "Failed to mark sample as key sample", NULL);
+
+  return sample.Detach();
+}
+
+// Duplicates the renderer's shared-memory handle from |bitstream_buffer|,
+// maps it read-only in this process, and wraps the bytes in an input sample
+// sized by |stream_size| and aligned to |alignment|. Returns NULL on failure.
+static IMFSample* CreateSampleFromInputBuffer(
+    const media::BitstreamBuffer& bitstream_buffer,
+    base::ProcessHandle renderer_process,
+    DWORD stream_size,
+    DWORD alignment) {
+  HANDLE shared_memory_handle = NULL;
+  RETURN_ON_FAILURE(::DuplicateHandle(renderer_process,
+                                      bitstream_buffer.handle(),
+                                      base::GetCurrentProcessHandle(),
+                                      &shared_memory_handle,
+                                      0,
+                                      FALSE,
+                                      DUPLICATE_SAME_ACCESS),
+                    "Failed to open duplicate shared mem handle", NULL);
+
+  // |shm| assumes ownership of the duplicated handle. CreateInputSample
+  // copies the bytes into the MF buffer, so the mapping can go away when
+  // |shm| goes out of scope.
+  base::SharedMemory shm(shared_memory_handle, true);
+  RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
+                    "Failed in SharedMemory::Map", NULL);
+
+  return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
+                           bitstream_buffer.size(),
+                           stream_size,
+                           alignment);
+}
+
+// Tracks a client-provided picture buffer and whether it is currently free
+// for receiving decoded output.
+DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
+    bool picture_available, const media::PictureBuffer& buffer)
+    : available(picture_available),
+      picture_buffer(buffer) {
+}
+
+// Associates a decoded output surface with the id of the input bitstream
+// buffer that produced it.
+DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
+    int32 buffer_id, IDirect3DSurface9* surface)
+    : input_buffer_id(buffer_id),
+      dest_surface(surface) {
+}
+
+DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
+
+// |client| receives all decode notifications; |renderer_process| is used to
+// duplicate shared-memory handles for incoming bitstream buffers. The decoder
+// starts in kUninitialized until Initialize() succeeds.
+DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
+    media::VideoDecodeAccelerator::Client* client,
+    base::ProcessHandle renderer_process)
+    : client_(client),
+      state_(kUninitialized),
+      pictures_requested_(false),
+      renderer_process_(renderer_process),
+      dev_manager_reset_token_(0),
+      last_input_buffer_id_(-1),
+      inputs_before_decode_(0) {
+}
+
+DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
+  client_ = NULL;
+}
+
+// Starts Media Foundation, creates the D3D device/manager and the H.264
+// decoder MFT, queries stream requirements and begins streaming. Posts
+// NotifyInitializeDone on success; notifies the client via StopOnError and
+// returns false on failure.
+bool DXVAVideoDecodeAccelerator::Initialize(Profile) {
+  DCHECK(CalledOnValidThread());
+
+  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
+      "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);
+
+  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
+  // Stream the HRESULT itself; std::hex/std::showbase alone print nothing.
+  RETURN_AND_NOTIFY_ON_HR_FAILURE(
+      hr, "MFStartup failed. Error:" << std::hex << std::showbase << hr,
+      PLATFORM_FAILURE, false);
+
+  RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
+      "Failed to create device manager", PLATFORM_FAILURE, false);
+
+  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(),
+      "Failed to initialize decoder", PLATFORM_FAILURE, false);
+
+  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
+      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);
+
+  RETURN_AND_NOTIFY_ON_FAILURE(
+      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
+      "Failed to start decoder", PLATFORM_FAILURE, false);
+
+  state_ = kNormal;
+  MessageLoop::current()->PostTask(FROM_HERE,
+      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInitializeDone, this));
+  return true;
+}
+
+// Copies |bitstream_buffer| into an input sample, feeds it to the MFT, asks
+// for end-of-stream drain, and attempts to pull decoded output via DoDecode().
+// Always notifies the client that the input buffer was consumed, because the
+// MFT buffers frames internally and may not emit output for every input.
+void DXVAVideoDecodeAccelerator::Decode(
+    const media::BitstreamBuffer& bitstream_buffer) {
+  DCHECK(CalledOnValidThread());
+
+  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
+      "Invalid state: " << state_, ILLEGAL_STATE,);
+
+  base::win::ScopedComPtr<IMFSample> sample;
+  sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
+                                            renderer_process_,
+                                            input_stream_info_.cbSize,
+                                            input_stream_info_.cbAlignment));
+  RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
+      PLATFORM_FAILURE,);
+  // Start of an ETW trace span closed in DoDecode() once output arrives;
+  // |inputs_before_decode_| counts how many inputs that took.
+  if (!inputs_before_decode_) {
+    TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
+  }
+  inputs_before_decode_++;
+
+  RETURN_AND_NOTIFY_ON_FAILURE(
+      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
+      "Failed to create input sample", PLATFORM_FAILURE,);
+
+  RETURN_AND_NOTIFY_ON_HR_FAILURE(decoder_->ProcessInput(0, sample.get(), 0),
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 Macros use text-substitution at compile time, so p
ananta 2011/12/20 02:27:22 Done.
+      "Failed to process input sample", PLATFORM_FAILURE,);
+
+  if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) {
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 You have a check at the top of this function that
ananta 2011/12/20 02:27:22 Done.
+    RETURN_AND_NOTIFY_ON_FAILURE(
+        SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0),
+        "Failed to send eos message to MFT", PLATFORM_FAILURE,);
+    state_ = kEosDrain;
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 IIUC DoDecode() always sets state_ to normal or st
ananta 2011/12/20 02:27:22 DoDecode also gets called from Flush which sets th
+  }
+
+  // Remembered so ProcessOutputSample can tag output with the producing input.
+  last_input_buffer_id_ = bitstream_buffer.id();
+
+  DoDecode();
+
+  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
+      "Failed to process output. Unexpected decoder state: " << state_,
+      ILLEGAL_STATE,);
+
+  // The Microsoft Media foundation decoder internally buffers up to 30 frames
+  // before returning a decoded frame. We need to inform the client that this
+  // input buffer is processed as it may stop sending us further input.
+  // Note: This may break clients which expect every input buffer to be
+  // associated with a decoded output buffer.
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+      &DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this,
+      bitstream_buffer.id()));
+}
+
+// Records the client-provided picture buffers as available and immediately
+// tries to drain any decoded samples that were waiting for buffers.
+void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
+    const std::vector<media::PictureBuffer>& buffers) {
+  DCHECK(CalledOnValidThread());
+  // Copy the picture buffers provided by the client to the available list,
+  // and mark these buffers as available for use.
+  for (size_t buffer_index = 0; buffer_index < buffers.size();
+       ++buffer_index) {
+    // Duplicate buffer ids from the client are a logic error.
+    bool inserted = output_picture_buffers_.insert(std::make_pair(
+        buffers[buffer_index].id(),
+        DXVAPictureBuffer(true, buffers[buffer_index]))).second;
+    DCHECK(inserted);
+  }
+  ProcessPendingSamples();
+}
+
+// Marks the buffer identified by |picture_buffer_id| as free again and tries
+// to drain any pending decoded samples into it.
+void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
+    int32 picture_buffer_id) {
+  DCHECK(CalledOnValidThread());
+
+  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
+  // An unknown id is a bad argument from the client, not a platform error.
+  RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(),
+      "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
+
+  it->second.available = true;
+  ProcessPendingSamples();
+}
+
+// Drains the decoder: sends MFT_MESSAGE_COMMAND_DRAIN, then pulls output via
+// DoDecode() until the MFT reports it needs more input (which DoDecode maps
+// to kStopped). Posts NotifyFlushDone and returns to kNormal.
+void DXVAVideoDecodeAccelerator::Flush() {
+  DCHECK(CalledOnValidThread());
+
+  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";
+
+  // Flushing is only legal from the steady states. (Was "state_ != kStopped",
+  // which made the whole condition a tautology.)
+  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
+      "Unexpected decoder state: " << state_, ILLEGAL_STATE,);
+
+  state_ = kEosDrain;
+
+  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
+      "Failed to send drain message", PLATFORM_FAILURE,);
+
+  // As per MSDN docs after the client sends this message, it calls
+  // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the
+  // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets
+  // the state to kStopped when the decoder returns
+  // MF_E_TRANSFORM_NEED_MORE_INPUT.
+  while (state_ != kStopped) {
+    DoDecode();
+  }
+
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+      &DXVAVideoDecodeAccelerator::NotifyFlushDone, this));
+
+  state_ = kNormal;
+}
+
+// Flushes the MFT's internal buffers (discarding them) and posts
+// NotifyResetDone. Decoding can resume immediately afterwards.
+void DXVAVideoDecodeAccelerator::Reset() {
+  DCHECK(CalledOnValidThread());
+
+  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";
+
+  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
+      "Reset: invalid state: " << state_, ILLEGAL_STATE,);
+
+  state_ = kResetting;
+
+  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
+      "Reset: Failed to send message.", PLATFORM_FAILURE,);
+
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+      &DXVAVideoDecodeAccelerator::NotifyResetDone, this));
+
+  state_ = DXVAVideoDecodeAccelerator::kNormal;
+}
+
+// Dismisses all outstanding picture buffers and tears down decoder state.
+// NOTE(review): the VideoDecodeAccelerator contract forbids calling back into
+// the client after Destroy(); dismissing buffers here touches |client_| --
+// confirm against the interface comment before relying on this.
+void DXVAVideoDecodeAccelerator::Destroy() {
+  DCHECK(CalledOnValidThread());
+
+  OutputBuffers::iterator index;
+  for (index = output_picture_buffers_.begin();
+       index != output_picture_buffers_.end();
+       ++index) {
+    client_->DismissPictureBuffer(index->second.picture_buffer.id());
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 This still violates Destroy's API. You must not to
ananta 2011/12/20 02:27:22 Done.
+  }
+  Invalidate();
+}
+
+// Creates a minimal (1x1 back buffer, windowed) D3D9Ex device and a DXVA2
+// device manager, then binds the device to the manager so the decoder MFT
+// can allocate video surfaces on it. Returns false on any failure.
+bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
+  base::win::ScopedComPtr<IDirect3D9Ex> d3d9;
+
+  RETURN_ON_HR_FAILURE(Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive()),
+                       "Failed to create D3D9", false);
+
+  D3DPRESENT_PARAMETERS present_params = {0};
+  present_params.BackBufferWidth = 1;
+  present_params.BackBufferHeight = 1;
+  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+  present_params.BackBufferCount = 1;
+  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+  // The device never presents; the shell window is only a required HWND.
+  present_params.hDeviceWindow = GetShellWindow();
+  present_params.Windowed = TRUE;
+  present_params.Flags = D3DPRESENTFLAG_VIDEO;
+  present_params.FullScreen_RefreshRateInHz = 0;
+  present_params.PresentationInterval = 0;
+
+  HRESULT hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT,
+                                    D3DDEVTYPE_HAL,
+                                    GetShellWindow(),
+                                    D3DCREATE_SOFTWARE_VERTEXPROCESSING,
+                                    &present_params,
+                                    NULL,
+                                    device_.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D Device", false);
+
+  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
+                                         device_manager_.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Couldn't create D3D Device manager", false);
+
+  hr = device_manager_->ResetDevice(device_.get(),
+                                    dev_manager_reset_token_);
+  RETURN_ON_HR_FAILURE(hr, "Failed to set device to device manager", false);
+  return true;
+}
+
+// Instantiates the Microsoft H.264 decoder MFT, verifies it is DXVA capable,
+// hands it the D3D device manager, and configures input/output media types.
+bool DXVAVideoDecodeAccelerator::InitDecoder() {
+  HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
+                                NULL,
+                                CLSCTX_INPROC_SERVER,
+                                __uuidof(IMFTransform),
+                                reinterpret_cast<void**>(decoder_.Receive()));
+  RETURN_ON_HR_FAILURE(hr, "Failed to CoCreate decoder.", false);
+
+  RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
+                    "Failed to check decoder DXVA support", false);
+
+  hr = decoder_->ProcessMessage(
+      MFT_MESSAGE_SET_D3D_MANAGER,
+      reinterpret_cast<ULONG_PTR>(device_manager_.get()));
+  RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D9 device to decoder.", false);
+
+  return SetDecoderMediaTypes();
+}
+
+// Verifies the decoder MFT is D3D aware (i.e. can do DXVA) and asks it to
+// prefer hardware acceleration for H.264. Returns false if either fails.
+bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
+  base::win::ScopedComPtr<IMFAttributes> attributes;
+  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Failed to pass decoder attributes.", false);
+
+  UINT32 dxva = 0;
+  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
+  RETURN_ON_HR_FAILURE(hr, "Failed to get DXVA aware attribute.", false);
+  // MF_SA_D3D_AWARE is nonzero when the MFT can accept a D3D device manager.
+  // Previously the value was fetched but never checked.
+  RETURN_ON_FAILURE(dxva, "Decoder is not D3D aware.", false);
+
+  hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
+  RETURN_ON_HR_FAILURE(hr, "Failed to force codec to use H/W DXVA.", false);
+  return true;
+}
+
+// Configures the MFT's input type (H.264) and output type (NV12), in that
+// order as required by IMFTransform.
+bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
+  RETURN_ON_FAILURE(SetDecoderInputMediaType(),
+                    "Failed to set decoder input media type", false);
+  return SetDecoderOutputMediaType(MFVideoFormat_NV12);
+}
+
+// Builds a video/H.264 media type and sets it as the MFT's input type.
+bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
+  base::win::ScopedComPtr<IMFMediaType> media_type;
+  HRESULT hr = MFCreateMediaType(media_type.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Failed to create empty media type object", false);
+
+  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+  RETURN_ON_HR_FAILURE(hr, "SetGUID for major type failed", false);
+
+  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
+  RETURN_ON_HR_FAILURE(hr, "SetGUID for subtype failed", false);
+
+  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags
+  RETURN_ON_HR_FAILURE(hr, "Failed to set decoder's input type", false);
+  return true;
+}
+
+// Enumerates the MFT's available output types and selects the first whose
+// subtype matches |subtype|. Returns false if no matching type is offered.
+bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
+    const GUID& subtype) {
+  base::win::ScopedComPtr<IMFMediaType> out_media_type;
+
+  for (uint32 i = 0;
+       SUCCEEDED(decoder_->GetOutputAvailableType(0, i,
+                                                  out_media_type.Receive()));
+       ++i) {
+    GUID out_subtype = {0};
+    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+    RETURN_ON_HR_FAILURE(hr, "Failed to get output media type guid", false);
+
+    if (out_subtype == subtype) {
+      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags
+      RETURN_ON_HR_FAILURE(hr, "Failed to set output media type", false);
+      return true;
+    }
+    // Release before the next Receive(); ScopedComPtr CHECKs on reuse.
+    out_media_type.Release();
+  }
+  return false;
+}
+
+// Forwards |msg| with |param| to the decoder MFT, folding the HRESULT down
+// to a success/failure bool.
+bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
+                                                int32 param) {
+  return SUCCEEDED(decoder_->ProcessMessage(msg, param));
+}
+
+// Gets the minimum buffer sizes for input and output samples. The MFT will not
+// allocate buffer for input nor output, so we have to do it ourselves and make
+// sure they're the correct size. We only provide decoding if DXVA is enabled.
+// The CHECKs pin the exact flag combinations this implementation depends on.
+bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
+  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
+  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);
+
+  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
+  RETURN_ON_HR_FAILURE(hr, "Failed to get output stream info", false);
+
+  VLOG(1) << "Input stream info: ";
+  VLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
+  // There should be three flags, one for requiring a whole frame be in a
+  // single sample, one for requiring there be one buffer only in a single
+  // sample, and one that specifies a fixed sample size. (as in cbSize)
+  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
+
+  VLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
+  VLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
+  VLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;
+
+  VLOG(1) << "Output stream info: ";
+  // The flags here should be the same and mean the same thing, except when
+  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
+  // allocate its own sample.
+  VLOG(1) << "Flags: "
+          << std::hex << std::showbase << output_stream_info_.dwFlags;
+  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
+  VLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
+  VLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
+  return true;
+}
+
+// Pulls one decoded sample from the MFT via ProcessOutput and hands it to
+// ProcessOutputSample(). Sets state_ to kStopped when the decoder needs more
+// input or reports a stream change, and back to kNormal on success.
+void DXVAVideoDecodeAccelerator::DoDecode() {
+  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kEosDrain),
+      "DoDecode: not in normal or drain state", ILLEGAL_STATE,);
+
+  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
+  DWORD status = 0;
+
+  HRESULT hr = decoder_->ProcessOutput(0,  // No flags
+                                       1,  // # of out streams to pull from
+                                       &output_data_buffer,
+                                       &status);
+  IMFCollection* events = output_data_buffer.pEvents;
+  if (events != NULL) {
+    // Fixed log typo: "ProcessOuput" -> "ProcessOutput".
+    VLOG(1) << "Got events from ProcessOutput, but discarding";
+    events->Release();
+  }
+  if (FAILED(hr)) {
+    // A stream change needs further ProcessInput calls to get back decoder
+    // output which is why we need to set the state to stopped.
+    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+      if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
+        NOTREACHED() << "Failed to set decoder output media type";
+      }
+      state_ = kStopped;
+      return;
+    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+      // This function is also called from Flush in a loop which could result
+      // in the state transitioning to kNormal due to decoded output.
+      DCHECK(state_ == kEosDrain || state_ == kNormal);
+      // No more output from the decoder. Stop playback.
+      state_ = kStopped;
+      return;
+    } else {
+      NOTREACHED() << "Unhandled error in DoDecode()";
+      return;
+    }
+  }
+  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
+
+  TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
+                 inputs_before_decode_);
+
+  inputs_before_decode_ = 0;
+
+  RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
+      "Failed to process output sample.", PLATFORM_FAILURE,);
+
+  state_ = kNormal;
+}
+
+// Takes ownership of |sample|, extracts its D3D surface, copies it into an
+// RGB offscreen surface, and queues the result for delivery to the client.
+// Requests picture buffers from the client on first use.
+bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
+  RETURN_ON_FAILURE(sample,
+      "ProcessOutput succeeded, but did not get a sample back", false);
+
+  // Assume ownership so the sample is released on every return path.
+  base::win::ScopedComPtr<IMFSample> output_sample;
+  output_sample.Attach(sample);
+
+  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
+  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
+  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample.", false);
+
+  base::win::ScopedComPtr<IDirect3DSurface9> surface;
+  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+                    IID_PPV_ARGS(surface.Receive()));
+  RETURN_ON_HR_FAILURE(hr, "Failed to get surface from buffer.", false);
+
+  D3DSURFACE_DESC surface_desc;
+  hr = surface->GetDesc(&surface_desc);
+  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false);
+
+  TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this,
+                        "");
+  // TODO(ananta)
+  // The code below may not be necessary once we have an ANGLE extension which
+  // allows us to pass the Direct 3D surface directly for rendering.
+
+  // The decoded bits in the source direct 3d surface are in the YUV
+  // format. Angle does not support that. As a workaround we create an
+  // offscreen surface in the RGB format and copy the source surface
+  // to this surface.
+  base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
+  hr = device_->CreateOffscreenPlainSurface(surface_desc.Width,
+                                            surface_desc.Height,
+                                            D3DFMT_A8R8G8B8,
+                                            D3DPOOL_DEFAULT,
+                                            dest_surface.Receive(),
+                                            NULL);
+  RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface.", false);
+
+  // D3DXLoadSurfaceFromSurface also performs the YUV -> RGB conversion.
+  hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL,
+                                  NULL, 0, 0);
+  RETURN_ON_HR_FAILURE(hr, "Failed to copy source surface to dest.", false);
+
+  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, "");
+
+  pending_output_samples_.push_back(
+      PendingSampleInfo(last_input_buffer_id_, dest_surface));
+
+  // If we have available picture buffers to copy the output data then use the
+  // first one and then flag it as not being available for use.
+  if (output_picture_buffers_.size()) {
+    ProcessPendingSamples();
+    return true;
+  }
+  if (pictures_requested_) {
+    DVLOG(1) << "Waiting for picture slots from the client.";
+    return true;
+  }
+  // Go ahead and request picture buffers.
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+      &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
+      this, surface_desc.Width, surface_desc.Height));
+
+  pictures_requested_ = true;
+  return true;
+}
+
+// Reads the RGB bits out of |dest_surface| via GDI, uploads them into the
+// picture buffer's GL texture, and posts a PictureReady notification pairing
+// the picture with |input_buffer_id|.
+bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
+    IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer,
+    int input_buffer_id) {
+  DCHECK(dest_surface);
+
+  D3DSURFACE_DESC surface_desc;
+  HRESULT hr = dest_surface->GetDesc(&surface_desc);
+  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false);
+
+  scoped_array<char> bits;
+  RETURN_ON_FAILURE(GetBitmapFromSurface(dest_surface, &bits),
+                    "Failed to read bits from D3D surface", false);
+
+  // This function currently executes in the context of IPC handlers in the
+  // GPU process which ensures that there is always a OpenGL context.
+  // Save and restore the current binding so callers are unaffected.
+  GLint current_texture = 0;
+  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);
+
+  glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id());
+  glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width,
+               surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE,
+               reinterpret_cast<GLvoid*>(bits.get()));
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+  glBindTexture(GL_TEXTURE_2D, current_texture);
+
+  media::Picture output_picture(picture_buffer.id(), input_buffer_id);
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+      &DXVAVideoDecodeAccelerator::NotifyPictureReady, this, output_picture));
+  return true;
+}
+
+// Pairs queued decoded samples (FIFO) with available picture buffers,
+// copying each sample into a buffer and marking the buffer in-use. Stops
+// when either the pending queue or the pool of free buffers is exhausted.
+void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
+  if (pending_output_samples_.empty())
+    return;
+
+  OutputBuffers::iterator index;
+
+  for (index = output_picture_buffers_.begin();
+       index != output_picture_buffers_.end() &&
+       !pending_output_samples_.empty();
Ami GONE FROM CHROMIUM 2011/12/19 22:53:44 de-indent
ananta 2011/12/20 02:27:22 Done.
+       ++index) {
+    if (index->second.available) {
+      PendingSampleInfo sample_info = pending_output_samples_.front();
+
+      CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface,
+                                          index->second.picture_buffer,
+                                          sample_info.input_buffer_id);
+      index->second.available = false;
+      pending_output_samples_.pop_front();
+    }
+  }
+}
+
+// Drops all per-stream bookkeeping: picture buffers, queued samples and the
+// last seen input buffer id.
+void DXVAVideoDecodeAccelerator::ClearState() {
+  last_input_buffer_id_ = -1;
+  output_picture_buffers_.clear();
+  pending_output_samples_.clear();
+}
+
+// Reports |error| to the client (at most once -- |client_| is cleared) and
+// tears down the decoder if it was initialized.
+void DXVAVideoDecodeAccelerator::StopOnError(
+    media::VideoDecodeAccelerator::Error error) {
+  DCHECK(CalledOnValidThread());
+
+  if (client_)
+    client_->NotifyError(error);
+  client_ = NULL;
+
+  if (state_ != kUninitialized) {
+    Invalidate();
+  }
+}
+
+// Reads the pixels of |surface| into |bits| via GDI: grabs the surface's DC,
+// queries the backing bitmap, then uses GetDIBits twice (first to learn the
+// image size, then to copy). Every exit path releases the DC.
+bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface(
+    IDirect3DSurface9* surface,
+    scoped_array<char>* bits) {
+  // Get the currently loaded bitmap from the DC.
+  HDC hdc = NULL;
+  HRESULT hr = surface->GetDC(&hdc);
+  RETURN_ON_HR_FAILURE(hr, "Failed to get HDC for dest offscreen surface.",
+                       false);
+  HBITMAP bitmap =
+      reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
+  if (!bitmap) {
+    NOTREACHED() << "Failed to get bitmap from DC";
+    surface->ReleaseDC(hdc);
+    return false;
+  }
+  // TODO(ananta)
+  // The code below may not be necessary once we have an ANGLE extension which
+  // allows us to pass the Direct 3D surface directly for rendering.
+  // The Device dependent bitmap is upside down for OpenGL. We convert the
+  // bitmap to a DIB and render it on the texture instead.
+  BITMAP bitmap_basic_info = {0};
+  if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) {
+    NOTREACHED() << "Failed to read bitmap info";
+    surface->ReleaseDC(hdc);
+    return false;
+  }
+  BITMAPINFO bitmap_info = {0};
+  bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+  bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth;
+  bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight;
+  bitmap_info.bmiHeader.biPlanes = 1;
+  bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel;
+  bitmap_info.bmiHeader.biCompression = BI_RGB;
+  bitmap_info.bmiHeader.biSizeImage = 0;
+  bitmap_info.bmiHeader.biClrUsed = 0;
+
+  // First call with a NULL buffer fills in biSizeImage.
+  int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS);
+  if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) {
+    NOTREACHED() << "Failed to read bitmap size";
+    surface->ReleaseDC(hdc);
+    return false;
+  }
+
+  bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]);
+  ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(),
+                  &bitmap_info, DIB_RGB_COLORS);
+  if (!ret) {
+    NOTREACHED() << "Failed to retrieve bitmap bits.";
+  }
+  surface->ReleaseDC(hdc);
+  return !!ret;
+}
+
+// Releases all COM objects, shuts down Media Foundation and returns the
+// decoder to kUninitialized. Safe to call repeatedly.
+void DXVAVideoDecodeAccelerator::Invalidate() {
+  if (state_ == kUninitialized)
+    return;
+  ClearState();
+  decoder_.Release();
+  device_.Release();
+  device_manager_.Release();
+  MFShutdown();
+  state_ = kUninitialized;
+}
+
+// The Notify* helpers below run as posted tasks; each checks |client_| first
+// because StopOnError()/teardown may have cleared it in the meantime.
+void DXVAVideoDecodeAccelerator::NotifyInitializeDone() {
+  if (client_)
+    client_->NotifyInitializeDone();
+}
+
+void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
+  if (client_)
+    client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
+}
+
+void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
+  if (client_)
+    client_->NotifyFlushDone();
+}
+
+void DXVAVideoDecodeAccelerator::NotifyResetDone() {
+  if (client_)
+    client_->NotifyResetDone();
+}
+
+// Runs as a posted task: asks the client for kNumPictureBuffers buffers of
+// the given dimensions.
+void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
+  // This task could execute after the decoder has been torn down.
+  if (state_ == kNormal && client_) {
+    // Braces required: multi-line then-clause per the style guide.
+    client_->ProvidePictureBuffers(kNumPictureBuffers,
+                                   gfx::Size(width, height));
+  }
+}
+
+// Runs as a posted task: delivers a decoded picture to the client.
+void DXVAVideoDecodeAccelerator::NotifyPictureReady(
+    const media::Picture& picture) {
+  // This task could execute after the decoder has been torn down;
+  // Invalidate() resets state_ to kUninitialized, so only that state blocks
+  // delivery. Requiring kNormal here would drop pictures that become ready
+  // while draining or resetting.
+  if (state_ != kUninitialized && client_)
+    client_->PictureReady(picture);
+}

Powered by Google App Engine
This is Rietveld 408576698