// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/dxva_video_decode_accelerator.h"

#if !defined(OS_WIN)
#error This file should only be built on Windows.
#endif  // !defined(OS_WIN)

#include <ks.h>
#include <codecapi.h>
#include <d3dx9tex.h>
#include <mfapi.h>
#include <mferror.h>
#include <wmcodecdsp.h>

#include <algorithm>

#include "base/bind.h"
#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_handle.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop.h"
#include "base/process_util.h"
#include "base/shared_memory.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/angle/include/GLES2/gl2.h"
#include "third_party/angle/include/GLES2/gl2ext.h"

// We only request 5 picture buffers from the client, which are used to hold
// the decoded samples. These buffers are then reused when the client tells us
// that it is done with a buffer.
static const int kNumPictureBuffers = 5;

bool DXVAVideoDecodeAccelerator::pre_sandbox_init_done_ = false;
uint32 DXVAVideoDecodeAccelerator::dev_manager_reset_token_ = 0;
IDirect3DDeviceManager9* DXVAVideoDecodeAccelerator::device_manager_ = NULL;
IDirect3DDevice9Ex* DXVAVideoDecodeAccelerator::device_ = NULL;

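// Error-handling helpers. RETURN_ON_FAILURE and RETURN_ON_HR_FAILURE log the
// failure and bail out of the calling function; the RETURN_AND_NOTIFY_*
// variants additionally report the error to the client via StopOnError().
// Typical usage looks like:
//   HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
//   RETURN_ON_HR_FAILURE(hr, "MFStartup failed", false);
// The |ret| argument is left empty when the calling function returns void.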
#define RETURN_ON_FAILURE(result, log, ret)  \
  do {                                       \
    if (!(result)) {                         \
      DLOG(ERROR) << log;                    \
      return ret;                            \
    }                                        \
  } while (0)

#define RETURN_ON_HR_FAILURE(result, log, ret)                      \
  RETURN_ON_FAILURE(SUCCEEDED(result),                              \
                    log << ", HRESULT: 0x" << std::hex << result,   \
                    ret)

#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
  do {                                                              \
    if (!(result)) {                                                \
      DVLOG(1) << log;                                              \
      StopOnError(error_code);                                      \
      return ret;                                                   \
    }                                                               \
  } while (0)

#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                      \
      log << ", HRESULT: 0x" << std::hex << result,                    \
      error_code, ret)

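// Creates an empty Media Foundation sample with no buffers.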
static IMFSample* CreateEmptySample() {
  base::win::ScopedComPtr<IMFSample> sample;
  HRESULT hr = MFCreateSample(sample.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
  CHECK_GT(buffer_length, 0);

  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySample());
  RETURN_ON_FAILURE(sample, "Failed to create an empty sample", NULL);

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = E_FAIL;
  if (align == 0) {
    // Note that MFCreateMemoryBuffer is the same as
    // MFCreateAlignedMemoryBuffer with the align argument being 0.
    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  } else {
    hr = MFCreateAlignedMemoryBuffer(buffer_length,
                                     align - 1,
                                     buffer.Receive());
  }
  RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);

  hr = sample->AddBuffer(buffer);
  RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL);

  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If duration and sample time are not known, provide 0.
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). If no alignment is required, provide 0.
static IMFSample* CreateInputSample(const uint8* stream, int size,
                                    int min_size, int alignment) {
  CHECK(stream);
  CHECK_GT(size, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
                                            alignment));
  RETURN_ON_FAILURE(sample, "Failed to create empty sample", NULL);

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL);

  DWORD max_length = 0;
  DWORD current_length = 0;
  uint8* destination = NULL;
  hr = buffer->Lock(&destination, &max_length, &current_length);
  RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);

  CHECK_EQ(current_length, 0u);
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(destination, stream, size);

  hr = buffer->Unlock();
  RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);

  hr = buffer->SetCurrentLength(size);
  RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);

  return sample.Detach();
}

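// Wraps the contents of |bitstream_buffer| in an IMFSample. The shared memory
// handle is valid only in the renderer process, so it is duplicated into the
// current (GPU) process before being mapped.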
static IMFSample* CreateSampleFromInputBuffer(
    const media::BitstreamBuffer& bitstream_buffer,
    base::ProcessHandle renderer_process,
    DWORD stream_size,
    DWORD alignment) {
  HANDLE shared_memory_handle = NULL;
  RETURN_ON_FAILURE(::DuplicateHandle(renderer_process,
                                      bitstream_buffer.handle(),
                                      base::GetCurrentProcessHandle(),
                                      &shared_memory_handle,
                                      0,
                                      FALSE,
                                      DUPLICATE_SAME_ACCESS),
                    "Duplicate handle failed", NULL);

  base::SharedMemory shm(shared_memory_handle, true);
  RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
                    "Failed in base::SharedMemory::Map", NULL);

  return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
                           bitstream_buffer.size(),
                           stream_size,
                           alignment);
}

DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available(true),
      picture_buffer(buffer) {
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32 buffer_id, IDirect3DSurface9* surface)
    : input_buffer_id(buffer_id),
      dest_surface(surface) {
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}

// static
void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
  // Should be called only once during program startup.
  DCHECK(!pre_sandbox_init_done_);

  static const wchar_t* const decoding_dlls[] = {
    L"d3d9.dll",
    L"d3dx9_43.dll",
    L"dxva2.dll",
    L"mf.dll",
    L"mfplat.dll",
    L"msmpeg2vdec.dll",
  };

  for (size_t i = 0; i < arraysize(decoding_dlls); ++i) {
    if (!::LoadLibrary(decoding_dlls[i])) {
      DLOG(ERROR) << "Failed to load decoder dll: " << decoding_dlls[i]
                  << ", Error: " << ::GetLastError();
      return;
    }
  }

  RETURN_ON_FAILURE(CreateD3DDevManager(),
                    "Failed to initialize D3D device and manager",);
  pre_sandbox_init_done_ = true;
}

// static
bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  base::win::ScopedComPtr<IDirect3D9Ex> d3d9;

  HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive());
  RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);

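  // The device is never used for presentation, only for DXVA decoding and
  // surface copies, so a minimal 1x1 windowed backbuffer on the shell window
  // is sufficient here.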
  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 1;
  present_params.BackBufferHeight = 1;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = ::GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT,
                            D3DDEVTYPE_HAL,
                            ::GetShellWindow(),
                            D3DCREATE_SOFTWARE_VERTEXPROCESSING,
                            &present_params,
                            NULL,
                            &device_);
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);

  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         &device_manager_);
  RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);

  hr = device_manager_->ResetDevice(device_, dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);
  return true;
}

DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    media::VideoDecodeAccelerator::Client* client,
    base::ProcessHandle renderer_process)
    : client_(client),
      state_(kUninitialized),
      pictures_requested_(false),
      renderer_process_(renderer_process),
      last_input_buffer_id_(-1),
      inputs_before_decode_(0) {
}

DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  client_ = NULL;
}

bool DXVAVideoDecodeAccelerator::Initialize(Profile) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE(pre_sandbox_init_done_,
      "PreSandbox initialization not completed", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
      "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);

  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFStartup failed.", PLATFORM_FAILURE,
                                  false);

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Failed to start decoder", PLATFORM_FAILURE, false);

  state_ = kNormal;
  MessageLoop::current()->PostTask(FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInitializeDone, this));
  return true;
}

void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Invalid state: " << state_, ILLEGAL_STATE,);

  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
                                            renderer_process_,
                                            input_stream_info_.cbSize,
                                            input_stream_info_.cbAlignment));
  RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample",
                               PLATFORM_FAILURE,);
  if (!inputs_before_decode_) {
    TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
  }
  inputs_before_decode_++;

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Failed to send start of stream message to MFT", PLATFORM_FAILURE,);

  HRESULT hr = decoder_->ProcessInput(0, sample, 0);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
                                  PLATFORM_FAILURE,);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0),
      "Failed to send eos message to MFT", PLATFORM_FAILURE,);
  state_ = kEosDrain;

  last_input_buffer_id_ = bitstream_buffer.id();

  DoDecode();

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
      "Failed to process output. Unexpected decoder state: " << state_,
      ILLEGAL_STATE,);

  // The Microsoft Media Foundation decoder internally buffers up to 30 frames
  // before returning a decoded frame. We need to inform the client that this
  // input buffer is processed as it may stop sending us further input.
  // Note: This may break clients which expect every input buffer to be
  // associated with a decoded output buffer.
  // TODO(ananta)
  // Do some more investigation into whether it is possible to get the MFT
  // decoder to emit an output packet for every input packet.
  // http://code.google.com/p/chromium/issues/detail?id=108121
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this,
      bitstream_buffer.id()));
}

void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(CalledOnValidThread());
  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    bool inserted = output_picture_buffers_.insert(std::make_pair(
        buffers[buffer_index].id(),
        DXVAPictureBuffer(buffers[buffer_index]))).second;
    DCHECK(inserted);
  }
  ProcessPendingSamples();
}

void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(CalledOnValidThread());

  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(),
      "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);

  it->second.available = true;
  ProcessPendingSamples();
}

void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Unexpected decoder state: " << state_, ILLEGAL_STATE,);

  state_ = kEosDrain;

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
      "Failed to send drain message", PLATFORM_FAILURE,);

  // As per the MSDN docs, after the client sends this message it should call
  // IMFTransform::ProcessOutput in a loop until ProcessOutput returns the
  // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets
  // the state to kStopped when the decoder returns
  // MF_E_TRANSFORM_NEED_MORE_INPUT.
  // The MFT decoder can buffer up to 30 frames worth of input before returning
  // an output frame. This loop attempts to retrieve as many output frames
  // as possible from the buffered set.
  while (state_ != kStopped) {
    DoDecode();
  }

  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyFlushDone, this));

  state_ = kNormal;
}

void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Reset: invalid state: " << state_, ILLEGAL_STATE,);

  state_ = kResetting;

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyResetDone, this));

  state_ = kNormal;
}

void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(CalledOnValidThread());
  Invalidate();
}

bool DXVAVideoDecodeAccelerator::InitDecoder() {
  // We cannot use CoCreateInstance to instantiate the decoder object as that
  // fails in the sandbox. We mimic the steps CoCreateInstance uses to
  // instantiate the object.
  HMODULE decoder_dll = ::GetModuleHandle(L"msmpeg2vdec.dll");
  RETURN_ON_FAILURE(decoder_dll,
                    "msmpeg2vdec.dll required for decoding is not loaded",
                    false);

  typedef HRESULT (WINAPI* GetClassObject)(const CLSID& clsid,
                                           const IID& iid,
                                           void** object);

  GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
      GetProcAddress(decoder_dll, "DllGetClassObject"));
  RETURN_ON_FAILURE(get_class_object,
                    "Failed to get DllGetClassObject pointer", false);

  base::win::ScopedComPtr<IClassFactory> factory;
  HRESULT hr = get_class_object(__uuidof(CMSH264DecoderMFT),
                                __uuidof(IClassFactory),
                                reinterpret_cast<void**>(factory.Receive()));
  RETURN_ON_HR_FAILURE(hr, "DllGetClassObject for decoder failed", false);

  hr = factory->CreateInstance(NULL, __uuidof(IMFTransform),
                               reinterpret_cast<void**>(decoder_.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);

  RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
                    "Failed to check decoder DXVA support", false);

  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_));
  RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);

  return SetDecoderMediaTypes();
}

bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false);

  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);

  hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
  RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
  RETURN_ON_FAILURE(SetDecoderInputMediaType(),
                    "Failed to set decoder input media type", false);
  return SetDecoderOutputMediaType(MFVideoFormat_NV12);
}

bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
  base::win::ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false);

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false);

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false);

  hr = decoder_->SetInputType(0, media_type, 0);  // No flags
  RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false);
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
    const GUID& subtype) {
  base::win::ScopedComPtr<IMFMediaType> out_media_type;

  for (uint32 i = 0;
       SUCCEEDED(decoder_->GetOutputAvailableType(0, i,
                                                  out_media_type.Receive()));
       ++i) {
    GUID out_subtype = {0};
    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
    RETURN_ON_HR_FAILURE(hr, "Failed to get output subtype", false);

    if (out_subtype == subtype) {
      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags
      RETURN_ON_HR_FAILURE(hr, "Failed to set decoder output type", false);
      return true;
    }
    out_media_type.Release();
  }
  return false;
}

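// Thin wrapper around IMFTransform::ProcessMessage; returns true if the
// message was accepted by the decoder.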
bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
                                                int32 param) {
  HRESULT hr = decoder_->ProcessMessage(msg, param);
  return SUCCEEDED(hr);
}

// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffers for either input or output, so we have to do it ourselves
// and make sure they're the correct size. We only provide decoding if DXVA is
// enabled.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags: one requiring a whole frame in a single
  // sample, one requiring a single buffer per sample, and one specifying a
  // fixed sample size (as in cbSize).
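  // In mftransform.h terms, the expected value 0x7 is
  // MFT_INPUT_STREAM_WHOLE_SAMPLES |
  // MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
  // MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE.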
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The output flags should be the same and mean the same thing, except that
  // when DXVA is enabled there is an extra 0x100 flag
  // (MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) meaning the decoder allocates its
  // own output samples.
  DVLOG(1) << "Flags: "
           << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}

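// Pulls at most one decoded frame out of the MFT via ProcessOutput. On
// MF_E_TRANSFORM_NEED_MORE_INPUT the state transitions to kStopped; on a
// stream format change the output type is renegotiated and DoDecode recurses.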
void DXVAVideoDecodeAccelerator::DoDecode() {
  // This function is also called from Flush in a loop which could result
  // in the state transitioning to kNormal due to decoded output.
  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kEosDrain),
      "DoDecode: not in normal/drain state", ILLEGAL_STATE,);

  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
  DWORD status = 0;

  HRESULT hr = decoder_->ProcessOutput(0,  // No flags
                                       1,  // # of out streams to pull from
                                       &output_data_buffer,
                                       &status);
  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    VLOG(1) << "Got events from ProcessOutput, but discarding";
    events->Release();
  }
  if (FAILED(hr)) {
    // A stream change needs further ProcessInput calls to get back decoder
    // output, which is why we need to set the state to stopped.
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
        // The decoder didn't let us set NV12 output format. Not sure as to
        // why this can happen. Give up in disgust.
        NOTREACHED() << "Failed to set decoder output media type to NV12";
        state_ = kStopped;
      } else {
        DVLOG(1) << "Received output format change from the decoder."
                    " Recursively invoking DoDecode";
        DoDecode();
      }
      return;
    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      // No more output from the decoder. Stop playback.
      state_ = kStopped;
      return;
    } else {
      NOTREACHED() << "Unhandled error in DoDecode()";
      return;
    }
  }
  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");

  TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
                 inputs_before_decode_);

  inputs_before_decode_ = 0;

  RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
      "Failed to process output sample.", PLATFORM_FAILURE,);

  state_ = kNormal;
}

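// Takes ownership of |sample|, copies the decoded YUV surface into a new RGB
// offscreen surface, and queues the result until a client picture buffer is
// available to receive it.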
bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);

  base::win::ScopedComPtr<IMFSample> output_sample;
  output_sample.Attach(sample);

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);

  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
                       false);

  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this,
                        "");
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct3D surface directly for rendering.

  // The decoded bits in the source Direct3D surface are in the YUV format.
  // ANGLE does not support that. As a workaround we create an offscreen
  // surface in the RGB format and copy the source surface to this surface.
  base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
  hr = device_->CreateOffscreenPlainSurface(surface_desc.Width,
                                            surface_desc.Height,
                                            D3DFMT_A8R8G8B8,
                                            D3DPOOL_DEFAULT,
                                            dest_surface.Receive(),
                                            NULL);
  RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface", false);

  hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL,
                                  NULL, D3DX_DEFAULT, 0);
  RETURN_ON_HR_FAILURE(hr, "D3DXLoadSurfaceFromSurface failed", false);

  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, "");

  pending_output_samples_.push_back(
      PendingSampleInfo(last_input_buffer_id_, dest_surface));

  // If picture buffers are available to receive the output data, use the
  // first free one and mark it as no longer available.
  if (output_picture_buffers_.size()) {
    ProcessPendingSamples();
    return true;
  }
  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    return true;
  }
  // Go ahead and request picture buffers.
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
      this, surface_desc.Width, surface_desc.Height));

  pictures_requested_ = true;
  return true;
}

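// Reads the RGB bits out of |dest_surface| and uploads them into the GL
// texture backing |picture_buffer|, then notifies the client that the picture
// produced from |input_buffer_id| is ready.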
bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
    IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer,
    int input_buffer_id) {
  DCHECK(dest_surface);

  D3DSURFACE_DESC surface_desc;
  HRESULT hr = dest_surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  scoped_array<char> bits;
  RETURN_ON_FAILURE(GetBitmapFromSurface(dest_surface, &bits),
                    "Failed to get bitmap from surface for rendering", false);

  // This function currently executes in the context of IPC handlers in the
  // GPU process, which ensures that there is always an OpenGL context.
  GLint current_texture = 0;
  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);

  glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id());
  glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width,
               surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE,
               reinterpret_cast<GLvoid*>(bits.get()));
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

  glBindTexture(GL_TEXTURE_2D, current_texture);

  media::Picture output_picture(picture_buffer.id(), input_buffer_id);
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyPictureReady, this, output_picture));
  return true;
}

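// Pairs queued output samples with available picture buffers, copying each
// sample into a buffer and marking that buffer as in use.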
void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
  if (pending_output_samples_.empty())
    return;

  OutputBuffers::iterator index;

  for (index = output_picture_buffers_.begin();
       index != output_picture_buffers_.end() &&
       !pending_output_samples_.empty();
       ++index) {
    if (index->second.available) {
      PendingSampleInfo sample_info = pending_output_samples_.front();

      CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface,
                                          index->second.picture_buffer,
                                          sample_info.input_buffer_id);
      index->second.available = false;
      pending_output_samples_.pop_front();
    }
  }
}

void DXVAVideoDecodeAccelerator::ClearState() {
  last_input_buffer_id_ = -1;
  output_picture_buffers_.clear();
  pending_output_samples_.clear();
}

void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(CalledOnValidThread());

  if (client_)
    client_->NotifyError(error);
  client_ = NULL;

  if (state_ != kUninitialized) {
    Invalidate();
  }
}

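// Reads the contents of |surface| back into system memory as a DIB via GDI.
// GetDIBits returns the rows bottom-up, which matches the orientation OpenGL
// expects for glTexImage2D.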
bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface(
    IDirect3DSurface9* surface,
    scoped_array<char>* bits) {
  // Get the currently loaded bitmap from the DC.
  HDC hdc = NULL;
  HRESULT hr = surface->GetDC(&hdc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get HDC from surface", false);

  HBITMAP bitmap =
      reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
  if (!bitmap) {
    NOTREACHED() << "Failed to get bitmap from DC";
    surface->ReleaseDC(hdc);
    return false;
  }
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct3D surface directly for rendering.
  // The device-dependent bitmap is upside down for OpenGL. We convert the
  // bitmap to a DIB and render it on the texture instead.
  BITMAP bitmap_basic_info = {0};
  if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) {
    NOTREACHED() << "Failed to read bitmap info";
    surface->ReleaseDC(hdc);
    return false;
  }
  BITMAPINFO bitmap_info = {0};
  bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
  bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth;
  bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight;
  bitmap_info.bmiHeader.biPlanes = 1;
  bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel;
  bitmap_info.bmiHeader.biCompression = BI_RGB;
  bitmap_info.bmiHeader.biSizeImage = 0;
  bitmap_info.bmiHeader.biClrUsed = 0;

  // First call with a NULL buffer so GetDIBits fills in biSizeImage.
  int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS);
  if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) {
    NOTREACHED() << "Failed to read bitmap size";
    surface->ReleaseDC(hdc);
    return false;
  }

  bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]);
  ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(),
                  &bitmap_info, DIB_RGB_COLORS);
  if (!ret) {
    NOTREACHED() << "Failed to retrieve bitmap bits.";
  }
  surface->ReleaseDC(hdc);
  return !!ret;
}

void DXVAVideoDecodeAccelerator::Invalidate() {
  if (state_ == kUninitialized)
    return;
  ClearState();
  decoder_.Release();
  MFShutdown();
  state_ = kUninitialized;
}

void DXVAVideoDecodeAccelerator::NotifyInitializeDone() {
  if (client_)
    client_->NotifyInitializeDone();
}

void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
  if (client_)
    client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
}

void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
  if (client_)
    client_->NotifyFlushDone();
}

void DXVAVideoDecodeAccelerator::NotifyResetDone() {
  if (client_)
    client_->NotifyResetDone();
}

void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
  // This task could execute after the decoder has been torn down.
  if (state_ != kUninitialized && client_) {
    client_->ProvidePictureBuffers(kNumPictureBuffers,
                                   gfx::Size(width, height));
  }
}

void DXVAVideoDecodeAccelerator::NotifyPictureReady(
    const media::Picture& picture) {
  // This task could execute after the decoder has been torn down.
  if (state_ != kUninitialized && client_)
    client_->PictureReady(picture);
}