OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/gpu/dxva_video_decode_accelerator_win.h" | 5 #include "media/gpu/dxva_video_decode_accelerator_win.h" |
6 | 6 |
7 #include <memory> | 7 #include <memory> |
8 | 8 |
9 #if !defined(OS_WIN) | 9 #if !defined(OS_WIN) |
10 #error This file should only be built on Windows. | 10 #error This file should only be built on Windows. |
(...skipping 267 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
278 // The keyed mutex should always be released before the other thread | 278 // The keyed mutex should always be released before the other thread |
279 // attempts to acquire it, so AcquireSync should always return immediately. | 279 // attempts to acquire it, so AcquireSync should always return immediately. |
280 kAcquireSyncWaitMs = 0, | 280 kAcquireSyncWaitMs = 0, |
281 }; | 281 }; |
282 | 282 |
283 // Creates a Media Foundation sample with one buffer containing a copy of the | 283 // Creates a Media Foundation sample with one buffer containing a copy of the |
284 // given Annex B stream data. | 284 // given Annex B stream data. |
285 // If duration and sample time are not known, provide 0. | 285 // If duration and sample time are not known, provide 0. |
286 // |min_size| specifies the minimum size of the buffer (might be required by | 286 // |min_size| specifies the minimum size of the buffer (might be required by |
287 // the decoder for input). If no alignment is required, provide 0. | 287 // the decoder for input). If no alignment is required, provide 0. |
288 static IMFSample* CreateInputSample(const uint8_t* stream, | 288 static base::win::ScopedComPtr<IMFSample> CreateInputSample( |
289 uint32_t size, | 289 const uint8_t* stream, |
290 uint32_t min_size, | 290 uint32_t size, |
291 int alignment) { | 291 uint32_t min_size, |
| 292 int alignment) { |
292 CHECK(stream); | 293 CHECK(stream); |
293 CHECK_GT(size, 0U); | 294 CHECK_GT(size, 0U); |
294 base::win::ScopedComPtr<IMFSample> sample; | 295 base::win::ScopedComPtr<IMFSample> sample; |
295 sample.Attach( | 296 sample = mf::CreateEmptySampleWithBuffer(std::max(min_size, size), alignment); |
296 mf::CreateEmptySampleWithBuffer(std::max(min_size, size), alignment)); | 297 RETURN_ON_FAILURE(sample.get(), "Failed to create empty sample", |
297 RETURN_ON_FAILURE(sample.get(), "Failed to create empty sample", NULL); | 298 base::win::ScopedComPtr<IMFSample>()); |
298 | 299 |
299 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | 300 base::win::ScopedComPtr<IMFMediaBuffer> buffer; |
300 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive()); | 301 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive()); |
301 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL); | 302 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", |
| 303 base::win::ScopedComPtr<IMFSample>()); |
302 | 304 |
303 DWORD max_length = 0; | 305 DWORD max_length = 0; |
304 DWORD current_length = 0; | 306 DWORD current_length = 0; |
305 uint8_t* destination = NULL; | 307 uint8_t* destination = NULL; |
306 hr = buffer->Lock(&destination, &max_length, ¤t_length); | 308 hr = buffer->Lock(&destination, &max_length, ¤t_length); |
307 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL); | 309 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", |
| 310 base::win::ScopedComPtr<IMFSample>()); |
308 | 311 |
309 CHECK_EQ(current_length, 0u); | 312 CHECK_EQ(current_length, 0u); |
310 CHECK_GE(max_length, size); | 313 CHECK_GE(max_length, size); |
311 memcpy(destination, stream, size); | 314 memcpy(destination, stream, size); |
312 | 315 |
313 hr = buffer->SetCurrentLength(size); | 316 hr = buffer->SetCurrentLength(size); |
314 RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL); | 317 RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", |
| 318 base::win::ScopedComPtr<IMFSample>()); |
315 | 319 |
316 hr = buffer->Unlock(); | 320 hr = buffer->Unlock(); |
317 RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL); | 321 RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", |
| 322 base::win::ScopedComPtr<IMFSample>()); |
318 | 323 |
319 return sample.Detach(); | 324 return sample; |
320 } | 325 } |
321 | 326 |
322 // Helper function to create a COM object instance from a DLL. The alternative | 327 // Helper function to create a COM object instance from a DLL. The alternative |
323 // is to use the CoCreateInstance API which requires the COM apartment to be | 328 // is to use the CoCreateInstance API which requires the COM apartment to be |
324 // initialized which is not the case on the GPU main thread. We want to avoid | 329 // initialized which is not the case on the GPU main thread. We want to avoid |
325 // initializing COM as it may have sideeffects. | 330 // initializing COM as it may have sideeffects. |
326 HRESULT CreateCOMObjectFromDll(HMODULE dll, | 331 HRESULT CreateCOMObjectFromDll(HMODULE dll, |
327 const CLSID& clsid, | 332 const CLSID& clsid, |
328 const IID& iid, | 333 const IID& iid, |
329 void** object) { | 334 void** object) { |
(...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
459 return gfx::ColorSpace(); | 464 return gfx::ColorSpace(); |
460 // TODO(hubbe): Is using last_sps_id_ correct here? | 465 // TODO(hubbe): Is using last_sps_id_ correct here? |
461 const H264SPS* sps = parser_->GetSPS(last_sps_id_); | 466 const H264SPS* sps = parser_->GetSPS(last_sps_id_); |
462 if (sps) | 467 if (sps) |
463 return sps->GetColorSpace(); | 468 return sps->GetColorSpace(); |
464 return gfx::ColorSpace(); | 469 return gfx::ColorSpace(); |
465 } | 470 } |
466 | 471 |
// Tracks a decoder output sample that is pending delivery to the client
// (see pending_output_samples_). |sample| is taken by value, so passing it
// here bumps the COM reference count once; the copy is then stored in
// |output_sample|. |picture_buffer_id| starts at -1, i.e. no picture buffer
// has been associated with this sample yet.
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32_t buffer_id,
    base::win::ScopedComPtr<IMFSample> sample,
    const gfx::ColorSpace& color_space)
    : input_buffer_id(buffer_id),
      picture_buffer_id(-1),
      color_space(color_space),
      output_sample(sample) {}
476 | 480 |
// Copyable so instances can be stored in (and copied out of) the pending
// output sample container; member-wise copy is correct since ScopedComPtr
// handles the COM reference counting.
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    const PendingSampleInfo& other) = default;
479 | 483 |
480 DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {} | 484 DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {} |
481 | 485 |
482 DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator( | 486 DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator( |
483 const GetGLContextCallback& get_gl_context_cb, | 487 const GetGLContextCallback& get_gl_context_cb, |
484 const MakeGLContextCurrentCallback& make_context_current_cb, | 488 const MakeGLContextCurrentCallback& make_context_current_cb, |
485 const BindGLImageCallback& bind_image_cb, | 489 const BindGLImageCallback& bind_image_cb, |
(...skipping 435 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
921 if (client_) | 925 if (client_) |
922 client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id()); | 926 client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id()); |
923 return; | 927 return; |
924 } | 928 } |
925 | 929 |
926 base::win::ScopedComPtr<IMFSample> sample; | 930 base::win::ScopedComPtr<IMFSample> sample; |
927 RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()), | 931 RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()), |
928 "Failed in base::SharedMemory::Map", | 932 "Failed in base::SharedMemory::Map", |
929 PLATFORM_FAILURE, ); | 933 PLATFORM_FAILURE, ); |
930 | 934 |
931 sample.Attach(CreateInputSample( | 935 sample = CreateInputSample( |
932 reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(), | 936 reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(), |
933 std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize), | 937 std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize), |
934 input_stream_info_.cbAlignment)); | 938 input_stream_info_.cbAlignment); |
935 RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample", | 939 RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample", |
936 PLATFORM_FAILURE, ); | 940 PLATFORM_FAILURE, ); |
937 | 941 |
938 RETURN_AND_NOTIFY_ON_HR_FAILURE( | 942 RETURN_AND_NOTIFY_ON_HR_FAILURE( |
939 sample->SetSampleTime(bitstream_buffer.id()), | 943 sample->SetSampleTime(bitstream_buffer.id()), |
940 "Failed to associate input buffer id with sample", PLATFORM_FAILURE, ); | 944 "Failed to associate input buffer id with sample", PLATFORM_FAILURE, ); |
941 | 945 |
942 decoder_thread_task_runner_->PostTask( | 946 decoder_thread_task_runner_->PostTask( |
943 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal, | 947 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal, |
944 base::Unretained(this), sample)); | 948 base::Unretained(this), sample)); |
(...skipping 798 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1743 g_last_process_output_time = GetCurrentQPC(); | 1747 g_last_process_output_time = GetCurrentQPC(); |
1744 hr = decoder_->ProcessOutput(0, // No flags | 1748 hr = decoder_->ProcessOutput(0, // No flags |
1745 1, // # of out streams to pull from | 1749 1, // # of out streams to pull from |
1746 &output_data_buffer, &status); | 1750 &output_data_buffer, &status); |
1747 } | 1751 } |
1748 IMFCollection* events = output_data_buffer.pEvents; | 1752 IMFCollection* events = output_data_buffer.pEvents; |
1749 if (events != NULL) { | 1753 if (events != NULL) { |
1750 DVLOG(1) << "Got events from ProcessOuput, but discarding"; | 1754 DVLOG(1) << "Got events from ProcessOuput, but discarding"; |
1751 events->Release(); | 1755 events->Release(); |
1752 } | 1756 } |
| 1757 base::win::ScopedComPtr<IMFSample> output_sample; |
| 1758 output_sample.Attach(output_data_buffer.pSample); |
1753 if (FAILED(hr)) { | 1759 if (FAILED(hr)) { |
1754 // A stream change needs further ProcessInput calls to get back decoder | 1760 // A stream change needs further ProcessInput calls to get back decoder |
1755 // output which is why we need to set the state to stopped. | 1761 // output which is why we need to set the state to stopped. |
1756 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { | 1762 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { |
1757 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12) && | 1763 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12) && |
1758 !SetDecoderOutputMediaType(MFVideoFormat_P010)) { | 1764 !SetDecoderOutputMediaType(MFVideoFormat_P010)) { |
1759 // Decoder didn't let us set NV12 output format. Not sure as to why | 1765 // Decoder didn't let us set NV12 output format. Not sure as to why |
1760 // this can happen. Give up in disgust. | 1766 // this can happen. Give up in disgust. |
1761 NOTREACHED() << "Failed to set decoder output media type to NV12"; | 1767 NOTREACHED() << "Failed to set decoder output media type to NV12"; |
1762 SetState(kStopped); | 1768 SetState(kStopped); |
(...skipping 13 matching lines...) Expand all Loading... |
1776 return; | 1782 return; |
1777 } | 1783 } |
1778 } | 1784 } |
1779 TRACE_EVENT_ASYNC_END0("gpu", "DXVAVideoDecodeAccelerator.Decoding", this); | 1785 TRACE_EVENT_ASYNC_END0("gpu", "DXVAVideoDecodeAccelerator.Decoding", this); |
1780 | 1786 |
1781 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", | 1787 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", |
1782 inputs_before_decode_); | 1788 inputs_before_decode_); |
1783 | 1789 |
1784 inputs_before_decode_ = 0; | 1790 inputs_before_decode_ = 0; |
1785 | 1791 |
1786 RETURN_AND_NOTIFY_ON_FAILURE( | 1792 RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_sample, color_space), |
1787 ProcessOutputSample(output_data_buffer.pSample, color_space), | 1793 "Failed to process output sample.", |
1788 "Failed to process output sample.", PLATFORM_FAILURE, ); | 1794 PLATFORM_FAILURE, ); |
1789 } | 1795 } |
1790 | 1796 |
1791 bool DXVAVideoDecodeAccelerator::ProcessOutputSample( | 1797 bool DXVAVideoDecodeAccelerator::ProcessOutputSample( |
1792 IMFSample* sample, | 1798 base::win::ScopedComPtr<IMFSample> sample, |
1793 const gfx::ColorSpace& color_space) { | 1799 const gfx::ColorSpace& color_space) { |
1794 RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false); | 1800 RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false); |
1795 | 1801 |
1796 LONGLONG input_buffer_id = 0; | 1802 LONGLONG input_buffer_id = 0; |
1797 RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id), | 1803 RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id), |
1798 "Failed to get input buffer id associated with sample", | 1804 "Failed to get input buffer id associated with sample", |
1799 false); | 1805 false); |
1800 | 1806 |
1801 { | 1807 { |
1802 base::AutoLock lock(decoder_lock_); | 1808 base::AutoLock lock(decoder_lock_); |
1803 DCHECK(pending_output_samples_.empty()); | 1809 DCHECK(pending_output_samples_.empty()); |
1804 pending_output_samples_.push_back( | 1810 pending_output_samples_.push_back( |
1805 PendingSampleInfo(input_buffer_id, sample, color_space)); | 1811 PendingSampleInfo(input_buffer_id, sample, color_space)); |
1806 } | 1812 } |
1807 | 1813 |
1808 if (pictures_requested_) { | 1814 if (pictures_requested_) { |
1809 DVLOG(1) << "Waiting for picture slots from the client."; | 1815 DVLOG(1) << "Waiting for picture slots from the client."; |
1810 main_thread_task_runner_->PostTask( | 1816 main_thread_task_runner_->PostTask( |
1811 FROM_HERE, | 1817 FROM_HERE, |
1812 base::Bind(&DXVAVideoDecodeAccelerator::ProcessPendingSamples, | 1818 base::Bind(&DXVAVideoDecodeAccelerator::ProcessPendingSamples, |
1813 weak_ptr_)); | 1819 weak_ptr_)); |
1814 return true; | 1820 return true; |
1815 } | 1821 } |
1816 | 1822 |
1817 int width = 0; | 1823 int width = 0; |
1818 int height = 0; | 1824 int height = 0; |
1819 if (!GetVideoFrameDimensions(sample, &width, &height)) { | 1825 if (!GetVideoFrameDimensions(sample.get(), &width, &height)) { |
1820 RETURN_ON_FAILURE(false, "Failed to get D3D surface from output sample", | 1826 RETURN_ON_FAILURE(false, "Failed to get D3D surface from output sample", |
1821 false); | 1827 false); |
1822 } | 1828 } |
1823 | 1829 |
1824 // Go ahead and request picture buffers. | 1830 // Go ahead and request picture buffers. |
1825 main_thread_task_runner_->PostTask( | 1831 main_thread_task_runner_->PostTask( |
1826 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers, | 1832 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers, |
1827 weak_ptr_, width, height)); | 1833 weak_ptr_, width, height)); |
1828 | 1834 |
1829 pictures_requested_ = true; | 1835 pictures_requested_ = true; |
(...skipping 743 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2573 { | 2579 { |
2574 base::AutoLock lock(decoder_lock_); | 2580 base::AutoLock lock(decoder_lock_); |
2575 PendingSampleInfo& sample_info = pending_output_samples_.front(); | 2581 PendingSampleInfo& sample_info = pending_output_samples_.front(); |
2576 input_sample_for_conversion = sample_info.output_sample; | 2582 input_sample_for_conversion = sample_info.output_sample; |
2577 } | 2583 } |
2578 | 2584 |
2579 decoder_thread_task_runner_->PostTask( | 2585 decoder_thread_task_runner_->PostTask( |
2580 FROM_HERE, | 2586 FROM_HERE, |
2581 base::Bind(&DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread, | 2587 base::Bind(&DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread, |
2582 base::Unretained(this), dest_texture, dest_keyed_mutex, | 2588 base::Unretained(this), dest_texture, dest_keyed_mutex, |
2583 keyed_mutex_value, input_sample_for_conversion.Detach(), | 2589 keyed_mutex_value, input_sample_for_conversion, |
2584 picture_buffer_id, input_buffer_id)); | 2590 picture_buffer_id, input_buffer_id)); |
2585 } | 2591 } |
2586 | 2592 |
2587 void DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread( | 2593 void DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread( |
2588 ID3D11Texture2D* dest_texture, | 2594 ID3D11Texture2D* dest_texture, |
2589 base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex, | 2595 base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex, |
2590 uint64_t keyed_mutex_value, | 2596 uint64_t keyed_mutex_value, |
2591 IMFSample* video_frame, | 2597 base::win::ScopedComPtr<IMFSample> input_sample, |
2592 int picture_buffer_id, | 2598 int picture_buffer_id, |
2593 int input_buffer_id) { | 2599 int input_buffer_id) { |
2594 TRACE_EVENT0("media", | 2600 TRACE_EVENT0("media", |
2595 "DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread"); | 2601 "DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread"); |
2596 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | 2602 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); |
2597 HRESULT hr = E_FAIL; | 2603 HRESULT hr = E_FAIL; |
2598 | 2604 |
2599 DCHECK(use_dx11_); | 2605 DCHECK(use_dx11_); |
2600 DCHECK(video_frame); | 2606 DCHECK(!!input_sample); |
2601 | |
2602 base::win::ScopedComPtr<IMFSample> input_sample; | |
2603 input_sample.Attach(video_frame); | |
2604 | |
2605 DCHECK(video_format_converter_mft_.get()); | 2607 DCHECK(video_format_converter_mft_.get()); |
2606 | 2608 |
2607 if (dest_keyed_mutex) { | 2609 if (dest_keyed_mutex) { |
2608 HRESULT hr = | 2610 HRESULT hr = |
2609 dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs); | 2611 dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs); |
2610 RETURN_AND_NOTIFY_ON_FAILURE( | 2612 RETURN_AND_NOTIFY_ON_FAILURE( |
2611 hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.", | 2613 hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.", |
2612 PLATFORM_FAILURE, ); | 2614 PLATFORM_FAILURE, ); |
2613 } | 2615 } |
2614 // The video processor MFT requires output samples to be allocated by the | 2616 // The video processor MFT requires output samples to be allocated by the |
(...skipping 11 matching lines...) Expand all Loading... |
2626 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | 2628 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; |
2627 hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), dest_texture, 0, | 2629 hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), dest_texture, 0, |
2628 FALSE, output_buffer.Receive()); | 2630 FALSE, output_buffer.Receive()); |
2629 if (FAILED(hr)) { | 2631 if (FAILED(hr)) { |
2630 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to create output sample.", | 2632 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to create output sample.", |
2631 PLATFORM_FAILURE, ); | 2633 PLATFORM_FAILURE, ); |
2632 } | 2634 } |
2633 | 2635 |
2634 output_sample->AddBuffer(output_buffer.get()); | 2636 output_sample->AddBuffer(output_buffer.get()); |
2635 | 2637 |
2636 hr = video_format_converter_mft_->ProcessInput(0, video_frame, 0); | 2638 hr = video_format_converter_mft_->ProcessInput(0, input_sample.get(), 0); |
2637 if (FAILED(hr)) { | 2639 if (FAILED(hr)) { |
2638 DCHECK(false); | 2640 DCHECK(false); |
2639 RETURN_AND_NOTIFY_ON_HR_FAILURE( | 2641 RETURN_AND_NOTIFY_ON_HR_FAILURE( |
2640 hr, "Failed to convert output sample format.", PLATFORM_FAILURE, ); | 2642 hr, "Failed to convert output sample format.", PLATFORM_FAILURE, ); |
2641 } | 2643 } |
2642 | 2644 |
| 2645 input_sample.Release(); |
| 2646 |
2643 DWORD status = 0; | 2647 DWORD status = 0; |
2644 MFT_OUTPUT_DATA_BUFFER format_converter_output = {}; | 2648 MFT_OUTPUT_DATA_BUFFER format_converter_output = {}; |
2645 format_converter_output.pSample = output_sample.get(); | 2649 format_converter_output.pSample = output_sample.get(); |
2646 hr = video_format_converter_mft_->ProcessOutput( | 2650 hr = video_format_converter_mft_->ProcessOutput( |
2647 0, // No flags | 2651 0, // No flags |
2648 1, // # of out streams to pull from | 2652 1, // # of out streams to pull from |
2649 &format_converter_output, &status); | 2653 &format_converter_output, &status); |
2650 | 2654 |
2651 if (FAILED(hr)) { | 2655 if (FAILED(hr)) { |
2652 DCHECK(false); | 2656 DCHECK(false); |
(...skipping 260 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2913 base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers, | 2917 base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers, |
2914 base::Unretained(this))); | 2918 base::Unretained(this))); |
2915 } | 2919 } |
2916 | 2920 |
2917 uint32_t DXVAVideoDecodeAccelerator::GetTextureTarget() const { | 2921 uint32_t DXVAVideoDecodeAccelerator::GetTextureTarget() const { |
2918 bool provide_nv12_textures = share_nv12_textures_ || copy_nv12_textures_; | 2922 bool provide_nv12_textures = share_nv12_textures_ || copy_nv12_textures_; |
2919 return provide_nv12_textures ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D; | 2923 return provide_nv12_textures ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D; |
2920 } | 2924 } |
2921 | 2925 |
2922 } // namespace media | 2926 } // namespace media |
OLD | NEW |