Index: media/gpu/media_foundation_video_encode_accelerator_win.cc
diff --git a/media/gpu/media_foundation_video_encode_accelerator_win.cc b/media/gpu/media_foundation_video_encode_accelerator_win.cc
index 6723aa7a0b9a3202291bab04d5e8ce56312f48e9..0ad8ba0da5965a834d1283a70f378a4170650018 100644
--- a/media/gpu/media_foundation_video_encode_accelerator_win.cc
+++ b/media/gpu/media_foundation_video_encode_accelerator_win.cc
@@ -357,7 +357,7 @@ bool MediaFoundationVideoEncodeAccelerator::CreateHardwareEncoderMFT() {
DVLOG(3) << "HW encoder(s) found: " << count;
hr = encoder_.CreateInstance(CLSIDs[0]);
RETURN_ON_HR_FAILURE(hr, "Couldn't activate hardware encoder", false);
-  RETURN_ON_FAILURE((encoder_.get() != nullptr),
+  RETURN_ON_FAILURE((encoder_.Get() != nullptr),
"No HW encoder instance created", false);
return true;
}
@@ -399,10 +399,10 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
RETURN_ON_HR_FAILURE(hr, "Couldn't set video format", false);
hr = imf_output_media_type_->SetUINT32(MF_MT_AVG_BITRATE, target_bitrate_);
RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", false);
-  hr = MFSetAttributeRatio(imf_output_media_type_.get(), MF_MT_FRAME_RATE,
+  hr = MFSetAttributeRatio(imf_output_media_type_.Get(), MF_MT_FRAME_RATE,
frame_rate_, 1);
RETURN_ON_HR_FAILURE(hr, "Couldn't set frame rate", false);
-  hr = MFSetAttributeSize(imf_output_media_type_.get(), MF_MT_FRAME_SIZE,
+  hr = MFSetAttributeSize(imf_output_media_type_.Get(), MF_MT_FRAME_SIZE,
input_visible_size_.width(),
input_visible_size_.height());
RETURN_ON_HR_FAILURE(hr, "Couldn't set frame size", false);
@@ -412,7 +412,7 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
hr = imf_output_media_type_->SetUINT32(MF_MT_MPEG2_PROFILE,
eAVEncH264VProfile_Base);
RETURN_ON_HR_FAILURE(hr, "Couldn't set codec profile", false);
-  hr = encoder_->SetOutputType(output_stream_id_, imf_output_media_type_.get(),
+  hr = encoder_->SetOutputType(output_stream_id_, imf_output_media_type_.Get(),
0);
RETURN_ON_HR_FAILURE(hr, "Couldn't set output media type", false);
@@ -423,17 +423,17 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
RETURN_ON_HR_FAILURE(hr, "Couldn't set media type", false);
hr = imf_input_media_type_->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_YV12);
RETURN_ON_HR_FAILURE(hr, "Couldn't set video format", false);
-  hr = MFSetAttributeRatio(imf_input_media_type_.get(), MF_MT_FRAME_RATE,
+  hr = MFSetAttributeRatio(imf_input_media_type_.Get(), MF_MT_FRAME_RATE,
frame_rate_, 1);
RETURN_ON_HR_FAILURE(hr, "Couldn't set frame rate", false);
-  hr = MFSetAttributeSize(imf_input_media_type_.get(), MF_MT_FRAME_SIZE,
+  hr = MFSetAttributeSize(imf_input_media_type_.Get(), MF_MT_FRAME_SIZE,
input_visible_size_.width(),
input_visible_size_.height());
RETURN_ON_HR_FAILURE(hr, "Couldn't set frame size", false);
hr = imf_input_media_type_->SetUINT32(MF_MT_INTERLACE_MODE,
MFVideoInterlace_Progressive);
RETURN_ON_HR_FAILURE(hr, "Couldn't set interlace mode", false);
-  hr = encoder_->SetInputType(input_stream_id_, imf_input_media_type_.get(), 0);
+  hr = encoder_->SetInputType(input_stream_id_, imf_input_media_type_.Get(), 0);
RETURN_ON_HR_FAILURE(hr, "Couldn't set input media type", false);
return SUCCEEDED(hr);
@@ -441,7 +441,7 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
bool MediaFoundationVideoEncodeAccelerator::SetEncoderModes() {
DCHECK(main_client_task_runner_->BelongsToCurrentThread());
-  RETURN_ON_FAILURE((encoder_.get() != nullptr),
+  RETURN_ON_FAILURE((encoder_.Get() != nullptr),
"No HW encoder instance created", false);
HRESULT hr = encoder_.QueryInterface(codec_api_.Receive());
@@ -470,17 +470,17 @@ bool MediaFoundationVideoEncodeAccelerator::IsResolutionSupported(
DCHECK(encoder_);
HRESULT hr =
-      MFSetAttributeSize(imf_output_media_type_.get(), MF_MT_FRAME_SIZE,
+      MFSetAttributeSize(imf_output_media_type_.Get(), MF_MT_FRAME_SIZE,
resolution.width(), resolution.height());
RETURN_ON_HR_FAILURE(hr, "Couldn't set frame size", false);
-  hr = encoder_->SetOutputType(output_stream_id_, imf_output_media_type_.get(),
+  hr = encoder_->SetOutputType(output_stream_id_, imf_output_media_type_.Get(),
0);
RETURN_ON_HR_FAILURE(hr, "Couldn't set output media type", false);
-  hr = MFSetAttributeSize(imf_input_media_type_.get(), MF_MT_FRAME_SIZE,
+  hr = MFSetAttributeSize(imf_input_media_type_.Get(), MF_MT_FRAME_SIZE,
resolution.width(), resolution.height());
RETURN_ON_HR_FAILURE(hr, "Couldn't set frame size", false);
-  hr = encoder_->SetInputType(input_stream_id_, imf_input_media_type_.get(), 0);
+  hr = encoder_->SetInputType(input_stream_id_, imf_input_media_type_.Get(), 0);
RETURN_ON_HR_FAILURE(hr, "Couldn't set input media type", false);
return true;
@@ -503,7 +503,7 @@ void MediaFoundationVideoEncodeAccelerator::EncodeTask(
input_sample_->GetBufferByIndex(0, input_buffer.Receive());
{
-    MediaBufferScopedPointer scoped_buffer(input_buffer.get());
+    MediaBufferScopedPointer scoped_buffer(input_buffer.Get());
DCHECK(scoped_buffer.get());
libyuv::I420Copy(frame->visible_data(VideoFrame::kYPlane),
frame->stride(VideoFrame::kYPlane),
@@ -530,14 +530,14 @@ void MediaFoundationVideoEncodeAccelerator::EncodeTask(
// Release frame after input is copied.
frame = nullptr;
-  hr = encoder_->ProcessInput(input_stream_id_, input_sample_.get(), 0);
+  hr = encoder_->ProcessInput(input_stream_id_, input_sample_.Get(), 0);
// According to MSDN, if encoder returns MF_E_NOTACCEPTING, we need to try
// processing the output. This error indicates that encoder does not accept
// any more input data.
if (hr == MF_E_NOTACCEPTING) {
DVLOG(3) << "MF_E_NOTACCEPTING";
ProcessOutput();
-    hr = encoder_->ProcessInput(input_stream_id_, input_sample_.get(), 0);
+    hr = encoder_->ProcessInput(input_stream_id_, input_sample_.Get(), 0);
if (!SUCCEEDED(hr)) {
NotifyError(kPlatformFailureError);
RETURN_ON_HR_FAILURE(hr, "Couldn't encode", );
@@ -567,7 +567,7 @@ void MediaFoundationVideoEncodeAccelerator::ProcessOutput() {
output_data_buffer.dwStreamID = 0;
output_data_buffer.dwStatus = 0;
output_data_buffer.pEvents = NULL;
-  output_data_buffer.pSample = output_sample_.get();
+  output_data_buffer.pSample = output_sample_.Get();
DWORD status = 0;
hr = encoder_->ProcessOutput(output_stream_id_, 1, &output_data_buffer,
&status);
@@ -594,7 +594,7 @@ void MediaFoundationVideoEncodeAccelerator::ProcessOutput() {
}
const bool keyframe = MFGetAttributeUINT32(
-      output_sample_.get(), MFSampleExtension_CleanPoint, false);
+      output_sample_.Get(), MFSampleExtension_CleanPoint, false);
DVLOG(3) << "We HAVE encoded data with size:" << size << " keyframe "
<< keyframe;
@@ -604,7 +604,7 @@ void MediaFoundationVideoEncodeAccelerator::ProcessOutput() {
std::unique_ptr<EncodeOutput> encode_output(
new EncodeOutput(size, keyframe, timestamp));
{
-    MediaBufferScopedPointer scoped_buffer(output_buffer.get());
+    MediaBufferScopedPointer scoped_buffer(output_buffer.Get());
memcpy(encode_output->memory(), scoped_buffer.get(), size);
}
encoder_output_queue_.push_back(std::move(encode_output));
@@ -616,7 +616,7 @@ void MediaFoundationVideoEncodeAccelerator::ProcessOutput() {
bitstream_buffer_queue_.pop_front();
{
-    MediaBufferScopedPointer scoped_buffer(output_buffer.get());
+    MediaBufferScopedPointer scoped_buffer(output_buffer.Get());
memcpy(buffer_ref->shm->memory(), scoped_buffer.get(), size);
}