| Index: media/gpu/media_foundation_video_encode_accelerator_win.cc
|
| diff --git a/media/gpu/media_foundation_video_encode_accelerator_win.cc b/media/gpu/media_foundation_video_encode_accelerator_win.cc
|
| index 0594842011c0aa3bc6483dd40a984f5ad02c83d6..a43e2ad236bcecd15efa45ec2c10755c7a98cf53 100644
|
| --- a/media/gpu/media_foundation_video_encode_accelerator_win.cc
|
| +++ b/media/gpu/media_foundation_video_encode_accelerator_win.cc
|
| @@ -34,13 +34,12 @@ namespace {
|
| const int32_t kDefaultTargetBitrate = 5000000;
|
| const size_t kMaxFrameRateNumerator = 30;
|
| const size_t kMaxFrameRateDenominator = 1;
|
| -const size_t kMaxResolutionWidth = 1920;
|
| -const size_t kMaxResolutionHeight = 1088;
|
| +const size_t kMaxResolutionWidth = 3840;
|
| +const size_t kMaxResolutionHeight = 2176;
|
| const size_t kNumInputBuffers = 3;
|
| // Media Foundation uses 100 nanosecond units for time, see
|
| // https://msdn.microsoft.com/en-us/library/windows/desktop/ms697282(v=vs.85).aspx
|
| const size_t kOneMicrosecondInMFSampleTimeUnits = 10;
|
| -const size_t kOneSecondInMFSampleTimeUnits = 10000000;
|
| const size_t kOutputSampleBufferSizeRatio = 4;
|
|
|
| constexpr const wchar_t* const kMediaFoundationVideoEncoderDLLs[] = {
|
| @@ -177,7 +176,6 @@ bool MediaFoundationVideoEncodeAccelerator::Initialize(
|
| input_visible_size_)
|
| .GetArea();
|
|
|
| -
|
| if (!SetEncoderModes()) {
|
| DLOG(ERROR) << "Failed setting encoder parameters.";
|
| return false;
|
| @@ -187,13 +185,27 @@ bool MediaFoundationVideoEncodeAccelerator::Initialize(
|
| DLOG(ERROR) << "Failed initializing input-output samples.";
|
| return false;
|
| }
|
| +
|
| + MFT_INPUT_STREAM_INFO input_stream_info;
|
| + HRESULT hr =
|
| + encoder_->GetInputStreamInfo(input_stream_id_, &input_stream_info);
|
| + RETURN_ON_HR_FAILURE(hr, "Couldn't get input stream info", false);
|
| input_sample_.Attach(mf::CreateEmptySampleWithBuffer(
|
| - VideoFrame::AllocationSize(PIXEL_FORMAT_I420, input_visible_size_), 2));
|
| + input_stream_info.cbSize
|
| + ? input_stream_info.cbSize
|
| + : VideoFrame::AllocationSize(PIXEL_FORMAT_I420, input_visible_size_),
|
| + input_stream_info.cbAlignment));
|
| +
|
| + MFT_OUTPUT_STREAM_INFO output_stream_info;
|
| + hr = encoder_->GetOutputStreamInfo(output_stream_id_, &output_stream_info);
|
| + RETURN_ON_HR_FAILURE(hr, "Couldn't get output stream info", false);
|
| output_sample_.Attach(mf::CreateEmptySampleWithBuffer(
|
| - bitstream_buffer_size_ * kOutputSampleBufferSizeRatio, 2));
|
| + output_stream_info.cbSize
|
| + ? output_stream_info.cbSize
|
| + : bitstream_buffer_size_ * kOutputSampleBufferSizeRatio,
|
| + output_stream_info.cbAlignment));
|
|
|
| - HRESULT hr =
|
| - encoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
|
| + hr = encoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set ProcessMessage", false);
|
|
|
| // Pin all client callbacks to the main task runner initially. It can be
|
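| The sample-allocation change above sizes the input and output IMFSamples from the encoder's own stream info instead of hard-coded values, falling back to the computed I420 allocation size (or bitstream_buffer_size_ * kOutputSampleBufferSizeRatio) only when the MFT reports cbSize == 0. A minimal standalone sketch of that pattern, assuming an already-configured IMFTransform and using illustrative names rather than the accelerator's members:
|
| // Sketch only: fallback buffer sizing driven by MFT_INPUT_STREAM_INFO.
| // The reported sizes are typically meaningful only after the input type
| // has been set on the stream.
| #include <windows.h>
| #include <mftransform.h>
|
| HRESULT QueryInputBufferRequirements(IMFTransform* encoder,
|                                      DWORD input_stream_id,
|                                      DWORD fallback_size,
|                                      DWORD* size,
|                                      DWORD* alignment) {
|   MFT_INPUT_STREAM_INFO info = {};
|   HRESULT hr = encoder->GetInputStreamInfo(input_stream_id, &info);
|   if (FAILED(hr))
|     return hr;
|   // cbSize == 0 means the MFT imposes no minimum buffer size, so use the
|   // caller-computed fallback (the I420 allocation size in the patch).
|   *size = info.cbSize ? info.cbSize : fallback_size;
|   // cbAlignment of 0 (or 1) means no particular alignment is required.
|   *alignment = info.cbAlignment;
|   return S_OK;
| }
|
| One detail worth confirming: cbAlignment may legitimately be zero when the MFT has no alignment requirement, so the alignment argument passed to mf::CreateEmptySampleWithBuffer should be checked against what that helper expects.
|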
| @@ -340,8 +352,33 @@ bool MediaFoundationVideoEncodeAccelerator::CreateHardwareEncoderMFT() {
|
| bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
|
| DCHECK(main_client_task_runner_->BelongsToCurrentThread());
|
|
|
| + DWORD input_count = 0;
|
| + DWORD output_count = 0;
|
| + HRESULT hr = encoder_->GetStreamCount(&input_count, &output_count);
|
| + RETURN_ON_HR_FAILURE(hr, "Couldn't get stream count", false);
|
| + if (input_count < 1 || output_count < 1) {
|
| + LOG(ERROR) << "Stream count too few: input " << input_count << ", output "
|
| + << output_count;
|
| + return false;
|
| + }
|
| +
|
| + std::vector<DWORD> input_ids(input_count, 0);
|
| + std::vector<DWORD> output_ids(output_count, 0);
|
| + hr = encoder_->GetStreamIDs(input_count, input_ids.data(), output_count,
|
| + output_ids.data());
|
| + if (hr == S_OK) {
|
| + input_stream_id_ = input_ids[0];
|
| + output_stream_id_ = output_ids[0];
|
| + } else if (hr == E_NOTIMPL) {
|
| + input_stream_id_ = 0;
|
| + output_stream_id_ = 0;
|
| + } else {
|
| + LOG(ERROR) << "Couldn't find stream ids.";
|
| + return false;
|
| + }
|
| +
|
| // Initialize output parameters.
|
| - HRESULT hr = MFCreateMediaType(imf_output_media_type_.Receive());
|
| + hr = MFCreateMediaType(imf_output_media_type_.Receive());
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't create media type", false);
|
| hr = imf_output_media_type_->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set media type", false);
|
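| The stream-ID discovery added above follows the documented IMFTransform contract: an MFT with a fixed number of streams may return E_NOTIMPL from GetStreamIDs, in which case the identifiers are simply consecutive indices starting at zero. A compact sketch of the same pattern with illustrative names:
|
| // Sketch: first input/output stream IDs of an MFT, honoring the E_NOTIMPL
| // fallback for fixed-stream transforms.
| #include <windows.h>
| #include <mftransform.h>
| #include <vector>
|
| bool PickFirstStreamIds(IMFTransform* encoder,
|                         DWORD* input_stream_id,
|                         DWORD* output_stream_id) {
|   DWORD input_count = 0;
|   DWORD output_count = 0;
|   if (FAILED(encoder->GetStreamCount(&input_count, &output_count)) ||
|       input_count < 1 || output_count < 1) {
|     return false;
|   }
|   std::vector<DWORD> inputs(input_count);
|   std::vector<DWORD> outputs(output_count);
|   HRESULT hr = encoder->GetStreamIDs(input_count, inputs.data(),
|                                      output_count, outputs.data());
|   if (hr == E_NOTIMPL) {
|     // Fixed-stream MFTs do not implement GetStreamIDs; IDs equal the
|     // zero-based stream indices.
|     *input_stream_id = 0;
|     *output_stream_id = 0;
|     return true;
|   }
|   if (FAILED(hr))
|     return false;
|   *input_stream_id = inputs[0];
|   *output_stream_id = outputs[0];
|   return true;
| }
|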
| @@ -350,7 +387,7 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
|
| hr = imf_output_media_type_->SetUINT32(MF_MT_AVG_BITRATE, target_bitrate_);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", false);
|
| hr = MFSetAttributeRatio(imf_output_media_type_.get(), MF_MT_FRAME_RATE,
|
| - frame_rate_, kMaxFrameRateDenominator);
|
| + frame_rate_, 1);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set frame rate", false);
|
| hr = MFSetAttributeSize(imf_output_media_type_.get(), MF_MT_FRAME_SIZE,
|
| input_visible_size_.width(),
|
| @@ -362,7 +399,8 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
|
| hr = imf_output_media_type_->SetUINT32(MF_MT_MPEG2_PROFILE,
|
| eAVEncH264VProfile_Base);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set codec profile", false);
|
| - hr = encoder_->SetOutputType(0, imf_output_media_type_.get(), 0);
|
| + hr = encoder_->SetOutputType(output_stream_id_, imf_output_media_type_.get(),
|
| + 0);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set output media type", false);
|
|
|
| // Initialize input parameters.
|
| @@ -373,7 +411,7 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
|
| hr = imf_input_media_type_->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_YV12);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set video format", false);
|
| hr = MFSetAttributeRatio(imf_input_media_type_.get(), MF_MT_FRAME_RATE,
|
| - frame_rate_, kMaxFrameRateDenominator);
|
| + frame_rate_, 1);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set frame rate", false);
|
| hr = MFSetAttributeSize(imf_input_media_type_.get(), MF_MT_FRAME_SIZE,
|
| input_visible_size_.width(),
|
| @@ -382,7 +420,7 @@ bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() {
|
| hr = imf_input_media_type_->SetUINT32(MF_MT_INTERLACE_MODE,
|
| MFVideoInterlace_Progressive);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set interlace mode", false);
|
| - hr = encoder_->SetInputType(0, imf_input_media_type_.get(), 0);
|
| + hr = encoder_->SetInputType(input_stream_id_, imf_input_media_type_.get(), 0);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set input media type", false);
|
|
|
| return SUCCEEDED(hr);
|
| @@ -401,7 +439,7 @@ bool MediaFoundationVideoEncodeAccelerator::SetEncoderModes() {
|
| var.ulVal = target_bitrate_;
|
| hr = codec_api_->SetValue(&CODECAPI_AVEncCommonMeanBitRate, &var);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", false);
|
| - var.ulVal = eAVEncAdaptiveMode_FrameRate;
|
| + var.ulVal = eAVEncAdaptiveMode_Resolution;
|
| hr = codec_api_->SetValue(&CODECAPI_AVEncAdaptiveMode, &var);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't set FrameRate", false);
|
| var.vt = VT_BOOL;
|
| @@ -446,19 +484,23 @@ void MediaFoundationVideoEncodeAccelerator::EncodeTask(
|
|
|
| input_sample_->SetSampleTime(frame->timestamp().InMicroseconds() *
|
| kOneMicrosecondInMFSampleTimeUnits);
|
| - input_sample_->SetSampleDuration(kOneSecondInMFSampleTimeUnits / frame_rate_);
|
| + UINT64 sample_duration = 1;
|
| + HRESULT hr =
|
| + MFFrameRateToAverageTimePerFrame(frame_rate_, 1, &sample_duration);
|
| + RETURN_ON_HR_FAILURE(hr, "Couldn't calculate sample duration", );
|
| + input_sample_->SetSampleDuration(sample_duration);
|
|
|
| // Release frame after input is copied.
|
| frame = nullptr;
|
|
|
| - HRESULT hr = encoder_->ProcessInput(0, input_sample_.get(), 0);
|
| + hr = encoder_->ProcessInput(input_stream_id_, input_sample_.get(), 0);
|
| // According to MSDN, if encoder returns MF_E_NOTACCEPTING, we need to try
|
| // processing the output. This error indicates that encoder does not accept
|
| // any more input data.
|
| if (hr == MF_E_NOTACCEPTING) {
|
| DVLOG(3) << "MF_E_NOTACCEPTING";
|
| ProcessOutput();
|
| - hr = encoder_->ProcessInput(0, input_sample_.get(), 0);
|
| + hr = encoder_->ProcessInput(input_stream_id_, input_sample_.get(), 0);
|
| if (!SUCCEEDED(hr)) {
|
| NotifyError(kPlatformFailureError);
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't encode", );
|
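| The duration change above removes the kOneSecondInMFSampleTimeUnits constant in favor of MFFrameRateToAverageTimePerFrame, which returns the per-frame duration directly in Media Foundation's 100 ns units. A standalone sketch (the 30 fps figure mentioned in the comment is only illustrative):
|
| // Sketch: per-frame sample duration in 100 ns units for an integer
| // frame rate.
| #include <windows.h>
| #include <mfapi.h>
|
| HRESULT AverageFrameDuration(UINT32 frames_per_second, UINT64* duration) {
|   // For 30 fps this yields roughly 333333 (about 33.3 ms per frame),
|   // matching 10,000,000 / fps for integer rates.
|   return MFFrameRateToAverageTimePerFrame(frames_per_second,
|                                           /*unDenominator=*/1, duration);
| }
|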
| @@ -476,15 +518,24 @@ void MediaFoundationVideoEncodeAccelerator::ProcessOutput() {
|
| DVLOG(3) << __func__;
|
| DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
|
|
|
| + DWORD output_status = 0;
|
| + HRESULT hr = encoder_->GetOutputStatus(&output_status);
|
| + RETURN_ON_HR_FAILURE(hr, "Couldn't get output status", );
|
| + if (output_status != MFT_OUTPUT_STATUS_SAMPLE_READY) {
|
| + DVLOG(3) << "Output isnt ready";
|
| + return;
|
| + }
|
| +
|
| MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
|
| output_data_buffer.dwStreamID = 0;
|
| output_data_buffer.dwStatus = 0;
|
| output_data_buffer.pEvents = NULL;
|
| output_data_buffer.pSample = output_sample_.get();
|
| DWORD status = 0;
|
| - HRESULT hr = encoder_->ProcessOutput(0, 1, &output_data_buffer, &status);
|
| + hr = encoder_->ProcessOutput(output_stream_id_, 1, &output_data_buffer,
|
| + &status);
|
| if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
|
| - DVLOG(3) << "MF_E_TRANSFORM_NEED_MORE_INPUT";
|
| + DVLOG(3) << "MF_E_TRANSFORM_NEED_MORE_INPUT" << status;
|
| return;
|
| }
|
| RETURN_ON_HR_FAILURE(hr, "Couldn't get encoded data", );
|
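| The early-out added above polls IMFTransform::GetOutputStatus before calling ProcessOutput. One caveat: GetOutputStatus is optional for MFTs and may return E_NOTIMPL, which the RETURN_ON_HR_FAILURE above would treat as a hard failure. A sketch that instead treats E_NOTIMPL as "unknown, try ProcessOutput anyway" (names illustrative):
|
| // Sketch: output-readiness check that tolerates MFTs which do not
| // implement GetOutputStatus.
| #include <windows.h>
| #include <mftransform.h>
|
| bool OutputMayBeReady(IMFTransform* encoder) {
|   DWORD flags = 0;
|   HRESULT hr = encoder->GetOutputStatus(&flags);
|   if (hr == E_NOTIMPL)
|     return true;  // Status not reported; the caller should just try.
|   if (FAILED(hr))
|     return false;
|   return (flags & MFT_OUTPUT_STATUS_SAMPLE_READY) != 0;
| }
|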
| @@ -580,20 +631,19 @@ void MediaFoundationVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
|
| DVLOG(3) << __func__;
|
| DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
|
|
|
| - frame_rate_ = framerate ? framerate : 1;
|
| - target_bitrate_ = bitrate ? bitrate : 1;
|
| -
|
| - VARIANT var;
|
| - var.vt = VT_UI4;
|
| - var.ulVal = target_bitrate_;
|
| - HRESULT hr = codec_api_->SetValue(&CODECAPI_AVEncCommonMeanBitRate, &var);
|
| - RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", );
|
| -
|
| - hr = imf_output_media_type_->SetUINT32(MF_MT_AVG_BITRATE, target_bitrate_);
|
| - RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", );
|
| - hr = MFSetAttributeRatio(imf_output_media_type_.get(), MF_MT_FRAME_RATE,
|
| - frame_rate_, kMaxFrameRateDenominator);
|
| - RETURN_ON_HR_FAILURE(hr, "Couldn't set output type params", );
|
| + frame_rate_ =
|
| + framerate
|
| + ? std::min(framerate, static_cast<uint32_t>(kMaxFrameRateNumerator))
|
| + : 1;
|
| +
|
| + if (target_bitrate_ != bitrate) {
|
| + target_bitrate_ = bitrate ? bitrate : 1;
|
| + VARIANT var;
|
| + var.vt = VT_UI4;
|
| + var.ulVal = target_bitrate_;
|
| + HRESULT hr = codec_api_->SetValue(&CODECAPI_AVEncCommonMeanBitRate, &var);
|
| + RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", );
|
| + }
|
| }
|
|
|
| void MediaFoundationVideoEncodeAccelerator::DestroyTask() {
|
|
|
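| For reference, the reworked RequestEncodingParametersChangeTask now clamps the requested frame rate to kMaxFrameRateNumerator and touches ICodecAPI only when the bitrate actually changes. A minimal sketch of the dynamic-bitrate part, assuming the caller supplies the encoder's ICodecAPI pointer (names illustrative, not the accelerator's members):
|
| // Sketch: runtime mean-bitrate update on an H.264 encoder MFT via ICodecAPI,
| // as the patch does when the requested bitrate differs from the current one.
| #include <windows.h>
| #include <strmif.h>
| #include <codecapi.h>
|
| HRESULT UpdateMeanBitrate(ICodecAPI* codec_api, UINT32 bits_per_second) {
|   VARIANT var;
|   VariantInit(&var);
|   var.vt = VT_UI4;
|   var.ulVal = bits_per_second ? bits_per_second : 1;  // Never pass zero.
|   return codec_api->SetValue(&CODECAPI_AVEncCommonMeanBitRate, &var);
| }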