OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "media/gpu/media_foundation_video_encode_accelerator_win.h" | |
6 | |
7 #if defined(OS_WIN) | |
8 #pragma warning(push) | |
#pragma warning(disable : 4800)  // Disable warning C4800: forcing value to bool.
#endif  // defined(OS_WIN)
11 | |
12 #include <codecapi.h> | |
13 #include <mferror.h> | |
14 #include <mftransform.h> | |
15 | |
16 #include <utility> | |
17 #include <vector> | |
18 | |
19 #include "base/threading/sequenced_task_runner_handle.h" | |
20 #include "base/win/scoped_co_mem.h" | |
21 #include "base/win/scoped_variant.h" | |
22 #include "base/win/windows_version.h" | |
23 #include "media/base/win/mf_helpers.h" | |
24 #include "media/base/win/mf_initializer.h" | |
25 #include "third_party/libyuv/include/libyuv.h" | |
26 | |
27 using base::win::ScopedComPtr; | |
28 using media::mf::MediaBufferScopedPointer; | |
29 | |
30 namespace media { | |
31 | |
namespace {

// Advertised encoder limits; the HW MFT is only queried for H.264 baseline
// at up to 4096x2160 @ 30fps.
const size_t kMaxFrameRateNumerator = 30;
const size_t kMaxFrameRateDenominator = 1;
const size_t kMaxResolutionWidth = 4096;
const size_t kMaxResolutionHeight = 2160;
const size_t kNumInputBuffers = 3;
const size_t kOneSecondInMicroseconds = 1000000;
// The reusable output sample is allocated at this multiple of the bitstream
// buffer size, to leave headroom for large encoded frames.
const size_t kOutputSampleBufferSizeRatio = 4;

// DLLs that must be loaded for Media Foundation encoding to work.
// No "static" here: the unnamed namespace already gives internal linkage.
const wchar_t* const kMediaFoundationVideoEncoderDLLs[] = {
    L"mf.dll", L"mfplat.dll",
};

}  // namespace
47 | |
// Holds a copy of one encoded frame while the client has no free
// BitstreamBuffer to receive it; drained in UseOutputBitstreamBufferTask().
class MediaFoundationVideoEncodeAccelerator::EncodeOutput {
 public:
  // |size| is the payload size in bytes; the backing vector is allocated
  // up-front so memory() can be memcpy'd into directly.
  EncodeOutput(uint32_t size, bool key_frame, base::TimeDelta timestamp)
      : keyframe(key_frame), capture_timestamp(timestamp), data_(size) {}

  // Mutable pointer to the payload storage.
  uint8_t* memory() { return data_.data(); }
  int size() const { return static_cast<int>(data_.size()); }
  const bool keyframe;
  const base::TimeDelta capture_timestamp;

 private:
  std::vector<uint8_t> data_;
  DISALLOW_COPY_AND_ASSIGN(EncodeOutput);
};
62 | |
// Pairs a client-provided output BitstreamBuffer id with its mapped shared
// memory, as queued on the encoder thread awaiting encoded output.
struct MediaFoundationVideoEncodeAccelerator::BitstreamBufferRef {
  // |shm| must already be mapped by the caller.
  BitstreamBufferRef(int32_t id,
                     std::unique_ptr<base::SharedMemory> shm,
                     size_t size)
      : id(id), shm(std::move(shm)), size(size) {}
  const int32_t id;  // Client's buffer id, echoed back in BitstreamBufferReady.
  const std::unique_ptr<base::SharedMemory> shm;
  const size_t size;  // Usable size of |shm| in bytes.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
};
75 | |
// Constructed on the client sequence, whose task runner is captured here for
// posting callbacks back to |client_|. The encoder thread itself is started
// later, in Initialize().
MediaFoundationVideoEncodeAccelerator::MediaFoundationVideoEncodeAccelerator()
    : client_task_runner_(base::SequencedTaskRunnerHandle::Get()),
      encoder_thread_("MFEncoderThread"),
      encoder_task_weak_factory_(this) {}
80 | |
MediaFoundationVideoEncodeAccelerator::
    ~MediaFoundationVideoEncodeAccelerator() {
  DVLOG(3) << __FUNCTION__;
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  // Destroy() must have stopped the encoder thread and invalidated all
  // encoder-thread weak pointers before |this| is deleted.
  DCHECK(!encoder_thread_.IsRunning());
  DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
}
89 | |
90 VideoEncodeAccelerator::SupportedProfiles | |
91 MediaFoundationVideoEncodeAccelerator::GetSupportedProfiles() { | |
92 DVLOG(3) << __FUNCTION__; | |
93 DCHECK(sequence_checker_.CalledOnValidSequencedThread()); | |
94 | |
95 SupportedProfiles profiles; | |
96 if (base::win::GetVersion() < base::win::VERSION_WIN8) { | |
97 DLOG(ERROR) << "Windows versions earlier than 8 are not supported."; | |
98 return profiles; | |
99 } | |
100 | |
101 SupportedProfile profile; | |
102 // More profiles can be supported here, but they should be available in SW | |
103 // fallback as well. | |
104 profile.profile = H264PROFILE_BASELINE; | |
105 profile.max_framerate_numerator = kMaxFrameRateNumerator; | |
106 profile.max_framerate_denominator = kMaxFrameRateDenominator; | |
107 profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight); | |
108 profiles.push_back(profile); | |
109 return profiles; | |
110 } | |
111 | |
// Synchronously sets up the HW H.264 encoder MFT. Only I420 input and H.264
// baseline output are accepted; returns false (with a log) on unsupported
// configurations or any Media Foundation failure. On success, asks |client|
// (via its task runner) for kNumInputBuffers output bitstream buffers.
bool MediaFoundationVideoEncodeAccelerator::Initialize(
    VideoPixelFormat format,
    const gfx::Size& input_visible_size,
    VideoCodecProfile output_profile,
    uint32_t initial_bitrate,
    Client* client) {
  DVLOG(3) << __FUNCTION__
           << ": input_format=" << VideoPixelFormatToString(format)
           << ", input_visible_size=" << input_visible_size.ToString()
           << ", output_profile=" << output_profile
           << ", initial_bitrate=" << initial_bitrate;
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  if (PIXEL_FORMAT_I420 != format) {
    DLOG(ERROR) << "Input format not supported= "
                << VideoPixelFormatToString(format);
    return false;
  }

  if (H264PROFILE_BASELINE != output_profile) {
    DLOG(ERROR) << "Output profile not supported= " << output_profile;
    return false;
  }

  // The DLLs were loaded in PreSandboxInitialization(); bail out here if any
  // of them is missing from the process (this is where failures get logged).
  for (const wchar_t* mfdll : kMediaFoundationVideoEncoderDLLs) {
    HMODULE dll = ::GetModuleHandle(mfdll);
    if (!dll) {
      DLOG(ERROR) << mfdll << " is required for encoding";
      return false;
    }
  }

  // All encoding work runs on a dedicated COM-initialized thread.
  encoder_thread_.init_com_with_mta(false);
  if (!encoder_thread_.Start()) {
    DLOG(ERROR) << "Failed spawning encoder thread.";
    return false;
  }
  encoder_thread_task_runner_ = encoder_thread_.task_runner();

  InitializeMediaFoundation();

  // Enumerate HW H.264 encoder MFTs and activate the first (best-ranked) one.
  // NOTE(review): |input_info| is filled in but MFTEnum() is passed NULL for
  // the input type -- confirm whether it was meant to be passed instead.
  uint32_t flags = MFT_ENUM_FLAG_HARDWARE | MFT_ENUM_FLAG_SORTANDFILTER;
  MFT_REGISTER_TYPE_INFO input_info;
  input_info.guidMajorType = MFMediaType_Video;
  input_info.guidSubtype = MFVideoFormat_NV12;
  MFT_REGISTER_TYPE_INFO output_info;
  output_info.guidMajorType = MFMediaType_Video;
  output_info.guidSubtype = MFVideoFormat_H264;

  base::win::ScopedCoMem<CLSID> CLSIDs;
  uint32_t count = 0;
  HRESULT hr = MFTEnum(MFT_CATEGORY_VIDEO_ENCODER, flags, NULL, &output_info,
                       NULL, &CLSIDs, &count);
  RETURN_ON_HR_FAILURE(hr, "Couldn't enumerate hardware encoder", false);
  RETURN_ON_FAILURE((count > 0), "No HW encoder found", false);
  DVLOG(3) << "HW encoder(s) found: " << count;
  hr = encoder_.CreateInstance(CLSIDs[0]);
  RETURN_ON_HR_FAILURE(hr, "Couldn't activate hardware encoder", false);

  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();
  input_visible_size_ = input_visible_size;
  frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
  target_bitrate_ = initial_bitrate;
  // Output buffers are sized at one byte per visible pixel.
  bitstream_buffer_size_ = input_visible_size.GetArea();

  // Byte offsets of the chroma planes inside the packed input sample buffer:
  // U follows the Y plane, V follows U.
  u_plane_offset_ =
      VideoFrame::PlaneSize(PIXEL_FORMAT_I420, VideoFrame::kYPlane,
                            input_visible_size_)
          .GetArea();
  v_plane_offset_ =
      u_plane_offset_ +
      VideoFrame::PlaneSize(PIXEL_FORMAT_I420, VideoFrame::kUPlane,
                            input_visible_size_)
          .GetArea();

  if (!InitializeInputOutputSamples()) {
    DLOG(ERROR) << "Failed initializing input-output samples.";
    return false;
  }

  if (!SetEncoderModes()) {
    DLOG(ERROR) << "Failed setting encoder parameters.";
    return false;
  }

  hr = encoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
  RETURN_ON_HR_FAILURE(hr, "Couldn't set ProcessMessage", false);

  client_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
                 input_visible_size_, bitstream_buffer_size_));
  return SUCCEEDED(hr);
}
207 | |
// Client-sequence entry point: hops to the encoder thread, where EncodeTask()
// does the actual copy and submission. |frame| is kept alive by the bound
// scoped_refptr.
void MediaFoundationVideoEncodeAccelerator::Encode(
    const scoped_refptr<VideoFrame>& frame,
    bool force_keyframe) {
  DVLOG(3) << __FUNCTION__;
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  encoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&MediaFoundationVideoEncodeAccelerator::EncodeTask,
                            encoder_task_weak_factory_.GetWeakPtr(), frame,
                            force_keyframe));
}
219 | |
// Validates and maps a client-provided output buffer on the client sequence,
// then hands it to the encoder thread. Errors (too-small buffer, mapping
// failure) are reported to the client via NotifyError.
void MediaFoundationVideoEncodeAccelerator::UseOutputBitstreamBuffer(
    const BitstreamBuffer& buffer) {
  DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  if (buffer.size() < bitstream_buffer_size_) {
    DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
                << " vs. " << bitstream_buffer_size_;
    client_->NotifyError(kInvalidArgumentError);
    return;
  }

  // Map the shared memory here so the encoder thread can memcpy into it
  // without further setup. |false| = writable mapping.
  std::unique_ptr<base::SharedMemory> shm(
      new base::SharedMemory(buffer.handle(), false));
  if (!shm->Map(buffer.size())) {
    DLOG(ERROR) << "Failed mapping shared memory.";
    client_->NotifyError(kPlatformFailureError);
    return;
  }

  std::unique_ptr<BitstreamBufferRef> buffer_ref(
      new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
  encoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(
          &MediaFoundationVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
          encoder_task_weak_factory_.GetWeakPtr(), base::Passed(&buffer_ref)));
}
248 | |
// Client-sequence entry point: hops to the encoder thread, where
// RequestEncodingParametersChangeTask() applies the new bitrate/framerate.
void MediaFoundationVideoEncodeAccelerator::RequestEncodingParametersChange(
    uint32_t bitrate,
    uint32_t framerate) {
  DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
           << ": framerate=" << framerate;
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  encoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&MediaFoundationVideoEncodeAccelerator::
                     RequestEncodingParametersChangeTask,
                 encoder_task_weak_factory_.GetWeakPtr(), bitrate, framerate));
}
262 | |
// Tears down the encoder and deletes |this|. Must be called on the client
// sequence. NOTE(review): tasks already queued on the encoder thread are
// deliberately allowed to run before the posted DestroyTask and the join --
// per review discussion, finishing in-flight encodes was preferred over
// dropping them.
void MediaFoundationVideoEncodeAccelerator::Destroy() {
  DVLOG(3) << __FUNCTION__;
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  // Cancel all callbacks.
  client_ptr_factory_.reset();

  if (encoder_thread_.IsRunning()) {
    // DestroyTask invalidates encoder-thread weak pointers and releases the
    // MFT; Stop() then joins the thread.
    encoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&MediaFoundationVideoEncodeAccelerator::DestroyTask,
                   encoder_task_weak_factory_.GetWeakPtr()));
    encoder_thread_.Stop();
  }

  delete this;
}
280 | |
// static
// Preloads the Media Foundation DLLs while the process can still load
// libraries (i.e. before sandbox lockdown). Failures are not logged here;
// Initialize() checks GetModuleHandle() per DLL and logs any that is missing.
void MediaFoundationVideoEncodeAccelerator::PreSandboxInitialization() {
  for (const wchar_t* mfdll : kMediaFoundationVideoEncoderDLLs)
    ::LoadLibrary(mfdll);
}
286 | |
287 bool MediaFoundationVideoEncodeAccelerator::InitializeInputOutputSamples() { | |
288 DCHECK(sequence_checker_.CalledOnValidSequencedThread()); | |
289 | |
290 HRESULT hr = encoder_->GetStreamLimits( | |
291 &input_stream_count_min_, &input_stream_count_max_, | |
292 &output_stream_count_min_, &output_stream_count_max_); | |
293 RETURN_ON_HR_FAILURE(hr, "Couldn't query stream limits", false); | |
294 DVLOG(3) << "Stream limits: " << input_stream_count_min_ << "," | |
295 << input_stream_count_max_ << "," << output_stream_count_min_ << "," | |
296 << output_stream_count_max_; | |
297 | |
298 // Initialize output parameters. | |
299 base::win::ScopedComPtr<IMFMediaType> imf_output_media_type; | |
300 hr = MFCreateMediaType(imf_output_media_type.Receive()); | |
301 RETURN_ON_HR_FAILURE(hr, "Couldn't create media type", false); | |
302 hr = imf_output_media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
303 hr |= imf_output_media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
304 hr |= imf_output_media_type->SetUINT32(MF_MT_AVG_BITRATE, target_bitrate_); | |
305 hr |= MFSetAttributeRatio(imf_output_media_type.get(), MF_MT_FRAME_RATE, | |
306 frame_rate_, kMaxFrameRateDenominator); | |
307 hr |= MFSetAttributeSize(imf_output_media_type.get(), MF_MT_FRAME_SIZE, | |
308 input_visible_size_.width(), | |
309 input_visible_size_.height()); | |
310 hr |= imf_output_media_type->SetUINT32(MF_MT_INTERLACE_MODE, | |
311 MFVideoInterlace_Progressive); | |
312 hr |= imf_output_media_type->SetUINT32(MF_MT_MPEG2_PROFILE, | |
313 eAVEncH264VProfile_Base); | |
314 RETURN_ON_HR_FAILURE(hr, "Couldn't set output params", false); | |
grt (UTC plus 2)
2016/07/18 08:35:04
|hr| may be gibberish by the time you reach here d
emircan
2016/07/18 20:45:22
I am removing all |='s and treat all HResult indiv
| |
315 hr = encoder_->SetOutputType(0, imf_output_media_type.get(), 0); | |
316 RETURN_ON_HR_FAILURE(hr, "Couldn't set output media type", false); | |
317 | |
318 // Initialize input parameters. | |
319 base::win::ScopedComPtr<IMFMediaType> imf_input_media_type; | |
320 hr = MFCreateMediaType(imf_input_media_type.Receive()); | |
321 RETURN_ON_HR_FAILURE(hr, "Couldn't create media type", false); | |
322 hr = imf_input_media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
323 hr |= imf_input_media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_YV12); | |
324 hr |= MFSetAttributeRatio(imf_input_media_type.get(), MF_MT_FRAME_RATE, | |
325 frame_rate_, kMaxFrameRateDenominator); | |
326 hr |= MFSetAttributeSize(imf_input_media_type.get(), MF_MT_FRAME_SIZE, | |
327 input_visible_size_.width(), | |
328 input_visible_size_.height()); | |
329 hr |= imf_input_media_type->SetUINT32(MF_MT_INTERLACE_MODE, | |
330 MFVideoInterlace_Progressive); | |
331 RETURN_ON_HR_FAILURE(hr, "Couldn't set input params", false); | |
332 hr = encoder_->SetInputType(0, imf_input_media_type.get(), 0); | |
333 RETURN_ON_HR_FAILURE(hr, "Couldn't set input media type", false); | |
334 | |
335 input_sample_.Attach(mf::CreateEmptySampleWithBuffer( | |
336 VideoFrame::AllocationSize(PIXEL_FORMAT_I420, input_visible_size_), 2)); | |
337 output_sample_.Attach(mf::CreateEmptySampleWithBuffer( | |
338 bitstream_buffer_size_ * kOutputSampleBufferSizeRatio, 2)); | |
339 | |
340 return SUCCEEDED(hr); | |
341 } | |
342 | |
// Configures rate control and latency through the MFT's ICodecAPI: CBR at
// |target_bitrate_|, frame-rate-based adaptive mode, and low-latency mode.
bool MediaFoundationVideoEncodeAccelerator::SetEncoderModes() {
  DCHECK(sequence_checker_.CalledOnValidSequencedThread());

  HRESULT hr = encoder_.QueryInterface(IID_ICodecAPI, codec_api_.ReceiveVoid());
  RETURN_ON_HR_FAILURE(hr, "Couldn't get ICodecAPI", false);
  // |var| is reused for each property; only vt/ulVal/boolVal are set, so no
  // VariantInit/VariantClear is needed for these scalar types.
  VARIANT var;
  var.vt = VT_UI4;
  var.ulVal = eAVEncCommonRateControlMode_CBR;
  hr = codec_api_->SetValue(&CODECAPI_AVEncCommonRateControlMode, &var);
  RETURN_ON_HR_FAILURE(hr, "Couldn't set CommonRateControlMode", false);
  var.ulVal = target_bitrate_;
  hr = codec_api_->SetValue(&CODECAPI_AVEncCommonMeanBitRate, &var);
  RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", false);
  var.ulVal = eAVEncAdaptiveMode_FrameRate;
  hr = codec_api_->SetValue(&CODECAPI_AVEncAdaptiveMode, &var);
  RETURN_ON_HR_FAILURE(hr, "Couldn't set FrameRate", false);
  var.vt = VT_BOOL;
  var.boolVal = VARIANT_TRUE;
  hr = codec_api_->SetValue(&CODECAPI_AVLowLatencyMode, &var);
  RETURN_ON_HR_FAILURE(hr, "Couldn't set LowLatencyMode", false);
  return SUCCEEDED(hr);
}
365 | |
// Encoder-thread half of Encode(): copies |frame| into the reusable MF input
// sample, submits it to the MFT, and drains available output. If the MFT
// rejects the input twice (MF_E_NOTACCEPTING), the frame is re-posted to
// retry later. NOTE(review): |force_keyframe| is only forwarded on the
// retry; it is never applied to the encoder -- confirm intended.
void MediaFoundationVideoEncodeAccelerator::EncodeTask(
    const scoped_refptr<VideoFrame>& frame,
    bool force_keyframe) {
  DVLOG(3) << __FUNCTION__;
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  if (!encoder_)
    return;

  base::win::ScopedComPtr<IMFMediaBuffer> input_buffer;
  input_sample_->GetBufferByIndex(0, input_buffer.Receive());

  {
    MediaBufferScopedPointer scoped_buffer(input_buffer.get());
    DCHECK(scoped_buffer.get());
    // The source U and V planes are passed swapped, so the destination is
    // laid out Y, V, U -- i.e. YV12, matching the MFVideoFormat_YV12 input
    // type configured in InitializeInputOutputSamples().
    // NOTE(review): destination strides reuse the source frame's strides,
    // which assumes the frame's coded width matches |input_visible_size_|;
    // confirm with callers.
    libyuv::I420Copy(frame->visible_data(VideoFrame::kYPlane),
                     frame->stride(VideoFrame::kYPlane),
                     frame->visible_data(VideoFrame::kVPlane),
                     frame->stride(VideoFrame::kVPlane),
                     frame->visible_data(VideoFrame::kUPlane),
                     frame->stride(VideoFrame::kUPlane), scoped_buffer.get(),
                     frame->stride(VideoFrame::kYPlane),
                     scoped_buffer.get() + u_plane_offset_,
                     frame->stride(VideoFrame::kUPlane),
                     scoped_buffer.get() + v_plane_offset_,
                     frame->stride(VideoFrame::kVPlane),
                     input_visible_size_.width(), input_visible_size_.height());
  }

  // MF sample times are in 100-ns units, hence microseconds * 10.
  input_sample_->SetSampleTime(frame->timestamp().InMicroseconds() * 10);
  input_sample_->SetSampleDuration(kOneSecondInMicroseconds / frame_rate_);
  HRESULT hr = encoder_->ProcessInput(0, input_sample_.get(), 0);
  // According to MSDN, if encoder returns MF_E_NOTACCEPTING, we need to try
  // processing the output. This error indicates that encoder does not accept
  // any more input data.
  if (hr == MF_E_NOTACCEPTING) {
    DVLOG(3) << "MF_E_NOTACCEPTING";
    ProcessOutput();
    hr = encoder_->ProcessInput(0, input_sample_.get(), 0);
    if (hr == MF_E_NOTACCEPTING) {
      // Still not accepting after a drain: re-post this frame to try again.
      encoder_thread_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&MediaFoundationVideoEncodeAccelerator::EncodeTask,
                     encoder_task_weak_factory_.GetWeakPtr(), frame,
                     force_keyframe));
    } else {
      RETURN_ON_HR_FAILURE(hr, "Couldn't encode", );
    }
  } else {
    RETURN_ON_HR_FAILURE(hr, "Couldn't encode", );
  }
  DVLOG(3) << "Sent for encode " << hr;

  ProcessOutput();
}
421 | |
422 void MediaFoundationVideoEncodeAccelerator::ProcessOutput() { | |
423 DVLOG(3) << __FUNCTION__; | |
424 DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread()); | |
425 | |
426 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0}; | |
427 output_data_buffer.dwStreamID = 0; | |
428 output_data_buffer.dwStatus = 0; | |
429 output_data_buffer.pEvents = NULL; | |
430 output_data_buffer.pSample = output_sample_.get(); | |
431 DWORD status = 0; | |
432 HRESULT hr = encoder_->ProcessOutput(0, 1, &output_data_buffer, &status); | |
433 if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { | |
434 DVLOG(3) << "MF_E_TRANSFORM_NEED_MORE_INPUT"; | |
435 return; | |
436 } | |
437 RETURN_ON_HR_FAILURE(hr, "Couldn't get encoded data", ); | |
438 DVLOG(3) << "Got encoded data " << hr; | |
439 | |
440 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
441 hr = output_sample_->GetBufferByIndex(0, output_buffer.Receive()); | |
442 RETURN_ON_HR_FAILURE(hr, "Couldn't get buffer by index", ); | |
443 DWORD size = 0; | |
444 hr = output_buffer->GetCurrentLength(&size); | |
445 RETURN_ON_HR_FAILURE(hr, "Couldn't get buffer length", ); | |
446 | |
447 const bool keyframe = MFGetAttributeUINT32( | |
448 output_sample_.get(), MFSampleExtension_CleanPoint, false); | |
449 DVLOG(3) << "We HAVE encoded data with size:" << size << " keyframe " | |
450 << keyframe; | |
451 | |
452 if (bitstream_buffer_queue_.empty()) { | |
453 DVLOG(3) << "No bitstream buffers."; | |
454 // We need to copy the output so that encoding can continue. | |
455 std::unique_ptr<EncodeOutput> encode_output( | |
456 new EncodeOutput(size, keyframe, base::Time::Now() - base::Time())); | |
457 { | |
458 MediaBufferScopedPointer scoped_buffer(output_buffer.get()); | |
459 memcpy(encode_output->memory(), scoped_buffer.get(), size); | |
460 } | |
461 encoder_output_queue_.push_back(std::move(encode_output)); | |
462 return; | |
463 } | |
464 | |
465 std::unique_ptr<MediaFoundationVideoEncodeAccelerator::BitstreamBufferRef> | |
466 buffer_ref = std::move(bitstream_buffer_queue_.front()); | |
467 bitstream_buffer_queue_.pop_front(); | |
468 | |
469 { | |
470 MediaBufferScopedPointer scoped_buffer(output_buffer.get()); | |
471 memcpy(buffer_ref->shm->memory(), scoped_buffer.get(), size); | |
472 } | |
473 | |
474 client_task_runner_->PostTask( | |
475 FROM_HERE, | |
476 base::Bind(&Client::BitstreamBufferReady, client_, buffer_ref->id, size, | |
477 keyframe, base::Time::Now() - base::Time())); | |
478 | |
479 // Keep calling ProcessOutput recursively until MF_E_TRANSFORM_NEED_MORE_INPUT | |
grt (UTC plus 2)
2016/07/18 08:35:04
how deep could this recursion go?
emircan
2016/07/18 20:45:22
ProcessOutput() might have 3 outcomes:
- HR succee
| |
480 // is returned to flush out all the output. | |
481 ProcessOutput(); | |
482 } | |
483 | |
// Encoder-thread half of UseOutputBitstreamBuffer(): if an encoded frame is
// already waiting (encoder ran out of buffers earlier), return it in this
// buffer immediately; otherwise queue the buffer for future output.
void MediaFoundationVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
    std::unique_ptr<BitstreamBufferRef> buffer_ref) {
  DVLOG(3) << __FUNCTION__;
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  // If there is already EncodeOutput waiting, copy its output first.
  if (!encoder_output_queue_.empty()) {
    std::unique_ptr<MediaFoundationVideoEncodeAccelerator::EncodeOutput>
        encode_output = std::move(encoder_output_queue_.front());
    encoder_output_queue_.pop_front();
    ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
    return;
  }

  bitstream_buffer_queue_.push_back(std::move(buffer_ref));
}
500 | |
// Copies a stashed EncodeOutput into the client's shared-memory buffer and
// notifies the client on its own task runner.
void MediaFoundationVideoEncodeAccelerator::ReturnBitstreamBuffer(
    std::unique_ptr<EncodeOutput> encode_output,
    std::unique_ptr<MediaFoundationVideoEncodeAccelerator::BitstreamBufferRef>
        buffer_ref) {
  DVLOG(3) << __FUNCTION__;
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  memcpy(buffer_ref->shm->memory(), encode_output->memory(),
         encode_output->size());
  client_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Client::BitstreamBufferReady, client_, buffer_ref->id,
                 encode_output->size(), encode_output->keyframe,
                 encode_output->capture_timestamp));
}
516 | |
517 void MediaFoundationVideoEncodeAccelerator::RequestEncodingParametersChangeTask( | |
518 uint32_t bitrate, | |
519 uint32_t framerate) { | |
520 DVLOG(3) << __FUNCTION__; | |
521 DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread()); | |
522 | |
523 frame_rate_ = framerate ? framerate : 1; | |
524 target_bitrate_ = bitrate ? bitrate : 1; | |
525 | |
526 VARIANT var; | |
527 var.vt = VT_UI4; | |
528 var.ulVal = target_bitrate_; | |
529 HRESULT hr = codec_api_->SetValue(&CODECAPI_AVEncCommonMeanBitRate, &var); | |
530 RETURN_ON_HR_FAILURE(hr, "Couldn't set bitrate", ); | |
531 | |
532 base::win::ScopedComPtr<IMFMediaType> imf_output_media_type; | |
533 hr = MFCreateMediaType(imf_output_media_type.Receive()); | |
534 RETURN_ON_HR_FAILURE(hr, "Couldn't create output media type", ); | |
535 hr = imf_output_media_type->SetUINT32(MF_MT_AVG_BITRATE, target_bitrate_); | |
536 hr |= MFSetAttributeRatio(imf_output_media_type.get(), MF_MT_FRAME_RATE, | |
537 frame_rate_, kMaxFrameRateDenominator); | |
538 RETURN_ON_HR_FAILURE(hr, "Couldn't set output type params", ); | |
539 } | |
540 | |
// Runs on the encoder thread as the last task before the thread is joined
// (posted by Destroy()): cancels pending encoder-thread callbacks and
// releases the MFT.
void MediaFoundationVideoEncodeAccelerator::DestroyTask() {
  DVLOG(3) << __FUNCTION__;
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  // Cancel all encoder thread callbacks.
  encoder_task_weak_factory_.InvalidateWeakPtrs();

  encoder_.Release();
}
550 | |
}  // namespace media
OLD | NEW |