OLD | NEW |
| (Empty) |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/renderer/media/rtc_video_encoder.h" | |
6 | |
7 #include <string.h> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/location.h" | |
11 #include "base/logging.h" | |
12 #include "base/macros.h" | |
13 #include "base/memory/scoped_vector.h" | |
14 #include "base/metrics/histogram.h" | |
15 #include "base/numerics/safe_conversions.h" | |
16 #include "base/rand_util.h" | |
17 #include "base/single_thread_task_runner.h" | |
18 #include "base/synchronization/lock.h" | |
19 #include "base/synchronization/waitable_event.h" | |
20 #include "base/threading/thread_task_runner_handle.h" | |
21 #include "media/base/bind_to_current_loop.h" | |
22 #include "media/base/bitstream_buffer.h" | |
23 #include "media/base/video_frame.h" | |
24 #include "media/base/video_util.h" | |
25 #include "media/filters/h264_parser.h" | |
26 #include "media/renderers/gpu_video_accelerator_factories.h" | |
27 #include "media/video/video_encode_accelerator.h" | |
28 #include "third_party/libyuv/include/libyuv.h" | |
29 #include "third_party/webrtc/base/timeutils.h" | |
30 | |
31 namespace content { | |
32 | |
33 namespace { | |
34 | |
35 // Translate from webrtc::VideoCodecType and webrtc::VideoCodec to | |
36 // media::VideoCodecProfile. | |
37 media::VideoCodecProfile WebRTCVideoCodecToVideoCodecProfile( | |
38 webrtc::VideoCodecType type, | |
39 const webrtc::VideoCodec* codec_settings) { | |
40 DCHECK_EQ(type, codec_settings->codecType); | |
41 switch (type) { | |
42 case webrtc::kVideoCodecVP8: | |
43 return media::VP8PROFILE_ANY; | |
44 case webrtc::kVideoCodecH264: { | |
45 switch (codec_settings->codecSpecific.H264.profile) { | |
46 case webrtc::kProfileBase: | |
47 return media::H264PROFILE_BASELINE; | |
48 case webrtc::kProfileMain: | |
49 return media::H264PROFILE_MAIN; | |
50 } | |
51 } | |
52 default: | |
53 NOTREACHED() << "Unrecognized video codec type"; | |
54 return media::VIDEO_CODEC_PROFILE_UNKNOWN; | |
55 } | |
56 } | |
57 | |
58 // Populates struct webrtc::RTPFragmentationHeader for H264 codec. | |
59 // Each entry specifies the offset and length (excluding start code) of a NALU. | |
60 // Returns true if successful. | |
61 bool GetRTPFragmentationHeaderH264(webrtc::RTPFragmentationHeader* header, | |
62 const uint8_t* data, uint32_t length) { | |
63 media::H264Parser parser; | |
64 parser.SetStream(data, length); | |
65 | |
66 std::vector<media::H264NALU> nalu_vector; | |
67 while (true) { | |
68 media::H264NALU nalu; | |
69 const media::H264Parser::Result result = parser.AdvanceToNextNALU(&nalu); | |
70 if (result == media::H264Parser::kOk) { | |
71 nalu_vector.push_back(nalu); | |
72 } else if (result == media::H264Parser::kEOStream) { | |
73 break; | |
74 } else { | |
75 DLOG(ERROR) << "Unexpected H264 parser result"; | |
76 return false; | |
77 } | |
78 } | |
79 | |
80 header->VerifyAndAllocateFragmentationHeader(nalu_vector.size()); | |
81 for (size_t i = 0; i < nalu_vector.size(); ++i) { | |
82 header->fragmentationOffset[i] = nalu_vector[i].data - data; | |
83 header->fragmentationLength[i] = nalu_vector[i].size; | |
84 header->fragmentationPlType[i] = 0; | |
85 header->fragmentationTimeDiff[i] = 0; | |
86 } | |
87 return true; | |
88 } | |
89 | |
90 } // namespace | |
91 | |
// This private class of RTCVideoEncoder does the actual work of communicating
// with a media::VideoEncodeAccelerator for handling video encoding. It can
// be created on any thread, but should subsequently be posted to (and Destroy()
// called on) a single thread.
//
// This class separates state related to the thread that RTCVideoEncoder
// operates on from the thread that |gpu_factories_| provides for accelerator
// operations (presently the media thread).
class RTCVideoEncoder::Impl
    : public media::VideoEncodeAccelerator::Client,
      public base::RefCountedThreadSafe<RTCVideoEncoder::Impl> {
 public:
  Impl(media::GpuVideoAcceleratorFactories* gpu_factories,
       webrtc::VideoCodecType video_codec_type);

  // Create the VEA and call Initialize() on it. Called once per instantiation,
  // and then the instance is bound forevermore to whichever thread made the
  // call.
  // RTCVideoEncoder expects to be able to call this function synchronously from
  // its own thread, hence the |async_waiter| and |async_retval| arguments.
  void CreateAndInitializeVEA(const gfx::Size& input_visible_size,
                              uint32_t bitrate,
                              media::VideoCodecProfile profile,
                              base::WaitableEvent* async_waiter,
                              int32_t* async_retval);
  // Enqueue a frame from WebRTC for encoding.
  // RTCVideoEncoder expects to be able to call this function synchronously from
  // its own thread, hence the |async_waiter| and |async_retval| arguments.
  void Enqueue(const webrtc::VideoFrame* input_frame,
               bool force_keyframe,
               base::WaitableEvent* async_waiter,
               int32_t* async_retval);

  // RTCVideoEncoder is given a buffer to be passed to WebRTC through the
  // RTCVideoEncoder::ReturnEncodedImage() function.  When that is complete,
  // the buffer is returned to Impl by its index using this function.
  void UseOutputBitstreamBufferId(int32_t bitstream_buffer_id);

  // Request encoding parameter change for the underlying encoder.
  void RequestEncodingParametersChange(uint32_t bitrate, uint32_t framerate);

  // Registers the callback WebRTC uses to receive encoded images; signals
  // |async_waiter| with the current status when done.
  void RegisterEncodeCompleteCallback(base::WaitableEvent* async_waiter,
                                      int32_t* async_retval,
                                      webrtc::EncodedImageCallback* callback);

  // Destroy this Impl's encoder.  The destructor is not explicitly called, as
  // Impl is a base::RefCountedThreadSafe.
  void Destroy(base::WaitableEvent* async_waiter);

  // Return the status of Impl. One of WEBRTC_VIDEO_CODEC_XXX value.
  int32_t GetStatus() const;

  // The codec type this Impl was created for, as reported to WebRTC.
  webrtc::VideoCodecType video_codec_type() { return video_codec_type_; }

  // media::VideoEncodeAccelerator::Client implementation.
  void RequireBitstreamBuffers(unsigned int input_count,
                               const gfx::Size& input_coded_size,
                               size_t output_buffer_size) override;
  void BitstreamBufferReady(int32_t bitstream_buffer_id,
                            size_t payload_size,
                            bool key_frame,
                            base::TimeDelta timestamp) override;
  void NotifyError(media::VideoEncodeAccelerator::Error error) override;

 private:
  friend class base::RefCountedThreadSafe<Impl>;

  enum {
    kInputBufferExtraCount = 1,  // The number of input buffers allocated, more
                                 // than what is requested by
                                 // VEA::RequireBitstreamBuffers().
    kOutputBufferCount = 3,
  };

  ~Impl() override;

  // Logs the |error| and |str| sent from |location| and NotifyError()s forward.
  void LogAndNotifyError(const tracked_objects::Location& location,
                         const std::string& str,
                         media::VideoEncodeAccelerator::Error error);

  // Perform encoding on an input frame from the input queue.
  void EncodeOneFrame();

  // Notify that an input frame is finished for encoding. |index| is the index
  // of the completed frame in |input_buffers_|.
  void EncodeFrameFinished(int index);

  // Set up/signal |async_waiter_| and |async_retval_|; see declarations below.
  void RegisterAsyncWaiter(base::WaitableEvent* waiter, int32_t* retval);
  void SignalAsyncWaiter(int32_t retval);

  // Checks if the bitrate would overflow when passing from kbps to bps.
  bool IsBitrateTooHigh(uint32_t bitrate);

  // Checks if the frame size is different than hardware accelerator
  // requirements.
  bool RequiresSizeChange(const scoped_refptr<media::VideoFrame>& frame) const;

  // Return an encoded output buffer to WebRTC.
  void ReturnEncodedImage(const webrtc::EncodedImage& image,
                          int32_t bitstream_buffer_id,
                          uint16_t picture_id);

  // Sets |status_| under |status_lock_|; see GetStatus().
  void SetStatus(int32_t status);

  // This is attached to |gpu_task_runner_|, not the thread class is constructed
  // on.
  base::ThreadChecker thread_checker_;

  // Factory for creating VEAs, shared memory buffers, etc.
  media::GpuVideoAcceleratorFactories* gpu_factories_;

  // webrtc::VideoEncoder expects InitEncode() and Encode() to be synchronous.
  // Do this by waiting on the |async_waiter_| and returning the return value in
  // |async_retval_| when initialization completes, encoding completes, or
  // an error occurs.
  base::WaitableEvent* async_waiter_;
  int32_t* async_retval_;

  // The underlying VEA to perform encoding on.
  std::unique_ptr<media::VideoEncodeAccelerator> video_encoder_;

  // Next input frame. Since there is at most one next frame, a single-element
  // queue is sufficient.
  const webrtc::VideoFrame* input_next_frame_;

  // Whether to encode a keyframe next.
  bool input_next_frame_keyframe_;

  // Frame sizes.
  gfx::Size input_frame_coded_size_;
  gfx::Size input_visible_size_;

  // Shared memory buffers for input/output with the VEA.
  ScopedVector<base::SharedMemory> input_buffers_;
  ScopedVector<base::SharedMemory> output_buffers_;

  // Input buffers ready to be filled with input from Encode(). As a LIFO since
  // we don't care about ordering.
  std::vector<int> input_buffers_free_;

  // The number of output buffers ready to be filled with output from the
  // encoder.
  int output_buffers_free_count_;

  // 15 bits running index of the VP8 frames. See VP8 RTP spec for details.
  uint16_t picture_id_;

  // webrtc::VideoEncoder encode complete callback.
  webrtc::EncodedImageCallback* encoded_image_callback_;

  // The video codec type, as reported to WebRTC.
  const webrtc::VideoCodecType video_codec_type_;

  // Protect |status_|. |status_| is read or written on |gpu_task_runner_| in
  // Impl. It can be read in RTCVideoEncoder on other threads.
  mutable base::Lock status_lock_;

  // We cannot immediately return error conditions to the WebRTC user of this
  // class, as there is no error callback in the webrtc::VideoEncoder interface.
  // Instead, we cache an error status here and return it the next time an
  // interface entry point is called. This is protected by |status_lock_|.
  int32_t status_;

  DISALLOW_COPY_AND_ASSIGN(Impl);
};
259 | |
RTCVideoEncoder::Impl::Impl(media::GpuVideoAcceleratorFactories* gpu_factories,
                            webrtc::VideoCodecType video_codec_type)
    : gpu_factories_(gpu_factories),
      async_waiter_(NULL),
      async_retval_(NULL),
      input_next_frame_(NULL),
      input_next_frame_keyframe_(false),
      output_buffers_free_count_(0),
      encoded_image_callback_(nullptr),
      video_codec_type_(video_codec_type),
      status_(WEBRTC_VIDEO_CODEC_UNINITIALIZED) {
  // Impl may be constructed on any thread; detach so |thread_checker_| binds
  // to whichever thread first calls into this instance.
  thread_checker_.DetachFromThread();
  // Picture ID should start on a random number.
  picture_id_ = static_cast<uint16_t>(base::RandInt(0, 0x7FFF));
}
275 | |
// Creates the hardware encoder and starts initialization.  On any error path
// LogAndNotifyError() signals |async_waiter|; on success the waiter is
// signaled later from RequireBitstreamBuffers().
void RTCVideoEncoder::Impl::CreateAndInitializeVEA(
    const gfx::Size& input_visible_size,
    uint32_t bitrate,
    media::VideoCodecProfile profile,
    base::WaitableEvent* async_waiter,
    int32_t* async_retval) {
  DVLOG(3) << "Impl::CreateAndInitializeVEA()";
  DCHECK(thread_checker_.CalledOnValidThread());

  SetStatus(WEBRTC_VIDEO_CODEC_UNINITIALIZED);
  RegisterAsyncWaiter(async_waiter, async_retval);

  // Check for overflow converting bitrate (kilobits/sec) to bits/sec.
  if (IsBitrateTooHigh(bitrate))
    return;

  video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator();
  if (!video_encoder_) {
    LogAndNotifyError(FROM_HERE, "Error creating VideoEncodeAccelerator",
                      media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  input_visible_size_ = input_visible_size;
  // |bitrate| is in kbps; the VEA takes bps (overflow was checked above).
  if (!video_encoder_->Initialize(media::PIXEL_FORMAT_I420, input_visible_size_,
                                  profile, bitrate * 1000, this)) {
    LogAndNotifyError(FROM_HERE, "Error initializing video_encoder",
                      media::VideoEncodeAccelerator::kInvalidArgumentError);
    return;
  }
  // RequireBitstreamBuffers or NotifyError will be called and the waiter will
  // be signaled.
}
308 | |
// Queues one frame for encoding.  The waiter is signaled immediately on
// error/drop, or from EncodeOneFrame()/EncodeFrameFinished() otherwise.
void RTCVideoEncoder::Impl::Enqueue(const webrtc::VideoFrame* input_frame,
                                    bool force_keyframe,
                                    base::WaitableEvent* async_waiter,
                                    int32_t* async_retval) {
  DVLOG(3) << "Impl::Enqueue()";
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(!input_next_frame_);

  RegisterAsyncWaiter(async_waiter, async_retval);
  int32_t retval = GetStatus();
  if (retval != WEBRTC_VIDEO_CODEC_OK) {
    SignalAsyncWaiter(retval);
    return;
  }

  // If there are no free input and output buffers, drop the frame to avoid a
  // deadlock. If there is a free input buffer, EncodeOneFrame will run and
  // unblock Encode(). If there are no free input buffers but there is a free
  // output buffer, EncodeFrameFinished will be called later to unblock
  // Encode().
  //
  // The caller of Encode() holds a webrtc lock. The deadlock happens when:
  // (1) Encode() is waiting for the frame to be encoded in EncodeOneFrame().
  // (2) There are no free input buffers and they cannot be freed because
  //     the encoder has no output buffers.
  // (3) Output buffers cannot be freed because ReturnEncodedImage is queued
  //     on libjingle worker thread to be run. But the worker thread is waiting
  //     for the same webrtc lock held by the caller of Encode().
  //
  // Dropping a frame is fine. The encoder has been filled with all input
  // buffers. Returning an error in Encode() is not fatal and WebRTC will just
  // continue. If this is a key frame, WebRTC will request a key frame again.
  // Besides, webrtc will drop a frame if Encode() blocks too long.
  if (input_buffers_free_.empty() && output_buffers_free_count_ == 0) {
    DVLOG(2) << "Run out of input and output buffers. Drop the frame.";
    SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_ERROR);
    return;
  }
  input_next_frame_ = input_frame;
  input_next_frame_keyframe_ = force_keyframe;

  // If no input buffer is free, EncodeFrameFinished() will pick the frame up
  // when one is returned.
  if (!input_buffers_free_.empty())
    EncodeOneFrame();
}
353 | |
354 void RTCVideoEncoder::Impl::UseOutputBitstreamBufferId( | |
355 int32_t bitstream_buffer_id) { | |
356 DVLOG(3) << "Impl::UseOutputBitstreamBufferIndex(): " | |
357 "bitstream_buffer_id=" << bitstream_buffer_id; | |
358 DCHECK(thread_checker_.CalledOnValidThread()); | |
359 if (video_encoder_) { | |
360 video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer( | |
361 bitstream_buffer_id, | |
362 output_buffers_[bitstream_buffer_id]->handle(), | |
363 output_buffers_[bitstream_buffer_id]->mapped_size())); | |
364 output_buffers_free_count_++; | |
365 } | |
366 } | |
367 | |
368 void RTCVideoEncoder::Impl::RequestEncodingParametersChange( | |
369 uint32_t bitrate, | |
370 uint32_t framerate) { | |
371 DVLOG(3) << "Impl::RequestEncodingParametersChange(): bitrate=" << bitrate | |
372 << ", framerate=" << framerate; | |
373 DCHECK(thread_checker_.CalledOnValidThread()); | |
374 | |
375 // Check for overflow converting bitrate (kilobits/sec) to bits/sec. | |
376 if (IsBitrateTooHigh(bitrate)) | |
377 return; | |
378 | |
379 if (video_encoder_) | |
380 video_encoder_->RequestEncodingParametersChange(bitrate * 1000, framerate); | |
381 } | |
382 | |
383 void RTCVideoEncoder::Impl::Destroy(base::WaitableEvent* async_waiter) { | |
384 DVLOG(3) << "Impl::Destroy()"; | |
385 DCHECK(thread_checker_.CalledOnValidThread()); | |
386 if (video_encoder_) { | |
387 video_encoder_.reset(); | |
388 SetStatus(WEBRTC_VIDEO_CODEC_UNINITIALIZED); | |
389 } | |
390 async_waiter->Signal(); | |
391 } | |
392 | |
// Returns the cached WEBRTC_VIDEO_CODEC_XXX status.  Takes |status_lock_| so
// it is safe to call from threads other than the one Impl runs on.
int32_t RTCVideoEncoder::Impl::GetStatus() const {
  base::AutoLock lock(status_lock_);
  return status_;
}
397 | |
// Stores the WEBRTC_VIDEO_CODEC_XXX status under |status_lock_| so readers on
// other threads (via GetStatus()) see a consistent value.
void RTCVideoEncoder::Impl::SetStatus(int32_t status) {
  base::AutoLock lock(status_lock_);
  status_ = status;
}
402 | |
// VEA callback: allocates the shared-memory input/output buffers the encoder
// asked for, hands all output buffers to the VEA, and signals the waiter that
// initialization succeeded.  Any allocation failure is reported through
// LogAndNotifyError(), which also signals the waiter.
void RTCVideoEncoder::Impl::RequireBitstreamBuffers(
    unsigned int input_count,
    const gfx::Size& input_coded_size,
    size_t output_buffer_size) {
  DVLOG(3) << "Impl::RequireBitstreamBuffers(): input_count=" << input_count
           << ", input_coded_size=" << input_coded_size.ToString()
           << ", output_buffer_size=" << output_buffer_size;
  DCHECK(thread_checker_.CalledOnValidThread());

  // The encoder may have been destroyed between the VEA posting this callback
  // and it running here.
  if (!video_encoder_)
    return;

  input_frame_coded_size_ = input_coded_size;

  // Allocate one extra input buffer beyond what the VEA requested; see
  // kInputBufferExtraCount.
  for (unsigned int i = 0; i < input_count + kInputBufferExtraCount; ++i) {
    std::unique_ptr<base::SharedMemory> shm =
        gpu_factories_->CreateSharedMemory(media::VideoFrame::AllocationSize(
            media::PIXEL_FORMAT_I420, input_coded_size));
    if (!shm) {
      LogAndNotifyError(FROM_HERE, "failed to create input buffer ",
                        media::VideoEncodeAccelerator::kPlatformFailureError);
      return;
    }
    input_buffers_.push_back(shm.release());
    input_buffers_free_.push_back(i);
  }

  for (int i = 0; i < kOutputBufferCount; ++i) {
    std::unique_ptr<base::SharedMemory> shm =
        gpu_factories_->CreateSharedMemory(output_buffer_size);
    if (!shm) {
      LogAndNotifyError(FROM_HERE, "failed to create output buffer",
                        media::VideoEncodeAccelerator::kPlatformFailureError);
      return;
    }
    output_buffers_.push_back(shm.release());
  }

  // Immediately provide all output buffers to the VEA.
  for (size_t i = 0; i < output_buffers_.size(); ++i) {
    video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer(
        i, output_buffers_[i]->handle(), output_buffers_[i]->mapped_size()));
    output_buffers_free_count_++;
  }
  // Initialization is complete; unblock the thread waiting in InitEncode().
  DCHECK_EQ(GetStatus(), WEBRTC_VIDEO_CODEC_UNINITIALIZED);
  SetStatus(WEBRTC_VIDEO_CODEC_OK);
  SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_OK);
}
451 | |
// VEA callback: wraps the encoded payload in a webrtc::EncodedImage and hands
// it to WebRTC via ReturnEncodedImage(), which later recycles the buffer.
void RTCVideoEncoder::Impl::BitstreamBufferReady(int32_t bitstream_buffer_id,
                                                 size_t payload_size,
                                                 bool key_frame,
                                                 base::TimeDelta timestamp) {
  DVLOG(3) << "Impl::BitstreamBufferReady(): bitstream_buffer_id="
           << bitstream_buffer_id << ", payload_size=" << payload_size
           << ", key_frame=" << key_frame
           << ", timestamp ms=" << timestamp.InMilliseconds();
  DCHECK(thread_checker_.CalledOnValidThread());

  // Validate the VEA-supplied id/size before touching |output_buffers_|.
  if (bitstream_buffer_id < 0 ||
      bitstream_buffer_id >= static_cast<int>(output_buffers_.size())) {
    LogAndNotifyError(FROM_HERE, "invalid bitstream_buffer_id",
                      media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  base::SharedMemory* output_buffer = output_buffers_[bitstream_buffer_id];
  if (payload_size > output_buffer->mapped_size()) {
    LogAndNotifyError(FROM_HERE, "invalid payload_size",
                      media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  output_buffers_free_count_--;

  // CrOS Nyan provides invalid timestamp. Use the current time for now.
  // TODO(wuchengli): use the timestamp in BitstreamBufferReady after Nyan is
  // fixed. http://crbug.com/620565.
  const int64_t capture_time_us = rtc::TimeMicros();

  // Derive the capture time (in ms) and RTP timestamp (in 90KHz ticks).
  const int64_t capture_time_ms =
      capture_time_us / base::Time::kMicrosecondsPerMillisecond;

  const uint32_t rtp_timestamp = static_cast<uint32_t>(
      capture_time_us * 90 / base::Time::kMicrosecondsPerMillisecond);

  // The EncodedImage points directly into the shared-memory output buffer;
  // the buffer is not recycled until ReturnEncodedImage() completes.
  webrtc::EncodedImage image(
      reinterpret_cast<uint8_t*>(output_buffer->memory()), payload_size,
      output_buffer->mapped_size());
  image._encodedWidth = input_visible_size_.width();
  image._encodedHeight = input_visible_size_.height();
  image._timeStamp = rtp_timestamp;
  image.capture_time_ms_ = capture_time_ms;
  image._frameType =
      (key_frame ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta);
  image._completeFrame = true;

  ReturnEncodedImage(image, bitstream_buffer_id, picture_id_);
  // Picture ID must wrap after reaching the maximum.
  picture_id_ = (picture_id_ + 1) & 0x7FFF;
}
503 | |
504 void RTCVideoEncoder::Impl::NotifyError( | |
505 media::VideoEncodeAccelerator::Error error) { | |
506 DCHECK(thread_checker_.CalledOnValidThread()); | |
507 int32_t retval; | |
508 switch (error) { | |
509 case media::VideoEncodeAccelerator::kInvalidArgumentError: | |
510 retval = WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
511 break; | |
512 default: | |
513 retval = WEBRTC_VIDEO_CODEC_ERROR; | |
514 } | |
515 | |
516 video_encoder_.reset(); | |
517 | |
518 SetStatus(retval); | |
519 if (async_waiter_) | |
520 SignalAsyncWaiter(retval); | |
521 } | |
522 | |
// Destroy() must have released the VEA before the last reference goes away.
RTCVideoEncoder::Impl::~Impl() { DCHECK(!video_encoder_); }
524 | |
// Logs the |error| and |str| sent from |location| and NotifyError()s forward.
void RTCVideoEncoder::Impl::LogAndNotifyError(
    const tracked_objects::Location& location,
    const std::string& str,
    media::VideoEncodeAccelerator::Error error) {
  // Indexed by media::VideoEncodeAccelerator::Error; the static_assert keeps
  // this table in sync with the enum.
  static const char* const kErrorNames[] = {
      "kIllegalStateError", "kInvalidArgumentError", "kPlatformFailureError"};
  static_assert(
      arraysize(kErrorNames) == media::VideoEncodeAccelerator::kErrorMax + 1,
      "Different number of errors and textual descriptions");
  DLOG(ERROR) << location.ToString() << kErrorNames[error] << " - " << str;
  NotifyError(error);
}
537 | |
// Hands the pending frame (|input_next_frame_|) to the VEA, copying it into a
// shared-memory input buffer first unless it is already a suitably-sized
// native media::VideoFrame.  Signals the waiter so Encode() can return.
void RTCVideoEncoder::Impl::EncodeOneFrame() {
  DVLOG(3) << "Impl::EncodeOneFrame()";
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(input_next_frame_);
  DCHECK(!input_buffers_free_.empty());

  // EncodeOneFrame() may re-enter EncodeFrameFinished() if VEA::Encode() fails,
  // we receive a VEA::NotifyError(), and the media::VideoFrame we pass to
  // Encode() gets destroyed early.  Handle this by resetting our
  // input_next_frame_* state before we hand off the VideoFrame to the VEA.
  const webrtc::VideoFrame* next_frame = input_next_frame_;
  const bool next_frame_keyframe = input_next_frame_keyframe_;
  input_next_frame_ = NULL;
  input_next_frame_keyframe_ = false;

  if (!video_encoder_) {
    SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_ERROR);
    return;
  }

  const int index = input_buffers_free_.back();
  bool requires_copy = false;
  scoped_refptr<media::VideoFrame> frame;
  // A native handle means the WebRTC frame already wraps a media::VideoFrame;
  // it can be passed through unless its geometry differs from what the VEA
  // was configured with.
  if (next_frame->video_frame_buffer()->native_handle()) {
    frame = static_cast<media::VideoFrame*>(
        next_frame->video_frame_buffer()->native_handle());
    requires_copy = RequiresSizeChange(frame);
  } else {
    requires_copy = true;
  }

  if (requires_copy) {
    base::SharedMemory* input_buffer = input_buffers_[index];
    frame = media::VideoFrame::WrapExternalSharedMemory(
        media::PIXEL_FORMAT_I420, input_frame_coded_size_,
        gfx::Rect(input_visible_size_), input_visible_size_,
        reinterpret_cast<uint8_t*>(input_buffer->memory()),
        input_buffer->mapped_size(), input_buffer->handle(), 0,
        base::TimeDelta::FromMilliseconds(next_frame->ntp_time_ms()));
    if (!frame.get()) {
      LogAndNotifyError(FROM_HERE, "failed to create frame",
                        media::VideoEncodeAccelerator::kPlatformFailureError);
      return;
    }
    // Do a strided copy of the input frame to match the input requirements for
    // the encoder.
    // TODO(sheu): support zero-copy from WebRTC.  http://crbug.com/269312
    if (libyuv::I420Copy(next_frame->video_frame_buffer()->DataY(),
                         next_frame->video_frame_buffer()->StrideY(),
                         next_frame->video_frame_buffer()->DataU(),
                         next_frame->video_frame_buffer()->StrideU(),
                         next_frame->video_frame_buffer()->DataV(),
                         next_frame->video_frame_buffer()->StrideV(),
                         frame->data(media::VideoFrame::kYPlane),
                         frame->stride(media::VideoFrame::kYPlane),
                         frame->data(media::VideoFrame::kUPlane),
                         frame->stride(media::VideoFrame::kUPlane),
                         frame->data(media::VideoFrame::kVPlane),
                         frame->stride(media::VideoFrame::kVPlane),
                         next_frame->width(), next_frame->height())) {
      LogAndNotifyError(FROM_HERE, "Failed to copy buffer",
                        media::VideoEncodeAccelerator::kPlatformFailureError);
      return;
    }
  }
  // The input buffer is reclaimed (EncodeFrameFinished) when the VEA drops
  // its last reference to |frame|.
  frame->AddDestructionObserver(media::BindToCurrentLoop(
      base::Bind(&RTCVideoEncoder::Impl::EncodeFrameFinished, this, index)));
  video_encoder_->Encode(frame, next_frame_keyframe);
  input_buffers_free_.pop_back();
  SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_OK);
}
609 | |
// Called (via the VideoFrame destruction observer) when the VEA is done with
// input buffer |index|; returns it to the free list and, if a frame is
// pending, starts encoding it.
void RTCVideoEncoder::Impl::EncodeFrameFinished(int index) {
  DVLOG(3) << "Impl::EncodeFrameFinished(): index=" << index;
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_GE(index, 0);
  DCHECK_LT(index, static_cast<int>(input_buffers_.size()));
  input_buffers_free_.push_back(index);
  if (input_next_frame_)
    EncodeOneFrame();
}
619 | |
// Records the waiter/retval pair for the caller currently blocked in a
// synchronous RTCVideoEncoder entry point.  At most one waiter may be
// registered at a time (enforced by the DCHECKs).
void RTCVideoEncoder::Impl::RegisterAsyncWaiter(base::WaitableEvent* waiter,
                                                int32_t* retval) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(!async_waiter_);
  DCHECK(!async_retval_);
  async_waiter_ = waiter;
  async_retval_ = retval;
}
628 | |
// Publishes |retval| and wakes the blocked caller.  The retval must be
// written before Signal(): once signaled, the waiting thread may resume and
// the pointed-to storage may go out of scope.
void RTCVideoEncoder::Impl::SignalAsyncWaiter(int32_t retval) {
  DCHECK(thread_checker_.CalledOnValidThread());
  *async_retval_ = retval;
  async_waiter_->Signal();
  async_retval_ = NULL;
  async_waiter_ = NULL;
}
636 | |
637 bool RTCVideoEncoder::Impl::IsBitrateTooHigh(uint32_t bitrate) { | |
638 if (base::IsValueInRangeForNumericType<uint32_t>(bitrate * UINT64_C(1000))) | |
639 return false; | |
640 LogAndNotifyError(FROM_HERE, "Overflow converting bitrate from kbps to bps", | |
641 media::VideoEncodeAccelerator::kInvalidArgumentError); | |
642 return true; | |
643 } | |
644 | |
645 bool RTCVideoEncoder::Impl::RequiresSizeChange( | |
646 const scoped_refptr<media::VideoFrame>& frame) const { | |
647 return (frame->coded_size() != input_frame_coded_size_ || | |
648 frame->visible_rect() != gfx::Rect(input_visible_size_)); | |
649 } | |
650 | |
651 void RTCVideoEncoder::Impl::RegisterEncodeCompleteCallback( | |
652 base::WaitableEvent* async_waiter, | |
653 int32_t* async_retval, | |
654 webrtc::EncodedImageCallback* callback) { | |
655 DCHECK(thread_checker_.CalledOnValidThread()); | |
656 DVLOG(3) << "RegisterEncodeCompleteCallback()"; | |
657 RegisterAsyncWaiter(async_waiter, async_retval); | |
658 int32_t retval = GetStatus(); | |
659 if (retval == WEBRTC_VIDEO_CODEC_OK) | |
660 encoded_image_callback_ = callback; | |
661 SignalAsyncWaiter(retval); | |
662 } | |
663 | |
664 void RTCVideoEncoder::Impl::ReturnEncodedImage( | |
665 const webrtc::EncodedImage& image, | |
666 int32_t bitstream_buffer_id, | |
667 uint16_t picture_id) { | |
668 DCHECK(thread_checker_.CalledOnValidThread()); | |
669 DVLOG(3) << "ReturnEncodedImage(): " | |
670 << "bitstream_buffer_id=" << bitstream_buffer_id | |
671 << ", picture_id=" << picture_id; | |
672 | |
673 if (!encoded_image_callback_) | |
674 return; | |
675 | |
676 webrtc::RTPFragmentationHeader header; | |
677 memset(&header, 0, sizeof(header)); | |
678 switch (video_codec_type_) { | |
679 case webrtc::kVideoCodecVP8: | |
680 // Generate a header describing a single fragment. | |
681 header.VerifyAndAllocateFragmentationHeader(1); | |
682 header.fragmentationOffset[0] = 0; | |
683 header.fragmentationLength[0] = image._length; | |
684 header.fragmentationPlType[0] = 0; | |
685 header.fragmentationTimeDiff[0] = 0; | |
686 break; | |
687 case webrtc::kVideoCodecH264: | |
688 if (!GetRTPFragmentationHeaderH264(&header, image._buffer, | |
689 image._length)) { | |
690 DLOG(ERROR) << "Failed to get RTP fragmentation header for H264"; | |
691 NotifyError( | |
692 (media::VideoEncodeAccelerator::Error)WEBRTC_VIDEO_CODEC_ERROR); | |
693 return; | |
694 } | |
695 break; | |
696 default: | |
697 NOTREACHED() << "Invalid video codec type"; | |
698 return; | |
699 } | |
700 | |
701 webrtc::CodecSpecificInfo info; | |
702 memset(&info, 0, sizeof(info)); | |
703 info.codecType = video_codec_type_; | |
704 if (video_codec_type_ == webrtc::kVideoCodecVP8) { | |
705 info.codecSpecific.VP8.pictureId = picture_id; | |
706 info.codecSpecific.VP8.tl0PicIdx = -1; | |
707 info.codecSpecific.VP8.keyIdx = -1; | |
708 } | |
709 | |
710 const int32_t retval = | |
711 encoded_image_callback_->Encoded(image, &info, &header); | |
712 if (retval < 0) { | |
713 DVLOG(2) << "ReturnEncodedImage(): encoded_image_callback_ returned " | |
714 << retval; | |
715 } | |
716 | |
717 UseOutputBitstreamBufferId(bitstream_buffer_id); | |
718 } | |
719 | |
// Constructs the WebRTC-facing encoder wrapper; the actual Impl is created
// lazily in InitEncode() and runs on |gpu_task_runner_|.
RTCVideoEncoder::RTCVideoEncoder(
    webrtc::VideoCodecType type,
    media::GpuVideoAcceleratorFactories* gpu_factories)
    : video_codec_type_(type),
      gpu_factories_(gpu_factories),
      gpu_task_runner_(gpu_factories->GetTaskRunner()) {
  DVLOG(1) << "RTCVideoEncoder(): codec type=" << type;
}
728 | |
// Release() tears down |impl_| synchronously before destruction completes.
RTCVideoEncoder::~RTCVideoEncoder() {
  DVLOG(3) << "~RTCVideoEncoder";
  Release();
  DCHECK(!impl_.get());
}
734 | |
// webrtc::VideoEncoder implementation.  Creates the Impl, posts VEA creation
// to the GPU task runner, and blocks until initialization completes (WebRTC
// requires InitEncode() to be synchronous).
int32_t RTCVideoEncoder::InitEncode(const webrtc::VideoCodec* codec_settings,
                                    int32_t number_of_cores,
                                    size_t max_payload_size) {
  DVLOG(1) << "InitEncode(): codecType=" << codec_settings->codecType
           << ", width=" << codec_settings->width
           << ", height=" << codec_settings->height
           << ", startBitrate=" << codec_settings->startBitrate;
  DCHECK(!impl_.get());

  impl_ = new Impl(gpu_factories_, video_codec_type_);
  const media::VideoCodecProfile profile = WebRTCVideoCodecToVideoCodecProfile(
      impl_->video_codec_type(), codec_settings);

  base::WaitableEvent initialization_waiter(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  int32_t initialization_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  gpu_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&RTCVideoEncoder::Impl::CreateAndInitializeVEA,
                 impl_,
                 gfx::Size(codec_settings->width, codec_settings->height),
                 codec_settings->startBitrate,
                 profile,
                 &initialization_waiter,
                 &initialization_retval));

  // webrtc::VideoEncoder expects this call to be synchronous.
  initialization_waiter.Wait();
  RecordInitEncodeUMA(initialization_retval, profile);
  return initialization_retval;
}
767 | |
768 int32_t RTCVideoEncoder::Encode( | |
769 const webrtc::VideoFrame& input_image, | |
770 const webrtc::CodecSpecificInfo* codec_specific_info, | |
771 const std::vector<webrtc::FrameType>* frame_types) { | |
772 DVLOG(3) << "Encode()"; | |
773 if (!impl_.get()) { | |
774 DVLOG(3) << "Encoder is not initialized"; | |
775 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
776 } | |
777 | |
778 const bool want_key_frame = frame_types && frame_types->size() && | |
779 frame_types->front() == webrtc::kVideoFrameKey; | |
780 base::WaitableEvent encode_waiter( | |
781 base::WaitableEvent::ResetPolicy::MANUAL, | |
782 base::WaitableEvent::InitialState::NOT_SIGNALED); | |
783 int32_t encode_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
784 gpu_task_runner_->PostTask( | |
785 FROM_HERE, | |
786 base::Bind(&RTCVideoEncoder::Impl::Enqueue, | |
787 impl_, | |
788 &input_image, | |
789 want_key_frame, | |
790 &encode_waiter, | |
791 &encode_retval)); | |
792 | |
793 // webrtc::VideoEncoder expects this call to be synchronous. | |
794 encode_waiter.Wait(); | |
795 DVLOG(3) << "Encode(): returning encode_retval=" << encode_retval; | |
796 return encode_retval; | |
797 } | |
798 | |
799 int32_t RTCVideoEncoder::RegisterEncodeCompleteCallback( | |
800 webrtc::EncodedImageCallback* callback) { | |
801 DVLOG(3) << "RegisterEncodeCompleteCallback()"; | |
802 if (!impl_.get()) { | |
803 DVLOG(3) << "Encoder is not initialized"; | |
804 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
805 } | |
806 | |
807 base::WaitableEvent register_waiter( | |
808 base::WaitableEvent::ResetPolicy::MANUAL, | |
809 base::WaitableEvent::InitialState::NOT_SIGNALED); | |
810 int32_t register_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
811 gpu_task_runner_->PostTask( | |
812 FROM_HERE, | |
813 base::Bind(&RTCVideoEncoder::Impl::RegisterEncodeCompleteCallback, impl_, | |
814 ®ister_waiter, ®ister_retval, callback)); | |
815 register_waiter.Wait(); | |
816 return register_retval; | |
817 } | |
818 | |
819 int32_t RTCVideoEncoder::Release() { | |
820 DVLOG(3) << "Release()"; | |
821 if (!impl_.get()) | |
822 return WEBRTC_VIDEO_CODEC_OK; | |
823 | |
824 base::WaitableEvent release_waiter( | |
825 base::WaitableEvent::ResetPolicy::MANUAL, | |
826 base::WaitableEvent::InitialState::NOT_SIGNALED); | |
827 gpu_task_runner_->PostTask( | |
828 FROM_HERE, | |
829 base::Bind(&RTCVideoEncoder::Impl::Destroy, impl_, &release_waiter)); | |
830 release_waiter.Wait(); | |
831 impl_ = NULL; | |
832 return WEBRTC_VIDEO_CODEC_OK; | |
833 } | |
834 | |
835 int32_t RTCVideoEncoder::SetChannelParameters(uint32_t packet_loss, | |
836 int64_t rtt) { | |
837 DVLOG(3) << "SetChannelParameters(): packet_loss=" << packet_loss | |
838 << ", rtt=" << rtt; | |
839 // Ignored. | |
840 return WEBRTC_VIDEO_CODEC_OK; | |
841 } | |
842 | |
843 int32_t RTCVideoEncoder::SetRates(uint32_t new_bit_rate, uint32_t frame_rate) { | |
844 DVLOG(3) << "SetRates(): new_bit_rate=" << new_bit_rate | |
845 << ", frame_rate=" << frame_rate; | |
846 if (!impl_.get()) { | |
847 DVLOG(3) << "Encoder is not initialized"; | |
848 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
849 } | |
850 | |
851 const int32_t retval = impl_->GetStatus(); | |
852 if (retval != WEBRTC_VIDEO_CODEC_OK) { | |
853 DVLOG(3) << "SetRates(): returning " << retval; | |
854 return retval; | |
855 } | |
856 | |
857 gpu_task_runner_->PostTask( | |
858 FROM_HERE, | |
859 base::Bind(&RTCVideoEncoder::Impl::RequestEncodingParametersChange, | |
860 impl_, | |
861 new_bit_rate, | |
862 frame_rate)); | |
863 return WEBRTC_VIDEO_CODEC_OK; | |
864 } | |
865 | |
866 void RTCVideoEncoder::RecordInitEncodeUMA( | |
867 int32_t init_retval, media::VideoCodecProfile profile) { | |
868 UMA_HISTOGRAM_BOOLEAN("Media.RTCVideoEncoderInitEncodeSuccess", | |
869 init_retval == WEBRTC_VIDEO_CODEC_OK); | |
870 if (init_retval == WEBRTC_VIDEO_CODEC_OK) { | |
871 UMA_HISTOGRAM_ENUMERATION("Media.RTCVideoEncoderProfile", | |
872 profile, | |
873 media::VIDEO_CODEC_PROFILE_MAX + 1); | |
874 } | |
875 } | |
876 | |
877 } // namespace content | |
OLD | NEW |