OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media_recorder/video_track_recorder.h" | 5 #include "content/renderer/media_recorder/video_track_recorder.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
11 #include "base/macros.h" | 11 #include "base/macros.h" |
12 #include "base/memory/ptr_util.h" | 12 #include "base/memory/ptr_util.h" |
13 #include "base/sys_info.h" | |
14 #include "base/task_runner_util.h" | 13 #include "base/task_runner_util.h" |
15 #include "base/threading/thread.h" | 14 #include "base/threading/thread.h" |
16 #include "base/threading/thread_task_runner_handle.h" | 15 #include "base/threading/thread_task_runner_handle.h" |
17 #include "base/time/time.h" | 16 #include "base/time/time.h" |
18 #include "base/trace_event/trace_event.h" | |
19 #include "cc/paint/skia_paint_canvas.h" | 17 #include "cc/paint/skia_paint_canvas.h" |
20 #include "content/renderer/media/renderer_gpu_video_accelerator_factories.h" | 18 #include "content/renderer/media/renderer_gpu_video_accelerator_factories.h" |
| 19 #include "content/renderer/media_recorder/vea_encoder.h" |
| 20 #include "content/renderer/media_recorder/vpx_encoder.h" |
21 #include "content/renderer/render_thread_impl.h" | 21 #include "content/renderer/render_thread_impl.h" |
22 #include "media/base/bind_to_current_loop.h" | 22 #include "media/base/bind_to_current_loop.h" |
23 #include "media/base/video_frame.h" | 23 #include "media/base/video_frame.h" |
24 #include "media/base/video_util.h" | 24 #include "media/base/video_util.h" |
25 #include "media/filters/context_3d.h" | 25 #include "media/filters/context_3d.h" |
26 #include "media/renderers/skcanvas_video_renderer.h" | 26 #include "media/renderers/skcanvas_video_renderer.h" |
27 #include "services/ui/public/cpp/gpu/context_provider_command_buffer.h" | 27 #include "services/ui/public/cpp/gpu/context_provider_command_buffer.h" |
28 #include "skia/ext/platform_canvas.h" | 28 #include "skia/ext/platform_canvas.h" |
29 #include "third_party/libyuv/include/libyuv.h" | 29 #include "third_party/libyuv/include/libyuv.h" |
30 #include "ui/gfx/geometry/size.h" | 30 #include "ui/gfx/geometry/size.h" |
31 | 31 |
32 #if BUILDFLAG(RTC_USE_H264) | 32 #if BUILDFLAG(RTC_USE_H264) |
33 #include "third_party/openh264/src/codec/api/svc/codec_api.h" | 33 #include "content/renderer/media_recorder/h264_encoder.h" |
34 #include "third_party/openh264/src/codec/api/svc/codec_app_def.h" | |
35 #include "third_party/openh264/src/codec/api/svc/codec_def.h" | |
36 #endif // #if BUILDFLAG(RTC_USE_H264) | 34 #endif // #if BUILDFLAG(RTC_USE_H264) |
37 | 35 |
38 extern "C" { | |
39 // VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide | |
40 // backwards compatibility for legacy applications using the library. | |
41 #define VPX_CODEC_DISABLE_COMPAT 1 | |
42 #include "third_party/libvpx/source/libvpx/vpx/vp8cx.h" | |
43 #include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h" | |
44 } | |
45 | |
46 using media::VideoFrame; | 36 using media::VideoFrame; |
47 using media::VideoFrameMetadata; | |
48 using video_track_recorder::kVEAEncoderMinResolutionWidth; | 37 using video_track_recorder::kVEAEncoderMinResolutionWidth; |
49 using video_track_recorder::kVEAEncoderMinResolutionHeight; | 38 using video_track_recorder::kVEAEncoderMinResolutionHeight; |
50 | 39 |
51 namespace content { | 40 namespace content { |
52 | 41 |
53 namespace { | 42 namespace { |
54 | 43 |
55 // HW encoders expect a nonzero bitrate, so |kVEADefaultBitratePerPixel| is used | |
56 // to estimate bits per second for ~30 fps with ~1/16 compression rate. | |
57 const int kVEADefaultBitratePerPixel = 2; | |
58 // Number of output buffers used to copy the encoded data coming from HW | |
59 // encoders. | |
60 const int kVEAEncoderOutputBufferCount = 4; | |
61 | |
62 using CodecId = VideoTrackRecorder::CodecId; | 44 using CodecId = VideoTrackRecorder::CodecId; |
63 | 45 |
64 static const struct { | 46 static const struct { |
65 CodecId codec_id; | 47 CodecId codec_id; |
66 media::VideoCodecProfile min_profile; | 48 media::VideoCodecProfile min_profile; |
67 media::VideoCodecProfile max_profile; | 49 media::VideoCodecProfile max_profile; |
68 } kPreferredCodecIdAndVEAProfiles[] = { | 50 } kPreferredCodecIdAndVEAProfiles[] = { |
69 {CodecId::VP8, media::VP8PROFILE_MIN, media::VP8PROFILE_MAX}, | 51 {CodecId::VP8, media::VP8PROFILE_MIN, media::VP8PROFILE_MAX}, |
70 {CodecId::VP9, media::VP9PROFILE_MIN, media::VP9PROFILE_MAX}, | 52 {CodecId::VP9, media::VP9PROFILE_MIN, media::VP9PROFILE_MAX}, |
71 #if BUILDFLAG(RTC_USE_H264) | 53 #if BUILDFLAG(RTC_USE_H264) |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
153 | 135 |
154 media::VideoCodecProfile CodecEnumerator::CodecIdToVEAProfile(CodecId codec) { | 136 media::VideoCodecProfile CodecEnumerator::CodecIdToVEAProfile(CodecId codec) { |
155 const auto profile = codec_id_to_profile_.find(codec); | 137 const auto profile = codec_id_to_profile_.find(codec); |
156 return profile == codec_id_to_profile_.end() | 138 return profile == codec_id_to_profile_.end() |
157 ? media::VIDEO_CODEC_PROFILE_UNKNOWN | 139 ? media::VIDEO_CODEC_PROFILE_UNKNOWN |
158 : profile->second; | 140 : profile->second; |
159 } | 141 } |
160 | 142 |
161 } // anonymous namespace | 143 } // anonymous namespace |
162 | 144 |
163 // Base class to describe a generic Encoder, encapsulating all actual encoder | 145 VideoTrackRecorder::Encoder::Encoder( |
164 // (re)configurations, encoding and delivery of received frames. This class is | 146 const OnEncodedVideoCB& on_encoded_video_callback, |
165 // ref-counted to allow the MediaStreamVideoTrack to hold a reference to it (via | 147 int32_t bits_per_second, |
166 // the callback that MediaStreamVideoSink passes along) and to jump back and | 148 scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner) |
167 // forth to an internal encoder thread. Moreover, this class: | 149 : main_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
168 // - is created on its parent's thread (usually the main Render thread), | 150 encoding_task_runner_(encoding_task_runner), |
169 // that is, |main_task_runner_|. | 151 paused_(false), |
170 // - receives VideoFrames on |origin_task_runner_| and runs OnEncodedVideoCB on | 152 on_encoded_video_callback_(on_encoded_video_callback), |
171 // that thread as well. This task runner is cached on first frame arrival, and | 153 bits_per_second_(bits_per_second) { |
172 // is supposed to be the render IO thread (but this is not enforced); | 154 DCHECK(!on_encoded_video_callback_.is_null()); |
173 // - uses an internal |encoding_task_runner_| for actual encoder interactions, | 155 if (encoding_task_runner_) |
174 // namely configuration, encoding (which might take some time) and destruction. | 156 return; |
175 // This task runner can be passed on the creation. If nothing is passed, a new | 157 encoding_thread_.reset(new base::Thread("EncodingThread")); |
176 // encoding thread is created and used. | 158 encoding_thread_->Start(); |
177 class VideoTrackRecorder::Encoder : public base::RefCountedThreadSafe<Encoder> { | 159 encoding_task_runner_ = encoding_thread_->task_runner(); |
178 public: | 160 } |
179 Encoder(const OnEncodedVideoCB& on_encoded_video_callback, | |
180 int32_t bits_per_second, | |
181 scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner = | |
182 nullptr) | |
183 : main_task_runner_(base::ThreadTaskRunnerHandle::Get()), | |
184 encoding_task_runner_(encoding_task_runner), | |
185 paused_(false), | |
186 on_encoded_video_callback_(on_encoded_video_callback), | |
187 bits_per_second_(bits_per_second) { | |
188 DCHECK(!on_encoded_video_callback_.is_null()); | |
189 if (encoding_task_runner_) | |
190 return; | |
191 encoding_thread_.reset(new base::Thread("EncodingThread")); | |
192 encoding_thread_->Start(); | |
193 encoding_task_runner_ = encoding_thread_->task_runner(); | |
194 } | |
195 | 161 |
196 // Start encoding |frame|, returning via |on_encoded_video_callback_|. This | 162 VideoTrackRecorder::Encoder::~Encoder() { |
197 // call will also trigger an encode configuration upon first frame arrival | 163 main_task_runner_->DeleteSoon(FROM_HERE, video_renderer_.release()); |
198 // or parameter change, and an EncodeOnEncodingTaskRunner() to actually | 164 } |
199 // encode the frame. If the |frame|'s data is not directly available (e.g. | |
200 // it's a texture) then RetrieveFrameOnMainThread() is called, and if even | |
201 // that fails, black frames are sent instead. | |
202 void StartFrameEncode(const scoped_refptr<VideoFrame>& frame, | |
203 base::TimeTicks capture_timestamp); | |
204 void RetrieveFrameOnMainThread(const scoped_refptr<VideoFrame>& video_frame, | |
205 base::TimeTicks capture_timestamp); | |
206 | |
207 void SetPaused(bool paused); | |
208 virtual bool CanEncodeAlphaChannel() { return false; } | |
209 | |
210 protected: | |
211 friend class base::RefCountedThreadSafe<Encoder>; | |
212 virtual ~Encoder() { | |
213 main_task_runner_->DeleteSoon(FROM_HERE, video_renderer_.release()); | |
214 } | |
215 | |
216 virtual void EncodeOnEncodingTaskRunner( | |
217 scoped_refptr<VideoFrame> frame, | |
218 base::TimeTicks capture_timestamp) = 0; | |
219 | |
220 // Used to shutdown properly on the same thread we were created. | |
221 const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_; | |
222 | |
223 // Task runner where frames to encode and reply callbacks must happen. | |
224 scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_; | |
225 | |
226 // Task runner where encoding interactions happen. | |
227 scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner_; | |
228 | |
229 // Optional thread for encoding. Active for the lifetime of VpxEncoder. | |
230 std::unique_ptr<base::Thread> encoding_thread_; | |
231 | |
232 // While |paused_|, frames are not encoded. Used only from |encoding_thread_|. | |
233 bool paused_; | |
234 | |
235 // This callback should be exercised on IO thread. | |
236 const OnEncodedVideoCB on_encoded_video_callback_; | |
237 | |
238 // Target bitrate for video encoding. If 0, a standard bitrate is used. | |
239 const int32_t bits_per_second_; | |
240 | |
241 // Used to retrieve incoming opaque VideoFrames (i.e. VideoFrames backed by | |
242 // textures). Created on-demand on |main_task_runner_|. | |
243 std::unique_ptr<media::SkCanvasVideoRenderer> video_renderer_; | |
244 SkBitmap bitmap_; | |
245 std::unique_ptr<cc::PaintCanvas> canvas_; | |
246 | |
247 DISALLOW_COPY_AND_ASSIGN(Encoder); | |
248 }; | |
249 | 165 |
250 void VideoTrackRecorder::Encoder::StartFrameEncode( | 166 void VideoTrackRecorder::Encoder::StartFrameEncode( |
251 const scoped_refptr<VideoFrame>& video_frame, | 167 const scoped_refptr<VideoFrame>& video_frame, |
252 base::TimeTicks capture_timestamp) { | 168 base::TimeTicks capture_timestamp) { |
253 // Cache the thread sending frames on first frame arrival. | 169 // Cache the thread sending frames on first frame arrival. |
254 if (!origin_task_runner_.get()) | 170 if (!origin_task_runner_.get()) |
255 origin_task_runner_ = base::ThreadTaskRunnerHandle::Get(); | 171 origin_task_runner_ = base::ThreadTaskRunnerHandle::Get(); |
256 DCHECK(origin_task_runner_->BelongsToCurrentThread()); | 172 DCHECK(origin_task_runner_->BelongsToCurrentThread()); |
257 if (paused_) | 173 if (paused_) |
258 return; | 174 return; |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
354 DLOG(ERROR) << "Error converting frame to I420"; | 270 DLOG(ERROR) << "Error converting frame to I420"; |
355 return; | 271 return; |
356 } | 272 } |
357 } | 273 } |
358 | 274 |
359 encoding_task_runner_->PostTask( | 275 encoding_task_runner_->PostTask( |
360 FROM_HERE, base::Bind(&Encoder::EncodeOnEncodingTaskRunner, this, frame, | 276 FROM_HERE, base::Bind(&Encoder::EncodeOnEncodingTaskRunner, this, frame, |
361 capture_timestamp)); | 277 capture_timestamp)); |
362 } | 278 } |
363 | 279 |
364 void VideoTrackRecorder::Encoder::SetPaused(bool paused) { | 280 // static |
365 if (!encoding_task_runner_->BelongsToCurrentThread()) { | 281 void VideoTrackRecorder::Encoder::OnFrameEncodeCompleted( |
366 encoding_task_runner_->PostTask( | |
367 FROM_HERE, base::Bind(&Encoder::SetPaused, this, paused)); | |
368 return; | |
369 } | |
370 paused_ = paused; | |
371 } | |
372 | |
373 namespace { | |
374 | |
375 // Originally from remoting/codec/scoped_vpx_codec.h. | |
376 // TODO(mcasas): Refactor into a common location. | |
377 struct VpxCodecDeleter { | |
378 void operator()(vpx_codec_ctx_t* codec) { | |
379 if (!codec) | |
380 return; | |
381 vpx_codec_err_t ret = vpx_codec_destroy(codec); | |
382 CHECK_EQ(ret, VPX_CODEC_OK); | |
383 delete codec; | |
384 } | |
385 }; | |
386 typedef std::unique_ptr<vpx_codec_ctx_t, VpxCodecDeleter> ScopedVpxCodecCtxPtr; | |
387 | |
388 static void OnFrameEncodeCompleted( | |
389 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb, | 282 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb, |
390 const media::WebmMuxer::VideoParameters& params, | 283 const media::WebmMuxer::VideoParameters& params, |
391 std::unique_ptr<std::string> data, | 284 std::unique_ptr<std::string> data, |
392 std::unique_ptr<std::string> alpha_data, | 285 std::unique_ptr<std::string> alpha_data, |
393 base::TimeTicks capture_timestamp, | 286 base::TimeTicks capture_timestamp, |
394 bool keyframe) { | 287 bool keyframe) { |
395 DVLOG(1) << (keyframe ? "" : "non ") << "keyframe "<< data->length() << "B, " | 288 DVLOG(1) << (keyframe ? "" : "non ") << "keyframe "<< data->length() << "B, " |
396 << capture_timestamp << " ms"; | 289 << capture_timestamp << " ms"; |
397 on_encoded_video_cb.Run(params, std::move(data), std::move(alpha_data), | 290 on_encoded_video_cb.Run(params, std::move(data), std::move(alpha_data), |
398 capture_timestamp, keyframe); | 291 capture_timestamp, keyframe); |
399 } | 292 } |
400 | 293 |
401 static int GetNumberOfThreadsForEncoding() { | 294 void VideoTrackRecorder::Encoder::SetPaused(bool paused) { |
402 // Do not saturate CPU utilization just for encoding. On a lower-end system | 295 if (!encoding_task_runner_->BelongsToCurrentThread()) { |
403 // with only 1 or 2 cores, use only one thread for encoding. On systems with | 296 encoding_task_runner_->PostTask( |
404 // more cores, allow half of the cores to be used for encoding. | 297 FROM_HERE, base::Bind(&Encoder::SetPaused, this, paused)); |
405 return std::min(8, (base::SysInfo::NumberOfProcessors() + 1) / 2); | 298 return; |
| 299 } |
| 300 paused_ = paused; |
406 } | 301 } |
407 | 302 |
408 // Class encapsulating VideoEncodeAccelerator interactions. | 303 bool VideoTrackRecorder::Encoder::CanEncodeAlphaChannel() { |
409 // This class is created and destroyed in its owner thread. All other methods | 304 return false; |
410 // operate on the task runner pointed by GpuFactories. | |
411 class VEAEncoder final : public VideoTrackRecorder::Encoder, | |
412 public media::VideoEncodeAccelerator::Client { | |
413 public: | |
414 VEAEncoder( | |
415 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
416 int32_t bits_per_second, | |
417 media::VideoCodecProfile codec, | |
418 const gfx::Size& size); | |
419 | |
420 // media::VideoEncodeAccelerator::Client implementation. | |
421 void RequireBitstreamBuffers(unsigned int input_count, | |
422 const gfx::Size& input_coded_size, | |
423 size_t output_buffer_size) override; | |
424 void BitstreamBufferReady(int32_t bitstream_buffer_id, | |
425 size_t payload_size, | |
426 bool key_frame, | |
427 base::TimeDelta timestamp) override; | |
428 void NotifyError(media::VideoEncodeAccelerator::Error error) override; | |
429 | |
430 private: | |
431 using VideoFrameAndTimestamp = | |
432 std::pair<scoped_refptr<media::VideoFrame>, base::TimeTicks>; | |
433 using VideoParamsAndTimestamp = | |
434 std::pair<media::WebmMuxer::VideoParameters, base::TimeTicks>; | |
435 | |
436 void UseOutputBitstreamBufferId(int32_t bitstream_buffer_id); | |
437 void FrameFinished(std::unique_ptr<base::SharedMemory> shm); | |
438 | |
439 // VideoTrackRecorder::Encoder implementation. | |
440 ~VEAEncoder() override; | |
441 void EncodeOnEncodingTaskRunner(scoped_refptr<VideoFrame> frame, | |
442 base::TimeTicks capture_timestamp) override; | |
443 | |
444 void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size); | |
445 | |
446 void DestroyOnEncodingTaskRunner(base::WaitableEvent* async_waiter); | |
447 | |
448 media::GpuVideoAcceleratorFactories* const gpu_factories_; | |
449 | |
450 const media::VideoCodecProfile codec_; | |
451 | |
452 // The underlying VEA to perform encoding on. | |
453 std::unique_ptr<media::VideoEncodeAccelerator> video_encoder_; | |
454 | |
455 // Shared memory buffers for output with the VEA. | |
456 std::vector<std::unique_ptr<base::SharedMemory>> output_buffers_; | |
457 | |
458 // Shared memory buffers for output with the VEA as FIFO. | |
459 std::queue<std::unique_ptr<base::SharedMemory>> input_buffers_; | |
460 | |
461 // Tracks error status. | |
462 bool error_notified_; | |
463 | |
464 // Tracks the last frame that we delay the encode. | |
465 std::unique_ptr<VideoFrameAndTimestamp> last_frame_; | |
466 | |
467 // Size used to initialize encoder. | |
468 gfx::Size input_visible_size_; | |
469 | |
470 // Coded size that encoder requests as input. | |
471 gfx::Size vea_requested_input_coded_size_; | |
472 | |
473 // Frames and corresponding timestamps in encode as FIFO. | |
474 std::queue<VideoParamsAndTimestamp> frames_in_encode_; | |
475 }; | |
476 | |
477 // Class encapsulating all libvpx interactions for VP8/VP9 encoding. | |
478 class VpxEncoder final : public VideoTrackRecorder::Encoder { | |
479 public: | |
480 static void ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread, | |
481 ScopedVpxCodecCtxPtr encoder); | |
482 | |
483 VpxEncoder( | |
484 bool use_vp9, | |
485 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
486 int32_t bits_per_second); | |
487 | |
488 private: | |
489 // VideoTrackRecorder::Encoder implementation. | |
490 ~VpxEncoder() override; | |
491 void EncodeOnEncodingTaskRunner(scoped_refptr<VideoFrame> frame, | |
492 base::TimeTicks capture_timestamp) override; | |
493 bool CanEncodeAlphaChannel() override { return true; } | |
494 | |
495 void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size, | |
496 vpx_codec_enc_cfg_t* codec_config, | |
497 ScopedVpxCodecCtxPtr* encoder); | |
498 void DoEncode(vpx_codec_ctx_t* const encoder, | |
499 const gfx::Size& frame_size, | |
500 uint8_t* const data, | |
501 uint8_t* const y_plane, | |
502 int y_stride, | |
503 uint8_t* const u_plane, | |
504 int u_stride, | |
505 uint8_t* const v_plane, | |
506 int v_stride, | |
507 const base::TimeDelta& duration, | |
508 bool force_keyframe, | |
509 std::string* const output_data, | |
510 bool* const keyframe); | |
511 | |
512 // Returns true if |codec_config| has been filled in at least once. | |
513 bool IsInitialized(const vpx_codec_enc_cfg_t& codec_config) const; | |
514 | |
515 // Estimate the frame duration from |frame| and |last_frame_timestamp_|. | |
516 base::TimeDelta EstimateFrameDuration(const scoped_refptr<VideoFrame>& frame); | |
517 | |
518 // Force usage of VP9 for encoding, instead of VP8 which is the default. | |
519 const bool use_vp9_; | |
520 | |
521 // VPx internal objects: configuration and encoder. |encoder_| is a special | |
522 // scoped pointer to guarantee proper destruction, particularly when | |
523 // reconfiguring due to parameters change. Only used on |encoding_thread_|. | |
524 vpx_codec_enc_cfg_t codec_config_; | |
525 ScopedVpxCodecCtxPtr encoder_; | |
526 | |
527 vpx_codec_enc_cfg_t alpha_codec_config_; | |
528 ScopedVpxCodecCtxPtr alpha_encoder_; | |
529 | |
530 std::vector<uint8_t> alpha_dummy_planes_; | |
531 size_t v_plane_offset_; | |
532 size_t u_plane_stride_; | |
533 size_t v_plane_stride_; | |
534 bool last_frame_had_alpha_ = false; | |
535 | |
536 // The |VideoFrame::timestamp()| of the last encoded frame. This is used to | |
537 // predict the duration of the next frame. Only used on |encoding_thread_|. | |
538 base::TimeDelta last_frame_timestamp_; | |
539 | |
540 DISALLOW_COPY_AND_ASSIGN(VpxEncoder); | |
541 }; | |
542 | |
543 #if BUILDFLAG(RTC_USE_H264) | |
544 | |
545 struct ISVCEncoderDeleter { | |
546 void operator()(ISVCEncoder* codec) { | |
547 if (!codec) | |
548 return; | |
549 const int uninit_ret = codec->Uninitialize(); | |
550 CHECK_EQ(cmResultSuccess, uninit_ret); | |
551 WelsDestroySVCEncoder(codec); | |
552 } | |
553 }; | |
554 typedef std::unique_ptr<ISVCEncoder, ISVCEncoderDeleter> ScopedISVCEncoderPtr; | |
555 | |
556 // Class encapsulating all openh264 interactions for H264 encoding. | |
557 class H264Encoder final : public VideoTrackRecorder::Encoder { | |
558 public: | |
559 static void ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread, | |
560 ScopedISVCEncoderPtr encoder); | |
561 | |
562 H264Encoder( | |
563 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
564 int32_t bits_per_second); | |
565 | |
566 private: | |
567 // VideoTrackRecorder::Encoder implementation. | |
568 ~H264Encoder() override; | |
569 void EncodeOnEncodingTaskRunner(scoped_refptr<VideoFrame> frame, | |
570 base::TimeTicks capture_timestamp) override; | |
571 | |
572 void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size); | |
573 | |
574 // |openh264_encoder_| is a special scoped pointer to guarantee proper | |
575 // destruction, also when reconfiguring due to parameters change. Only used on | |
576 // |encoding_thread_|. | |
577 gfx::Size configured_size_; | |
578 ScopedISVCEncoderPtr openh264_encoder_; | |
579 | |
580 // The |VideoFrame::timestamp()| of the first received frame. Only used on | |
581 // |encoding_thread_|. | |
582 base::TimeTicks first_frame_timestamp_; | |
583 | |
584 DISALLOW_COPY_AND_ASSIGN(H264Encoder); | |
585 }; | |
586 | |
587 #endif // #if BUILDFLAG(RTC_USE_H264) | |
588 | |
589 VEAEncoder::VEAEncoder( | |
590 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
591 int32_t bits_per_second, | |
592 media::VideoCodecProfile codec, | |
593 const gfx::Size& size) | |
594 : Encoder(on_encoded_video_callback, | |
595 bits_per_second > 0 ? bits_per_second | |
596 : size.GetArea() * kVEADefaultBitratePerPixel, | |
597 RenderThreadImpl::current()->GetGpuFactories()->GetTaskRunner()), | |
598 gpu_factories_(RenderThreadImpl::current()->GetGpuFactories()), | |
599 codec_(codec), | |
600 error_notified_(false) { | |
601 DCHECK(gpu_factories_); | |
602 DCHECK_GE(size.width(), kVEAEncoderMinResolutionWidth); | |
603 DCHECK_GE(size.height(), kVEAEncoderMinResolutionHeight); | |
604 | |
605 encoding_task_runner_->PostTask( | |
606 FROM_HERE, base::Bind(&VEAEncoder::ConfigureEncoderOnEncodingTaskRunner, | |
607 this, size)); | |
608 } | |
609 | |
610 VEAEncoder::~VEAEncoder() { | |
611 base::WaitableEvent release_waiter( | |
612 base::WaitableEvent::ResetPolicy::MANUAL, | |
613 base::WaitableEvent::InitialState::NOT_SIGNALED); | |
614 // base::Unretained is safe because the class will be alive until | |
615 // |release_waiter| is signaled. | |
616 // TODO(emircan): Consider refactoring media::VideoEncodeAccelerator to avoid | |
617 // using naked pointers and using DeleteSoon() here, see | |
618 // http://crbug.com/701627. | |
619 // It is currently unsafe because |video_encoder_| might be in use on another | |
620 // function on |encoding_task_runner_|, see http://crbug.com/701030. | |
621 encoding_task_runner_->PostTask( | |
622 FROM_HERE, base::Bind(&VEAEncoder::DestroyOnEncodingTaskRunner, | |
623 base::Unretained(this), &release_waiter)); | |
624 release_waiter.Wait(); | |
625 } | |
626 | |
627 void VEAEncoder::RequireBitstreamBuffers(unsigned int /*input_count*/, | |
628 const gfx::Size& input_coded_size, | |
629 size_t output_buffer_size) { | |
630 DVLOG(3) << __func__; | |
631 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
632 | |
633 vea_requested_input_coded_size_ = input_coded_size; | |
634 output_buffers_.clear(); | |
635 std::queue<std::unique_ptr<base::SharedMemory>>().swap(input_buffers_); | |
636 | |
637 for (int i = 0; i < kVEAEncoderOutputBufferCount; ++i) { | |
638 std::unique_ptr<base::SharedMemory> shm = | |
639 gpu_factories_->CreateSharedMemory(output_buffer_size); | |
640 if (shm) | |
641 output_buffers_.push_back(base::WrapUnique(shm.release())); | |
642 } | |
643 | |
644 for (size_t i = 0; i < output_buffers_.size(); ++i) | |
645 UseOutputBitstreamBufferId(i); | |
646 } | |
647 | |
648 void VEAEncoder::BitstreamBufferReady(int32_t bitstream_buffer_id, | |
649 size_t payload_size, | |
650 bool keyframe, | |
651 base::TimeDelta timestamp) { | |
652 DVLOG(3) << __func__; | |
653 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
654 | |
655 base::SharedMemory* output_buffer = | |
656 output_buffers_[bitstream_buffer_id].get(); | |
657 | |
658 std::unique_ptr<std::string> data(new std::string); | |
659 data->append(reinterpret_cast<char*>(output_buffer->memory()), payload_size); | |
660 | |
661 const auto front_frame = frames_in_encode_.front(); | |
662 frames_in_encode_.pop(); | |
663 origin_task_runner_->PostTask( | |
664 FROM_HERE, base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_, | |
665 front_frame.first, base::Passed(&data), nullptr, | |
666 front_frame.second, keyframe)); | |
667 UseOutputBitstreamBufferId(bitstream_buffer_id); | |
668 } | |
669 | |
670 void VEAEncoder::NotifyError(media::VideoEncodeAccelerator::Error error) { | |
671 DVLOG(3) << __func__; | |
672 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
673 | |
674 // TODO(emircan): Notify the owner via a callback. | |
675 error_notified_ = true; | |
676 } | |
677 | |
678 void VEAEncoder::UseOutputBitstreamBufferId(int32_t bitstream_buffer_id) { | |
679 DVLOG(3) << __func__; | |
680 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
681 | |
682 video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer( | |
683 bitstream_buffer_id, output_buffers_[bitstream_buffer_id]->handle(), | |
684 output_buffers_[bitstream_buffer_id]->mapped_size())); | |
685 } | |
686 | |
687 void VEAEncoder::FrameFinished(std::unique_ptr<base::SharedMemory> shm) { | |
688 DVLOG(3) << __func__; | |
689 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
690 input_buffers_.push(std::move(shm)); | |
691 } | |
692 | |
693 void VEAEncoder::EncodeOnEncodingTaskRunner( | |
694 scoped_refptr<VideoFrame> frame, | |
695 base::TimeTicks capture_timestamp) { | |
696 DVLOG(3) << __func__; | |
697 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
698 | |
699 if (input_visible_size_ != frame->visible_rect().size() && video_encoder_) | |
700 video_encoder_.reset(); | |
701 | |
702 if (!video_encoder_) | |
703 ConfigureEncoderOnEncodingTaskRunner(frame->visible_rect().size()); | |
704 | |
705 if (error_notified_) { | |
706 DVLOG(3) << "An error occurred in VEA encoder"; | |
707 return; | |
708 } | |
709 | |
710 // Drop frames if there is no output buffers available. | |
711 if (output_buffers_.empty()) { | |
712 // TODO(emircan): Investigate if resetting encoder would help. | |
713 DVLOG(3) << "Might drop frame."; | |
714 last_frame_.reset( | |
715 new std::pair<scoped_refptr<VideoFrame>, base::TimeTicks>( | |
716 frame, capture_timestamp)); | |
717 return; | |
718 } | |
719 | |
720 // If first frame hasn't been encoded, do it first. | |
721 if (last_frame_) { | |
722 std::unique_ptr<VideoFrameAndTimestamp> last_frame(last_frame_.release()); | |
723 EncodeOnEncodingTaskRunner(last_frame->first, last_frame->second); | |
724 } | |
725 | |
726 // Lower resolutions may fall back to SW encoder in some platforms, i.e. Mac. | |
727 // In that case, the encoder expects more frames before returning result. | |
728 // Therefore, a copy is necessary to release the current frame. | |
729 // Only STORAGE_SHMEM backed frames can be shared with GPU process, therefore | |
730 // a copy is required for other storage types. | |
731 scoped_refptr<media::VideoFrame> video_frame = frame; | |
732 if (video_frame->storage_type() != VideoFrame::STORAGE_SHMEM || | |
733 vea_requested_input_coded_size_ != frame->coded_size() || | |
734 input_visible_size_.width() < kVEAEncoderMinResolutionWidth || | |
735 input_visible_size_.height() < kVEAEncoderMinResolutionHeight) { | |
736 // Create SharedMemory backed input buffers as necessary. These SharedMemory | |
737 // instances will be shared with GPU process. | |
738 std::unique_ptr<base::SharedMemory> input_buffer; | |
739 const size_t desired_mapped_size = media::VideoFrame::AllocationSize( | |
740 media::PIXEL_FORMAT_I420, vea_requested_input_coded_size_); | |
741 if (input_buffers_.empty()) { | |
742 input_buffer = gpu_factories_->CreateSharedMemory(desired_mapped_size); | |
743 } else { | |
744 do { | |
745 input_buffer = std::move(input_buffers_.front()); | |
746 input_buffers_.pop(); | |
747 } while (!input_buffers_.empty() && | |
748 input_buffer->mapped_size() < desired_mapped_size); | |
749 if (!input_buffer || input_buffer->mapped_size() < desired_mapped_size) | |
750 return; | |
751 } | |
752 | |
753 video_frame = media::VideoFrame::WrapExternalSharedMemory( | |
754 media::PIXEL_FORMAT_I420, vea_requested_input_coded_size_, | |
755 gfx::Rect(input_visible_size_), input_visible_size_, | |
756 reinterpret_cast<uint8_t*>(input_buffer->memory()), | |
757 input_buffer->mapped_size(), input_buffer->handle(), 0, | |
758 frame->timestamp()); | |
759 video_frame->AddDestructionObserver(media::BindToCurrentLoop( | |
760 base::Bind(&VEAEncoder::FrameFinished, this, | |
761 base::Passed(std::move(input_buffer))))); | |
762 libyuv::I420Copy(frame->visible_data(media::VideoFrame::kYPlane), | |
763 frame->stride(media::VideoFrame::kYPlane), | |
764 frame->visible_data(media::VideoFrame::kUPlane), | |
765 frame->stride(media::VideoFrame::kUPlane), | |
766 frame->visible_data(media::VideoFrame::kVPlane), | |
767 frame->stride(media::VideoFrame::kVPlane), | |
768 video_frame->visible_data(media::VideoFrame::kYPlane), | |
769 video_frame->stride(media::VideoFrame::kYPlane), | |
770 video_frame->visible_data(media::VideoFrame::kUPlane), | |
771 video_frame->stride(media::VideoFrame::kUPlane), | |
772 video_frame->visible_data(media::VideoFrame::kVPlane), | |
773 video_frame->stride(media::VideoFrame::kVPlane), | |
774 input_visible_size_.width(), input_visible_size_.height()); | |
775 } | |
776 frames_in_encode_.push(std::make_pair( | |
777 media::WebmMuxer::VideoParameters(frame), capture_timestamp)); | |
778 | |
779 video_encoder_->Encode(video_frame, false); | |
780 } | |
781 | |
782 void VEAEncoder::ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) { | |
783 DVLOG(3) << __func__; | |
784 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
785 DCHECK(gpu_factories_->GetTaskRunner()->BelongsToCurrentThread()); | |
786 DCHECK_GT(bits_per_second_, 0); | |
787 | |
788 input_visible_size_ = size; | |
789 video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator(); | |
790 if (!video_encoder_ || | |
791 !video_encoder_->Initialize(media::PIXEL_FORMAT_I420, input_visible_size_, | |
792 codec_, bits_per_second_, this)) { | |
793 NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError); | |
794 } | |
795 } | |
796 | |
// Destroys the hardware encoder on its own task runner, then signals
// |async_waiter| so the caller blocked on it can proceed knowing the
// accelerator is fully torn down.
void VEAEncoder::DestroyOnEncodingTaskRunner(
    base::WaitableEvent* async_waiter) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());
  // Reset before signalling: once woken, the waiter may assume the encoder
  // no longer exists.
  video_encoder_.reset();
  async_waiter->Signal();
}
803 | 306 |
// static
// Posted to the main thread by ~VpxEncoder(): stops (joins) the encoding
// thread first so no encode task can still be using |encoder| when it is
// destroyed.
void VpxEncoder::ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
                                 ScopedVpxCodecCtxPtr encoder) {
  DCHECK(encoding_thread->IsRunning());
  encoding_thread->Stop();
  // Both |encoding_thread| and |encoder| will be destroyed at end-of-scope.
}
811 | |
// Constructs a software VP8 or VP9 encoder. The codec configs' timebase
// denominators are zeroed as the "not yet configured" sentinel checked by
// IsInitialized(); actual configuration happens lazily on the first frame.
VpxEncoder::VpxEncoder(
    bool use_vp9,
    const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
    int32_t bits_per_second)
    : Encoder(on_encoded_video_callback, bits_per_second),
      use_vp9_(use_vp9) {
  codec_config_.g_timebase.den = 0;        // Not initialized.
  alpha_codec_config_.g_timebase.den = 0;  // Not initialized.
  DCHECK(encoding_thread_->IsRunning());
}
822 | |
VpxEncoder::~VpxEncoder() {
  // |encoding_thread_| cannot Stop() itself, so ownership of it and of the
  // libvpx context is handed to the main thread for orderly teardown.
  // NOTE(review): |alpha_encoder_| is not passed to ShutdownEncoder() and is
  // destroyed with this object instead — confirm no in-flight encode task on
  // |encoding_thread_| can still be using it at that point.
  main_task_runner_->PostTask(FROM_HERE,
                              base::Bind(&VpxEncoder::ShutdownEncoder,
                                         base::Passed(&encoding_thread_),
                                         base::Passed(&encoder_)));
}
829 | |
// Encodes |frame| with libvpx. For frames carrying alpha (YV12A) a second,
// parallel encoder instance compresses the alpha plane as a separate
// substream. Results are posted back to |origin_task_runner_| via
// OnFrameEncodeCompleted.
void VpxEncoder::EncodeOnEncodingTaskRunner(
    scoped_refptr<VideoFrame> frame,
    base::TimeTicks capture_timestamp) {
  TRACE_EVENT0("video", "VpxEncoder::EncodeOnEncodingTaskRunner");
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  const gfx::Size frame_size = frame->visible_rect().size();
  const base::TimeDelta duration = EstimateFrameDuration(frame);
  const media::WebmMuxer::VideoParameters video_params(frame);

  // Lazily (re)configure the colour encoder on first use or frame-size
  // change.
  if (!IsInitialized(codec_config_) ||
      gfx::Size(codec_config_.g_w, codec_config_.g_h) != frame_size) {
    ConfigureEncoderOnEncodingTaskRunner(frame_size, &codec_config_, &encoder_);
  }

  const bool frame_has_alpha = frame->format() == media::PIXEL_FORMAT_YV12A;
  if (frame_has_alpha && (!IsInitialized(alpha_codec_config_) ||
                          gfx::Size(alpha_codec_config_.g_w,
                                    alpha_codec_config_.g_h) != frame_size)) {
    ConfigureEncoderOnEncodingTaskRunner(frame_size, &alpha_codec_config_,
                                         &alpha_encoder_);
    // The alpha plane is fed to the second encoder as the Y plane of a
    // synthetic I420 frame; precompute strides/offsets and constant-valued
    // dummy U/V planes for it.
    u_plane_stride_ = media::VideoFrame::RowBytes(
        VideoFrame::kUPlane, frame->format(), frame_size.width());
    v_plane_stride_ = media::VideoFrame::RowBytes(
        VideoFrame::kVPlane, frame->format(), frame_size.width());
    v_plane_offset_ = media::VideoFrame::PlaneSize(
                          frame->format(), VideoFrame::kUPlane, frame_size)
                          .GetArea();
    alpha_dummy_planes_.resize(
        v_plane_offset_ + media::VideoFrame::PlaneSize(
                              frame->format(), VideoFrame::kVPlane, frame_size)
                              .GetArea());
    // It is more expensive to encode 0x00, so use 0x80 instead.
    std::fill(alpha_dummy_planes_.begin(), alpha_dummy_planes_.end(), 0x80);
  }
  // If we introduced a new alpha frame, force keyframe.
  const bool force_keyframe = frame_has_alpha && !last_frame_had_alpha_;
  last_frame_had_alpha_ = frame_has_alpha;

  std::unique_ptr<std::string> data(new std::string);
  bool keyframe = false;
  DoEncode(encoder_.get(), frame_size, frame->data(VideoFrame::kYPlane),
           frame->visible_data(VideoFrame::kYPlane),
           frame->stride(VideoFrame::kYPlane),
           frame->visible_data(VideoFrame::kUPlane),
           frame->stride(VideoFrame::kUPlane),
           frame->visible_data(VideoFrame::kVPlane),
           frame->stride(VideoFrame::kVPlane), duration, force_keyframe,
           data.get(), &keyframe);

  std::unique_ptr<std::string> alpha_data(new std::string);
  if (frame_has_alpha) {
    bool alpha_keyframe = false;
    // The colour stream's |keyframe| result is passed as force_keyframe so
    // both substreams keep the same keyframe cadence.
    DoEncode(alpha_encoder_.get(), frame_size, frame->data(VideoFrame::kAPlane),
             frame->visible_data(VideoFrame::kAPlane),
             frame->stride(VideoFrame::kAPlane), alpha_dummy_planes_.data(),
             u_plane_stride_, alpha_dummy_planes_.data() + v_plane_offset_,
             v_plane_stride_, duration, keyframe, alpha_data.get(),
             &alpha_keyframe);
    DCHECK_EQ(keyframe, alpha_keyframe);
  }
  // Drop the frame reference as soon as encoding is done so the buffer can
  // be returned to the capture pipeline before the cross-thread post.
  frame = nullptr;

  origin_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_,
                 video_params, base::Passed(&data), base::Passed(&alpha_data),
                 capture_timestamp, keyframe));
}
899 | |
// Synchronously encodes one I420 image with libvpx.
// |data| points at the frame's backing memory and is only used to satisfy
// vpx_img_wrap(); the plane pointers/strides are then overridden with the
// caller-supplied (visible-rect) values. On return, |*output_data| holds the
// compressed frame and |*keyframe| whether it was coded as a keyframe.
void VpxEncoder::DoEncode(vpx_codec_ctx_t* const encoder,
                          const gfx::Size& frame_size,
                          uint8_t* const data,
                          uint8_t* const y_plane,
                          int y_stride,
                          uint8_t* const u_plane,
                          int u_stride,
                          uint8_t* const v_plane,
                          int v_stride,
                          const base::TimeDelta& duration,
                          bool force_keyframe,
                          std::string* const output_data,
                          bool* const keyframe) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  vpx_image_t vpx_image;
  vpx_image_t* const result =
      vpx_img_wrap(&vpx_image, VPX_IMG_FMT_I420, frame_size.width(),
                   frame_size.height(), 1 /* align */, data);
  DCHECK_EQ(result, &vpx_image);
  // Replace the plane layout computed by vpx_img_wrap() with the actual
  // (possibly cropped) plane pointers and strides.
  vpx_image.planes[VPX_PLANE_Y] = y_plane;
  vpx_image.planes[VPX_PLANE_U] = u_plane;
  vpx_image.planes[VPX_PLANE_V] = v_plane;
  vpx_image.stride[VPX_PLANE_Y] = y_stride;
  vpx_image.stride[VPX_PLANE_U] = u_stride;
  vpx_image.stride[VPX_PLANE_V] = v_stride;

  const vpx_codec_flags_t flags = force_keyframe ? VPX_EFLAG_FORCE_KF : 0;
  // Encode the frame. The presentation time stamp argument here is fixed to
  // zero to force the encoder to base its single-frame bandwidth calculations
  // entirely on |predicted_frame_duration|.
  const vpx_codec_err_t ret =
      vpx_codec_encode(encoder, &vpx_image, 0 /* pts */,
                       duration.InMicroseconds(), flags, VPX_DL_REALTIME);
  DCHECK_EQ(ret, VPX_CODEC_OK)
      << vpx_codec_err_to_string(ret) << ", #" << vpx_codec_error(encoder)
      << " -" << vpx_codec_error_detail(encoder);

  // Copy out the first compressed-frame packet; with g_lag_in_frames == 0
  // the encoder emits output for every input frame.
  *keyframe = false;
  vpx_codec_iter_t iter = NULL;
  const vpx_codec_cx_pkt_t* pkt = NULL;
  while ((pkt = vpx_codec_get_cx_data(encoder, &iter)) != NULL) {
    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
      continue;
    output_data->assign(static_cast<char*>(pkt->data.frame.buf),
                        pkt->data.frame.sz);
    *keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
    break;
  }
}
950 | |
951 void VpxEncoder::ConfigureEncoderOnEncodingTaskRunner( | |
952 const gfx::Size& size, | |
953 vpx_codec_enc_cfg_t* codec_config, | |
954 ScopedVpxCodecCtxPtr* encoder) { | |
955 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
956 if (IsInitialized(*codec_config)) { | |
957 // TODO(mcasas) VP8 quirk/optimisation: If the new |size| is strictly less- | |
958 // than-or-equal than the old size, in terms of area, the existing encoder | |
959 // instance could be reused after changing |codec_config->{g_w,g_h}|. | |
960 DVLOG(1) << "Destroying/Re-Creating encoder for new frame size: " | |
961 << gfx::Size(codec_config->g_w, codec_config->g_h).ToString() | |
962 << " --> " << size.ToString() << (use_vp9_ ? " vp9" : " vp8"); | |
963 encoder->reset(); | |
964 } | |
965 | |
966 const vpx_codec_iface_t* codec_interface = | |
967 use_vp9_ ? vpx_codec_vp9_cx() : vpx_codec_vp8_cx(); | |
968 vpx_codec_err_t result = vpx_codec_enc_config_default( | |
969 codec_interface, codec_config, 0 /* reserved */); | |
970 DCHECK_EQ(VPX_CODEC_OK, result); | |
971 | |
972 DCHECK_EQ(320u, codec_config->g_w); | |
973 DCHECK_EQ(240u, codec_config->g_h); | |
974 DCHECK_EQ(256u, codec_config->rc_target_bitrate); | |
975 // Use the selected bitrate or adjust default bit rate to account for the | |
976 // actual size. Note: |rc_target_bitrate| units are kbit per second. | |
977 if (bits_per_second_ > 0) { | |
978 codec_config->rc_target_bitrate = bits_per_second_ / 1000; | |
979 } else { | |
980 codec_config->rc_target_bitrate = size.GetArea() * | |
981 codec_config->rc_target_bitrate / | |
982 codec_config->g_w / codec_config->g_h; | |
983 } | |
984 // Both VP8/VP9 configuration should be Variable BitRate by default. | |
985 DCHECK_EQ(VPX_VBR, codec_config->rc_end_usage); | |
986 if (use_vp9_) { | |
987 // Number of frames to consume before producing output. | |
988 codec_config->g_lag_in_frames = 0; | |
989 | |
990 // DCHECK that the profile selected by default is I420 (magic number 0). | |
991 DCHECK_EQ(0u, codec_config->g_profile); | |
992 } else { | |
993 // VP8 always produces frames instantaneously. | |
994 DCHECK_EQ(0u, codec_config->g_lag_in_frames); | |
995 } | |
996 | |
997 DCHECK(size.width()); | |
998 DCHECK(size.height()); | |
999 codec_config->g_w = size.width(); | |
1000 codec_config->g_h = size.height(); | |
1001 codec_config->g_pass = VPX_RC_ONE_PASS; | |
1002 | |
1003 // Timebase is the smallest interval used by the stream, can be set to the | |
1004 // frame rate or to e.g. microseconds. | |
1005 codec_config->g_timebase.num = 1; | |
1006 codec_config->g_timebase.den = base::Time::kMicrosecondsPerSecond; | |
1007 | |
1008 // Let the encoder decide where to place the Keyframes, between min and max. | |
1009 // In VPX_KF_AUTO mode libvpx will sometimes emit keyframes regardless of min/ | |
1010 // max distance out of necessity. | |
1011 // Note that due to http://crbug.com/440223, it might be necessary to force a | |
1012 // key frame after 10,000frames since decoding fails after 30,000 non-key | |
1013 // frames. | |
1014 // Forcing a keyframe in regular intervals also allows seeking in the | |
1015 // resulting recording with decent performance. | |
1016 codec_config->kf_mode = VPX_KF_AUTO; | |
1017 codec_config->kf_min_dist = 0; | |
1018 codec_config->kf_max_dist = 100; | |
1019 | |
1020 codec_config->g_threads = GetNumberOfThreadsForEncoding(); | |
1021 | |
1022 // Number of frames to consume before producing output. | |
1023 codec_config->g_lag_in_frames = 0; | |
1024 | |
1025 encoder->reset(new vpx_codec_ctx_t); | |
1026 const vpx_codec_err_t ret = vpx_codec_enc_init( | |
1027 encoder->get(), codec_interface, codec_config, 0 /* flags */); | |
1028 DCHECK_EQ(VPX_CODEC_OK, ret); | |
1029 | |
1030 if (use_vp9_) { | |
1031 // Values of VP8E_SET_CPUUSED greater than 0 will increase encoder speed at | |
1032 // the expense of quality up to a maximum value of 8 for VP9, by tuning the | |
1033 // target time spent encoding the frame. Go from 8 to 5 (values for real | |
1034 // time encoding) depending on the amount of cores available in the system. | |
1035 const int kCpuUsed = | |
1036 std::max(5, 8 - base::SysInfo::NumberOfProcessors() / 2); | |
1037 result = vpx_codec_control(encoder->get(), VP8E_SET_CPUUSED, kCpuUsed); | |
1038 DLOG_IF(WARNING, VPX_CODEC_OK != result) << "VP8E_SET_CPUUSED failed"; | |
1039 } | |
1040 } | |
1041 | |
1042 bool VpxEncoder::IsInitialized(const vpx_codec_enc_cfg_t& codec_config) const { | |
1043 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
1044 return codec_config.g_timebase.den != 0; | |
1045 } | |
1046 | |
// Returns the duration to assume for |frame|: the FRAME_DURATION metadata if
// the capture source provided a positive one, otherwise the elapsed time
// since the previous frame. The result is clamped to [1 ms, 1/8 s]. Updates
// |last_frame_timestamp_| as a side effect.
base::TimeDelta VpxEncoder::EstimateFrameDuration(
    const scoped_refptr<VideoFrame>& frame) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  using base::TimeDelta;
  TimeDelta predicted_frame_duration;
  if (!frame->metadata()->GetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
                                       &predicted_frame_duration) ||
      predicted_frame_duration <= TimeDelta()) {
    // The source of the video frame did not provide the frame duration. Use
    // the actual amount of time between the current and previous frame as a
    // prediction for the next frame's duration.
    // TODO(mcasas): This duration estimation could lead to artifacts if the
    // cadence of the received stream is compromised (e.g. camera freeze, pause,
    // remote packet loss). Investigate using GetFrameRate() in this case.
    predicted_frame_duration = frame->timestamp() - last_frame_timestamp_;
  }
  last_frame_timestamp_ = frame->timestamp();
  // Make sure |predicted_frame_duration| is in a safe range of values.
  const TimeDelta kMaxFrameDuration = TimeDelta::FromSecondsD(1.0 / 8);
  const TimeDelta kMinFrameDuration = TimeDelta::FromMilliseconds(1);
  return std::min(kMaxFrameDuration, std::max(predicted_frame_duration,
                                              kMinFrameDuration));
}
1071 | |
1072 #if BUILDFLAG(RTC_USE_H264) | |
1073 | |
// static
// Posted to the main thread by ~H264Encoder(): stops (joins) the encoding
// thread first so no encode task can still be using |encoder| when the
// OpenH264 context is destroyed.
void H264Encoder::ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
                                  ScopedISVCEncoderPtr encoder) {
  DCHECK(encoding_thread->IsRunning());
  encoding_thread->Stop();
  // Both |encoding_thread| and |encoder| will be destroyed at end-of-scope.
}
1081 | |
// Constructs a software OpenH264 encoder. The encoder itself is created
// lazily in ConfigureEncoderOnEncodingTaskRunner() on the first frame.
H264Encoder::H264Encoder(
    const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
    int32_t bits_per_second)
    : Encoder(on_encoded_video_callback, bits_per_second) {
  DCHECK(encoding_thread_->IsRunning());
}
1088 | |
H264Encoder::~H264Encoder() {
  // |encoding_thread_| cannot Stop() itself, so ownership of it and of the
  // OpenH264 context is handed to the main thread for orderly teardown.
  main_task_runner_->PostTask(FROM_HERE,
                              base::Bind(&H264Encoder::ShutdownEncoder,
                                         base::Passed(&encoding_thread_),
                                         base::Passed(&openh264_encoder_)));
}
1095 | |
// Encodes |frame| with OpenH264 and posts the resulting bitstream (all
// layers concatenated, Annex-B NAL start codes included) to
// |origin_task_runner_| via OnFrameEncodeCompleted. H.264 has no alpha
// support, so the alpha-data argument of the callback is always null.
void H264Encoder::EncodeOnEncodingTaskRunner(
    scoped_refptr<VideoFrame> frame,
    base::TimeTicks capture_timestamp) {
  TRACE_EVENT0("video", "H264Encoder::EncodeOnEncodingTaskRunner");
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  // (Re)configure lazily on first frame or size change; timestamps fed to
  // OpenH264 are measured from the first frame after (re)configuration.
  const gfx::Size frame_size = frame->visible_rect().size();
  if (!openh264_encoder_ || configured_size_ != frame_size) {
    ConfigureEncoderOnEncodingTaskRunner(frame_size);
    first_frame_timestamp_ = capture_timestamp;
  }

  SSourcePicture picture = {};
  picture.iPicWidth = frame_size.width();
  picture.iPicHeight = frame_size.height();
  picture.iColorFormat = EVideoFormatType::videoFormatI420;
  picture.uiTimeStamp =
      (capture_timestamp - first_frame_timestamp_).InMilliseconds();
  picture.iStride[0] = frame->stride(VideoFrame::kYPlane);
  picture.iStride[1] = frame->stride(VideoFrame::kUPlane);
  picture.iStride[2] = frame->stride(VideoFrame::kVPlane);
  picture.pData[0] = frame->visible_data(VideoFrame::kYPlane);
  picture.pData[1] = frame->visible_data(VideoFrame::kUPlane);
  picture.pData[2] = frame->visible_data(VideoFrame::kVPlane);

  SFrameBSInfo info = {};
  if (openh264_encoder_->EncodeFrame(&picture, &info) != cmResultSuccess) {
    NOTREACHED() << "OpenH264 encoding failed";
    return;
  }
  // Capture the muxer parameters, then release the frame buffer back to the
  // capture pipeline as early as possible.
  const media::WebmMuxer::VideoParameters video_params(frame);
  frame = nullptr;

  std::unique_ptr<std::string> data(new std::string);
  const uint8_t kNALStartCode[4] = {0, 0, 0, 1};
  for (int layer = 0; layer < info.iLayerNum; ++layer) {
    const SLayerBSInfo& layerInfo = info.sLayerInfo[layer];
    // Iterate NAL units making up this layer, noting fragments.
    size_t layer_len = 0;
    for (int nal = 0; nal < layerInfo.iNalCount; ++nal) {
      // The following DCHECKs make sure that the header of each NAL unit is OK.
      DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
      DCHECK_EQ(kNALStartCode[0], layerInfo.pBsBuf[layer_len+0]);
      DCHECK_EQ(kNALStartCode[1], layerInfo.pBsBuf[layer_len+1]);
      DCHECK_EQ(kNALStartCode[2], layerInfo.pBsBuf[layer_len+2]);
      DCHECK_EQ(kNALStartCode[3], layerInfo.pBsBuf[layer_len+3]);

      layer_len += layerInfo.pNalLengthInByte[nal];
    }
    // Copy the entire layer's data (including NAL start codes).
    data->append(reinterpret_cast<char*>(layerInfo.pBsBuf), layer_len);
  }

  const bool is_key_frame = info.eFrameType == videoFrameTypeIDR;
  origin_task_runner_->PostTask(
      FROM_HERE, base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_,
                            video_params, base::Passed(&data), nullptr,
                            capture_timestamp, is_key_frame));
}
1155 | |
// Creates and initializes a new OpenH264 encoder for frames of |size|,
// replacing any previous instance. On failure, |openh264_encoder_| may be
// left null/uninitialized and the following EncodeFrame() call will fail.
void H264Encoder::ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());
  ISVCEncoder* temp_encoder = nullptr;
  if (WelsCreateSVCEncoder(&temp_encoder) != 0) {
    NOTREACHED() << "Failed to create OpenH264 encoder";
    return;
  }
  openh264_encoder_.reset(temp_encoder);
  configured_size_ = size;

#if DCHECK_IS_ON()
  int trace_level = WELS_LOG_INFO;
  openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
#endif

  SEncParamExt init_params;
  openh264_encoder_->GetDefaultParams(&init_params);
  init_params.iUsageType = CAMERA_VIDEO_REAL_TIME;

  // The DCHECKs below pin the OpenH264 defaults this configuration relies on,
  // so a library roll that changes them is noticed.
  DCHECK_EQ(AUTO_REF_PIC_COUNT, init_params.iNumRefFrame);
  DCHECK(!init_params.bSimulcastAVC);

  init_params.uiIntraPeriod = 100;  // Same as for VpxEncoder.
  init_params.iPicWidth = size.width();
  init_params.iPicHeight = size.height();

  DCHECK_EQ(RC_QUALITY_MODE, init_params.iRCMode);
  DCHECK_EQ(0, init_params.iPaddingFlag);
  DCHECK_EQ(UNSPECIFIED_BIT_RATE, init_params.iTargetBitrate);
  DCHECK_EQ(UNSPECIFIED_BIT_RATE, init_params.iMaxBitrate);
  // Use bitrate-based rate control when a target was requested, otherwise
  // disable rate control entirely.
  if (bits_per_second_ > 0) {
    init_params.iRCMode = RC_BITRATE_MODE;
    init_params.iTargetBitrate = bits_per_second_;
  } else {
    init_params.iRCMode = RC_OFF_MODE;
  }

  // Threading model: Set to 1 due to https://crbug.com/583348.
  init_params.iMultipleThreadIdc = 1;

  // TODO(mcasas): consider reducing complexity if there are few CPUs available.
  init_params.iComplexityMode = MEDIUM_COMPLEXITY;
  DCHECK(!init_params.bEnableDenoise);
  DCHECK(init_params.bEnableFrameSkip);

  // The base spatial layer 0 is the only one we use.
  DCHECK_EQ(1, init_params.iSpatialLayerNum);
  init_params.sSpatialLayers[0].iVideoWidth = init_params.iPicWidth;
  init_params.sSpatialLayers[0].iVideoHeight = init_params.iPicHeight;
  init_params.sSpatialLayers[0].iSpatialBitrate = init_params.iTargetBitrate;

  // When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto design
  // it with cpu core number.
  // TODO(sprang): Set to 0 when we understand why the rate controller borks
  // when uiSliceNum > 1. See https://github.com/cisco/openh264/issues/2591
  init_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
  init_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
      SM_FIXEDSLCNUM_SLICE;

  if (openh264_encoder_->InitializeExt(&init_params) != cmResultSuccess) {
    NOTREACHED() << "Failed to initialize OpenH264 encoder";
    return;
  }

  int pixel_format = EVideoFormatType::videoFormatI420;
  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &pixel_format);
}
1223 #endif //#if BUILDFLAG(RTC_USE_H264) | |
1224 | |
1225 } // anonymous namespace | |
1226 | |
// static
// Returns the codec preferred for recording, as decided by the shared
// CodecEnumerator singleton.
VideoTrackRecorder::CodecId VideoTrackRecorder::GetPreferredCodecId() {
  return GetCodecEnumerator()->GetPreferredCodecId();
}
1231 | 311 |
1232 VideoTrackRecorder::VideoTrackRecorder( | 312 VideoTrackRecorder::VideoTrackRecorder( |
1233 CodecId codec, | 313 CodecId codec, |
1234 const blink::WebMediaStreamTrack& track, | 314 const blink::WebMediaStreamTrack& track, |
1235 const OnEncodedVideoCB& on_encoded_video_callback, | 315 const OnEncodedVideoCB& on_encoded_video_callback, |
1236 int32_t bits_per_second) | 316 int32_t bits_per_second) |
1237 : track_(track), | 317 : track_(track), |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1330 if (paused_before_init_) | 410 if (paused_before_init_) |
1331 encoder_->SetPaused(paused_before_init_); | 411 encoder_->SetPaused(paused_before_init_); |
1332 | 412 |
1333 // StartFrameEncode() will be called on Render IO thread. | 413 // StartFrameEncode() will be called on Render IO thread. |
1334 MediaStreamVideoSink::ConnectToTrack( | 414 MediaStreamVideoSink::ConnectToTrack( |
1335 track_, | 415 track_, |
1336 base::Bind(&VideoTrackRecorder::Encoder::StartFrameEncode, encoder_), | 416 base::Bind(&VideoTrackRecorder::Encoder::StartFrameEncode, encoder_), |
1337 false); | 417 false); |
1338 } | 418 } |
1339 | 419 |
// Test-only: asks the active encoder whether it can encode frames with an
// alpha channel. Must only be called once |encoder_| has been created.
bool VideoTrackRecorder::CanEncodeAlphaChannelForTesting() {
  DCHECK(encoder_);
  return encoder_->CanEncodeAlphaChannel();
}
1344 | |
1345 } // namespace content | 420 } // namespace content |
OLD | NEW |