OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media_recorder/video_track_recorder.h" | 5 #include "content/renderer/media_recorder/video_track_recorder.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
11 #include "base/macros.h" | 11 #include "base/macros.h" |
12 #include "base/memory/ptr_util.h" | 12 #include "base/memory/ptr_util.h" |
13 #include "base/sys_info.h" | |
14 #include "base/task_runner_util.h" | 13 #include "base/task_runner_util.h" |
15 #include "base/threading/thread.h" | 14 #include "base/threading/thread.h" |
16 #include "base/threading/thread_task_runner_handle.h" | 15 #include "base/threading/thread_task_runner_handle.h" |
17 #include "base/time/time.h" | 16 #include "base/time/time.h" |
18 #include "base/trace_event/trace_event.h" | |
19 #include "cc/paint/skia_paint_canvas.h" | 17 #include "cc/paint/skia_paint_canvas.h" |
20 #include "content/renderer/media/renderer_gpu_video_accelerator_factories.h" | 18 #include "content/renderer/media/renderer_gpu_video_accelerator_factories.h" |
| 19 #include "content/renderer/media_recorder/vea_encoder.h" |
| 20 #include "content/renderer/media_recorder/vpx_encoder.h" |
21 #include "content/renderer/render_thread_impl.h" | 21 #include "content/renderer/render_thread_impl.h" |
22 #include "media/base/bind_to_current_loop.h" | 22 #include "media/base/bind_to_current_loop.h" |
23 #include "media/base/video_frame.h" | 23 #include "media/base/video_frame.h" |
24 #include "media/base/video_util.h" | 24 #include "media/base/video_util.h" |
25 #include "media/filters/context_3d.h" | 25 #include "media/filters/context_3d.h" |
26 #include "media/renderers/skcanvas_video_renderer.h" | 26 #include "media/renderers/skcanvas_video_renderer.h" |
27 #include "services/ui/public/cpp/gpu/context_provider_command_buffer.h" | 27 #include "services/ui/public/cpp/gpu/context_provider_command_buffer.h" |
28 #include "skia/ext/platform_canvas.h" | 28 #include "skia/ext/platform_canvas.h" |
29 #include "third_party/libyuv/include/libyuv.h" | 29 #include "third_party/libyuv/include/libyuv.h" |
30 #include "ui/gfx/geometry/size.h" | 30 #include "ui/gfx/geometry/size.h" |
31 | 31 |
32 #if BUILDFLAG(RTC_USE_H264) | 32 #if BUILDFLAG(RTC_USE_H264) |
33 #include "third_party/openh264/src/codec/api/svc/codec_api.h" | 33 #include "content/renderer/media_recorder/h264_encoder.h" |
34 #include "third_party/openh264/src/codec/api/svc/codec_app_def.h" | |
35 #include "third_party/openh264/src/codec/api/svc/codec_def.h" | |
36 #endif // #if BUILDFLAG(RTC_USE_H264) | 34 #endif // #if BUILDFLAG(RTC_USE_H264) |
37 | 35 |
38 extern "C" { | |
39 // VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide | |
40 // backwards compatibility for legacy applications using the library. | |
41 #define VPX_CODEC_DISABLE_COMPAT 1 | |
42 #include "third_party/libvpx/source/libvpx/vpx/vp8cx.h" | |
43 #include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h" | |
44 } | |
45 | |
46 using media::VideoFrame; | 36 using media::VideoFrame; |
47 using media::VideoFrameMetadata; | |
48 using video_track_recorder::kVEAEncoderMinResolutionWidth; | 37 using video_track_recorder::kVEAEncoderMinResolutionWidth; |
49 using video_track_recorder::kVEAEncoderMinResolutionHeight; | 38 using video_track_recorder::kVEAEncoderMinResolutionHeight; |
50 | 39 |
51 namespace content { | 40 namespace content { |
52 | 41 |
53 namespace { | 42 namespace { |
54 | 43 |
55 // HW encoders expect a nonzero bitrate, so |kVEADefaultBitratePerPixel| is used | |
56 // to estimate bits per second for ~30 fps with ~1/16 compression rate. | |
57 const int kVEADefaultBitratePerPixel = 2; | |
58 // Number of output buffers used to copy the encoded data coming from HW | |
59 // encoders. | |
60 const int kVEAEncoderOutputBufferCount = 4; | |
61 | |
62 using CodecId = VideoTrackRecorder::CodecId; | 44 using CodecId = VideoTrackRecorder::CodecId; |
63 | 45 |
64 static const struct { | 46 static const struct { |
65 CodecId codec_id; | 47 CodecId codec_id; |
66 media::VideoCodecProfile min_profile; | 48 media::VideoCodecProfile min_profile; |
67 media::VideoCodecProfile max_profile; | 49 media::VideoCodecProfile max_profile; |
68 } kPreferredCodecIdAndVEAProfiles[] = { | 50 } kPreferredCodecIdAndVEAProfiles[] = { |
69 {CodecId::VP8, media::VP8PROFILE_MIN, media::VP8PROFILE_MAX}, | 51 {CodecId::VP8, media::VP8PROFILE_MIN, media::VP8PROFILE_MAX}, |
70 {CodecId::VP9, media::VP9PROFILE_MIN, media::VP9PROFILE_MAX}, | 52 {CodecId::VP9, media::VP9PROFILE_MIN, media::VP9PROFILE_MAX}, |
71 #if BUILDFLAG(RTC_USE_H264) | 53 #if BUILDFLAG(RTC_USE_H264) |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
153 | 135 |
154 media::VideoCodecProfile CodecEnumerator::CodecIdToVEAProfile(CodecId codec) { | 136 media::VideoCodecProfile CodecEnumerator::CodecIdToVEAProfile(CodecId codec) { |
155 const auto profile = codec_id_to_profile_.find(codec); | 137 const auto profile = codec_id_to_profile_.find(codec); |
156 return profile == codec_id_to_profile_.end() | 138 return profile == codec_id_to_profile_.end() |
157 ? media::VIDEO_CODEC_PROFILE_UNKNOWN | 139 ? media::VIDEO_CODEC_PROFILE_UNKNOWN |
158 : profile->second; | 140 : profile->second; |
159 } | 141 } |
160 | 142 |
161 } // anonymous namespace | 143 } // anonymous namespace |
162 | 144 |
163 // Base class to describe a generic Encoder, encapsulating all actual encoder | 145 VideoTrackRecorder::Encoder::Encoder( |
164 // (re)configurations, encoding and delivery of received frames. This class is | 146 const OnEncodedVideoCB& on_encoded_video_callback, |
165 // ref-counted to allow the MediaStreamVideoTrack to hold a reference to it (via | 147 int32_t bits_per_second, |
166 // the callback that MediaStreamVideoSink passes along) and to jump back and | 148 scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner) |
167 // forth to an internal encoder thread. Moreover, this class: | 149 : main_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
168 // - is created on its parent's thread (usually the main Render thread), | 150 encoding_task_runner_(encoding_task_runner), |
169 // that is, |main_task_runner_|. | 151 paused_(false), |
170 // - receives VideoFrames on |origin_task_runner_| and runs OnEncodedVideoCB on | 152 on_encoded_video_callback_(on_encoded_video_callback), |
171 // that thread as well. This task runner is cached on first frame arrival, and | 153 bits_per_second_(bits_per_second) { |
172 // is supposed to be the render IO thread (but this is not enforced); | 154 DCHECK(!on_encoded_video_callback_.is_null()); |
173 // - uses an internal |encoding_task_runner_| for actual encoder interactions, | 155 if (encoding_task_runner_) |
174 // namely configuration, encoding (which might take some time) and destruction. | 156 return; |
175 // This task runner can be passed on the creation. If nothing is passed, a new | 157 encoding_thread_.reset(new base::Thread("EncodingThread")); |
176 // encoding thread is created and used. | 158 encoding_thread_->Start(); |
177 class VideoTrackRecorder::Encoder : public base::RefCountedThreadSafe<Encoder> { | 159 encoding_task_runner_ = encoding_thread_->task_runner(); |
178 public: | 160 } |
179 Encoder(const OnEncodedVideoCB& on_encoded_video_callback, | |
180 int32_t bits_per_second, | |
181 scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner = | |
182 nullptr) | |
183 : main_task_runner_(base::ThreadTaskRunnerHandle::Get()), | |
184 encoding_task_runner_(encoding_task_runner), | |
185 paused_(false), | |
186 on_encoded_video_callback_(on_encoded_video_callback), | |
187 bits_per_second_(bits_per_second) { | |
188 DCHECK(!on_encoded_video_callback_.is_null()); | |
189 if (encoding_task_runner_) | |
190 return; | |
191 encoding_thread_.reset(new base::Thread("EncodingThread")); | |
192 encoding_thread_->Start(); | |
193 encoding_task_runner_ = encoding_thread_->task_runner(); | |
194 } | |
195 | 161 |
196 // Start encoding |frame|, returning via |on_encoded_video_callback_|. This | 162 VideoTrackRecorder::Encoder::~Encoder() { |
197 // call will also trigger an encode configuration upon first frame arrival | 163 main_task_runner_->DeleteSoon(FROM_HERE, video_renderer_.release()); |
198 // or parameter change, and an EncodeOnEncodingTaskRunner() to actually | 164 } |
199 // encode the frame. If the |frame|'s data is not directly available (e.g. | |
200 // it's a texture) then RetrieveFrameOnMainThread() is called, and if even | |
201 // that fails, black frames are sent instead. | |
202 void StartFrameEncode(const scoped_refptr<VideoFrame>& frame, | |
203 base::TimeTicks capture_timestamp); | |
204 void RetrieveFrameOnMainThread(const scoped_refptr<VideoFrame>& video_frame, | |
205 base::TimeTicks capture_timestamp); | |
206 | |
207 void SetPaused(bool paused); | |
208 virtual bool CanEncodeAlphaChannel() { return false; } | |
209 | |
210 protected: | |
211 friend class base::RefCountedThreadSafe<Encoder>; | |
212 virtual ~Encoder() { | |
213 main_task_runner_->DeleteSoon(FROM_HERE, video_renderer_.release()); | |
214 } | |
215 | |
216 virtual void EncodeOnEncodingTaskRunner( | |
217 scoped_refptr<VideoFrame> frame, | |
218 base::TimeTicks capture_timestamp) = 0; | |
219 | |
220 // Used to shutdown properly on the same thread we were created. | |
221 const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_; | |
222 | |
223 // Task runner where frames to encode and reply callbacks must happen. | |
224 scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_; | |
225 | |
226 // Task runner where encoding interactions happen. | |
227 scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner_; | |
228 | |
229 // Optional thread for encoding. Active for the lifetime of VpxEncoder. | |
230 std::unique_ptr<base::Thread> encoding_thread_; | |
231 | |
232 // While |paused_|, frames are not encoded. Used only from |encoding_thread_|. | |
233 bool paused_; | |
234 | |
235 // This callback should be exercised on IO thread. | |
236 const OnEncodedVideoCB on_encoded_video_callback_; | |
237 | |
238 // Target bitrate for video encoding. If 0, a standard bitrate is used. | |
239 const int32_t bits_per_second_; | |
240 | |
241 // Used to retrieve incoming opaque VideoFrames (i.e. VideoFrames backed by | |
242 // textures). Created on-demand on |main_task_runner_|. | |
243 std::unique_ptr<media::SkCanvasVideoRenderer> video_renderer_; | |
244 SkBitmap bitmap_; | |
245 std::unique_ptr<cc::PaintCanvas> canvas_; | |
246 | |
247 DISALLOW_COPY_AND_ASSIGN(Encoder); | |
248 }; | |
249 | 165 |
250 void VideoTrackRecorder::Encoder::StartFrameEncode( | 166 void VideoTrackRecorder::Encoder::StartFrameEncode( |
251 const scoped_refptr<VideoFrame>& video_frame, | 167 const scoped_refptr<VideoFrame>& video_frame, |
252 base::TimeTicks capture_timestamp) { | 168 base::TimeTicks capture_timestamp) { |
253 // Cache the thread sending frames on first frame arrival. | 169 // Cache the thread sending frames on first frame arrival. |
254 if (!origin_task_runner_.get()) | 170 if (!origin_task_runner_.get()) |
255 origin_task_runner_ = base::ThreadTaskRunnerHandle::Get(); | 171 origin_task_runner_ = base::ThreadTaskRunnerHandle::Get(); |
256 DCHECK(origin_task_runner_->BelongsToCurrentThread()); | 172 DCHECK(origin_task_runner_->BelongsToCurrentThread()); |
257 if (paused_) | 173 if (paused_) |
258 return; | 174 return; |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
354 DLOG(ERROR) << "Error converting frame to I420"; | 270 DLOG(ERROR) << "Error converting frame to I420"; |
355 return; | 271 return; |
356 } | 272 } |
357 } | 273 } |
358 | 274 |
359 encoding_task_runner_->PostTask( | 275 encoding_task_runner_->PostTask( |
360 FROM_HERE, base::Bind(&Encoder::EncodeOnEncodingTaskRunner, this, frame, | 276 FROM_HERE, base::Bind(&Encoder::EncodeOnEncodingTaskRunner, this, frame, |
361 capture_timestamp)); | 277 capture_timestamp)); |
362 } | 278 } |
363 | 279 |
364 void VideoTrackRecorder::Encoder::SetPaused(bool paused) { | 280 // static |
365 if (!encoding_task_runner_->BelongsToCurrentThread()) { | 281 void VideoTrackRecorder::Encoder::OnFrameEncodeCompleted( |
366 encoding_task_runner_->PostTask( | |
367 FROM_HERE, base::Bind(&Encoder::SetPaused, this, paused)); | |
368 return; | |
369 } | |
370 paused_ = paused; | |
371 } | |
372 | |
373 namespace { | |
374 | |
375 // Originally from remoting/codec/scoped_vpx_codec.h. | |
376 // TODO(mcasas): Refactor into a common location. | |
377 struct VpxCodecDeleter { | |
378 void operator()(vpx_codec_ctx_t* codec) { | |
379 if (!codec) | |
380 return; | |
381 vpx_codec_err_t ret = vpx_codec_destroy(codec); | |
382 CHECK_EQ(ret, VPX_CODEC_OK); | |
383 delete codec; | |
384 } | |
385 }; | |
386 typedef std::unique_ptr<vpx_codec_ctx_t, VpxCodecDeleter> ScopedVpxCodecCtxPtr; | |
387 | |
388 static void OnFrameEncodeCompleted( | |
389 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb, | 282 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb, |
390 const media::WebmMuxer::VideoParameters& params, | 283 const media::WebmMuxer::VideoParameters& params, |
391 std::unique_ptr<std::string> data, | 284 std::unique_ptr<std::string> data, |
392 std::unique_ptr<std::string> alpha_data, | 285 std::unique_ptr<std::string> alpha_data, |
393 base::TimeTicks capture_timestamp, | 286 base::TimeTicks capture_timestamp, |
394 bool keyframe) { | 287 bool keyframe) { |
395 DVLOG(1) << (keyframe ? "" : "non ") << "keyframe "<< data->length() << "B, " | 288 DVLOG(1) << (keyframe ? "" : "non ") << "keyframe "<< data->length() << "B, " |
396 << capture_timestamp << " ms"; | 289 << capture_timestamp << " ms"; |
397 on_encoded_video_cb.Run(params, std::move(data), std::move(alpha_data), | 290 on_encoded_video_cb.Run(params, std::move(data), std::move(alpha_data), |
398 capture_timestamp, keyframe); | 291 capture_timestamp, keyframe); |
399 } | 292 } |
400 | 293 |
401 static int GetNumberOfThreadsForEncoding() { | 294 void VideoTrackRecorder::Encoder::SetPaused(bool paused) { |
402 // Do not saturate CPU utilization just for encoding. On a lower-end system | 295 if (!encoding_task_runner_->BelongsToCurrentThread()) { |
403 // with only 1 or 2 cores, use only one thread for encoding. On systems with | 296 encoding_task_runner_->PostTask( |
404 // more cores, allow half of the cores to be used for encoding. | 297 FROM_HERE, base::Bind(&Encoder::SetPaused, this, paused)); |
405 return std::min(8, (base::SysInfo::NumberOfProcessors() + 1) / 2); | 298 return; |
| 299 } |
| 300 paused_ = paused; |
406 } | 301 } |
407 | 302 |
408 // Class encapsulating VideoEncodeAccelerator interactions. | 303 bool VideoTrackRecorder::Encoder::CanEncodeAlphaChannel() { |
409 // This class is created and destroyed in its owner thread. All other methods | 304 return false; |
410 // operate on the task runner pointed by GpuFactories. | |
411 class VEAEncoder final : public VideoTrackRecorder::Encoder, | |
412 public media::VideoEncodeAccelerator::Client { | |
413 public: | |
414 VEAEncoder( | |
415 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
416 const VideoTrackRecorder::OnErrorCB& on_error_callback, | |
417 int32_t bits_per_second, | |
418 media::VideoCodecProfile codec, | |
419 const gfx::Size& size); | |
420 | |
421 // media::VideoEncodeAccelerator::Client implementation. | |
422 void RequireBitstreamBuffers(unsigned int input_count, | |
423 const gfx::Size& input_coded_size, | |
424 size_t output_buffer_size) override; | |
425 void BitstreamBufferReady(int32_t bitstream_buffer_id, | |
426 size_t payload_size, | |
427 bool key_frame, | |
428 base::TimeDelta timestamp) override; | |
429 void NotifyError(media::VideoEncodeAccelerator::Error error) override; | |
430 | |
431 private: | |
432 using VideoFrameAndTimestamp = | |
433 std::pair<scoped_refptr<media::VideoFrame>, base::TimeTicks>; | |
434 using VideoParamsAndTimestamp = | |
435 std::pair<media::WebmMuxer::VideoParameters, base::TimeTicks>; | |
436 | |
437 void UseOutputBitstreamBufferId(int32_t bitstream_buffer_id); | |
438 void FrameFinished(std::unique_ptr<base::SharedMemory> shm); | |
439 | |
440 // VideoTrackRecorder::Encoder implementation. | |
441 ~VEAEncoder() override; | |
442 void EncodeOnEncodingTaskRunner(scoped_refptr<VideoFrame> frame, | |
443 base::TimeTicks capture_timestamp) override; | |
444 | |
445 void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size); | |
446 | |
447 void DestroyOnEncodingTaskRunner(base::WaitableEvent* async_waiter); | |
448 | |
449 media::GpuVideoAcceleratorFactories* const gpu_factories_; | |
450 | |
451 const media::VideoCodecProfile codec_; | |
452 | |
453 // The underlying VEA to perform encoding on. | |
454 std::unique_ptr<media::VideoEncodeAccelerator> video_encoder_; | |
455 | |
456 // Shared memory buffers for output with the VEA. | |
457 std::vector<std::unique_ptr<base::SharedMemory>> output_buffers_; | |
458 | |
459 // Shared memory buffers for output with the VEA as FIFO. | |
460 std::queue<std::unique_ptr<base::SharedMemory>> input_buffers_; | |
461 | |
462 // Tracks error status. | |
463 bool error_notified_; | |
464 | |
465 // Tracks the last frame that we delay the encode. | |
466 std::unique_ptr<VideoFrameAndTimestamp> last_frame_; | |
467 | |
468 // Size used to initialize encoder. | |
469 gfx::Size input_visible_size_; | |
470 | |
471 // Coded size that encoder requests as input. | |
472 gfx::Size vea_requested_input_coded_size_; | |
473 | |
474 // Frames and corresponding timestamps in encode as FIFO. | |
475 std::queue<VideoParamsAndTimestamp> frames_in_encode_; | |
476 | |
477 // This callback can be exercised on any thread. | |
478 const VideoTrackRecorder::OnErrorCB on_error_callback_; | |
479 }; | |
480 | |
481 // Class encapsulating all libvpx interactions for VP8/VP9 encoding. | |
482 class VpxEncoder final : public VideoTrackRecorder::Encoder { | |
483 public: | |
484 static void ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread, | |
485 ScopedVpxCodecCtxPtr encoder); | |
486 | |
487 VpxEncoder( | |
488 bool use_vp9, | |
489 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
490 int32_t bits_per_second); | |
491 | |
492 private: | |
493 // VideoTrackRecorder::Encoder implementation. | |
494 ~VpxEncoder() override; | |
495 void EncodeOnEncodingTaskRunner(scoped_refptr<VideoFrame> frame, | |
496 base::TimeTicks capture_timestamp) override; | |
497 bool CanEncodeAlphaChannel() override { return true; } | |
498 | |
499 void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size, | |
500 vpx_codec_enc_cfg_t* codec_config, | |
501 ScopedVpxCodecCtxPtr* encoder); | |
502 void DoEncode(vpx_codec_ctx_t* const encoder, | |
503 const gfx::Size& frame_size, | |
504 uint8_t* const data, | |
505 uint8_t* const y_plane, | |
506 int y_stride, | |
507 uint8_t* const u_plane, | |
508 int u_stride, | |
509 uint8_t* const v_plane, | |
510 int v_stride, | |
511 const base::TimeDelta& duration, | |
512 bool force_keyframe, | |
513 std::string* const output_data, | |
514 bool* const keyframe); | |
515 | |
516 // Returns true if |codec_config| has been filled in at least once. | |
517 bool IsInitialized(const vpx_codec_enc_cfg_t& codec_config) const; | |
518 | |
519 // Estimate the frame duration from |frame| and |last_frame_timestamp_|. | |
520 base::TimeDelta EstimateFrameDuration(const scoped_refptr<VideoFrame>& frame); | |
521 | |
522 // Force usage of VP9 for encoding, instead of VP8 which is the default. | |
523 const bool use_vp9_; | |
524 | |
525 // VPx internal objects: configuration and encoder. |encoder_| is a special | |
526 // scoped pointer to guarantee proper destruction, particularly when | |
527 // reconfiguring due to parameters change. Only used on |encoding_thread_|. | |
528 vpx_codec_enc_cfg_t codec_config_; | |
529 ScopedVpxCodecCtxPtr encoder_; | |
530 | |
531 vpx_codec_enc_cfg_t alpha_codec_config_; | |
532 ScopedVpxCodecCtxPtr alpha_encoder_; | |
533 | |
534 std::vector<uint8_t> alpha_dummy_planes_; | |
535 size_t v_plane_offset_; | |
536 size_t u_plane_stride_; | |
537 size_t v_plane_stride_; | |
538 bool last_frame_had_alpha_ = false; | |
539 | |
540 // The |VideoFrame::timestamp()| of the last encoded frame. This is used to | |
541 // predict the duration of the next frame. Only used on |encoding_thread_|. | |
542 base::TimeDelta last_frame_timestamp_; | |
543 | |
544 DISALLOW_COPY_AND_ASSIGN(VpxEncoder); | |
545 }; | |
546 | |
547 #if BUILDFLAG(RTC_USE_H264) | |
548 | |
549 struct ISVCEncoderDeleter { | |
550 void operator()(ISVCEncoder* codec) { | |
551 if (!codec) | |
552 return; | |
553 const int uninit_ret = codec->Uninitialize(); | |
554 CHECK_EQ(cmResultSuccess, uninit_ret); | |
555 WelsDestroySVCEncoder(codec); | |
556 } | |
557 }; | |
558 typedef std::unique_ptr<ISVCEncoder, ISVCEncoderDeleter> ScopedISVCEncoderPtr; | |
559 | |
560 // Class encapsulating all openh264 interactions for H264 encoding. | |
561 class H264Encoder final : public VideoTrackRecorder::Encoder { | |
562 public: | |
563 static void ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread, | |
564 ScopedISVCEncoderPtr encoder); | |
565 | |
566 H264Encoder( | |
567 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
568 int32_t bits_per_second); | |
569 | |
570 private: | |
571 // VideoTrackRecorder::Encoder implementation. | |
572 ~H264Encoder() override; | |
573 void EncodeOnEncodingTaskRunner(scoped_refptr<VideoFrame> frame, | |
574 base::TimeTicks capture_timestamp) override; | |
575 | |
576 void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size); | |
577 | |
578 // |openh264_encoder_| is a special scoped pointer to guarantee proper | |
579 // destruction, also when reconfiguring due to parameters change. Only used on | |
580 // |encoding_thread_|. | |
581 gfx::Size configured_size_; | |
582 ScopedISVCEncoderPtr openh264_encoder_; | |
583 | |
584 // The |VideoFrame::timestamp()| of the first received frame. Only used on | |
585 // |encoding_thread_|. | |
586 base::TimeTicks first_frame_timestamp_; | |
587 | |
588 DISALLOW_COPY_AND_ASSIGN(H264Encoder); | |
589 }; | |
590 | |
591 #endif // #if BUILDFLAG(RTC_USE_H264) | |
592 | |
593 VEAEncoder::VEAEncoder( | |
594 const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback, | |
595 const VideoTrackRecorder::OnErrorCB& on_error_callback, | |
596 int32_t bits_per_second, | |
597 media::VideoCodecProfile codec, | |
598 const gfx::Size& size) | |
599 : Encoder(on_encoded_video_callback, | |
600 bits_per_second > 0 ? bits_per_second | |
601 : size.GetArea() * kVEADefaultBitratePerPixel, | |
602 RenderThreadImpl::current()->GetGpuFactories()->GetTaskRunner()), | |
603 gpu_factories_(RenderThreadImpl::current()->GetGpuFactories()), | |
604 codec_(codec), | |
605 error_notified_(false), | |
606 on_error_callback_(on_error_callback) { | |
607 DCHECK(gpu_factories_); | |
608 DCHECK_GE(size.width(), kVEAEncoderMinResolutionWidth); | |
609 DCHECK_GE(size.height(), kVEAEncoderMinResolutionHeight); | |
610 | |
611 encoding_task_runner_->PostTask( | |
612 FROM_HERE, base::Bind(&VEAEncoder::ConfigureEncoderOnEncodingTaskRunner, | |
613 this, size)); | |
614 } | |
615 | |
616 VEAEncoder::~VEAEncoder() { | |
617 base::WaitableEvent release_waiter( | |
618 base::WaitableEvent::ResetPolicy::MANUAL, | |
619 base::WaitableEvent::InitialState::NOT_SIGNALED); | |
620 // base::Unretained is safe because the class will be alive until | |
621 // |release_waiter| is signaled. | |
622 // TODO(emircan): Consider refactoring media::VideoEncodeAccelerator to avoid | |
623 // using naked pointers and using DeleteSoon() here, see | |
624 // http://crbug.com/701627. | |
625 // It is currently unsafe because |video_encoder_| might be in use on another | |
626 // function on |encoding_task_runner_|, see http://crbug.com/701030. | |
627 encoding_task_runner_->PostTask( | |
628 FROM_HERE, base::Bind(&VEAEncoder::DestroyOnEncodingTaskRunner, | |
629 base::Unretained(this), &release_waiter)); | |
630 release_waiter.Wait(); | |
631 } | |
632 | |
633 void VEAEncoder::RequireBitstreamBuffers(unsigned int /*input_count*/, | |
634 const gfx::Size& input_coded_size, | |
635 size_t output_buffer_size) { | |
636 DVLOG(3) << __func__; | |
637 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
638 | |
639 vea_requested_input_coded_size_ = input_coded_size; | |
640 output_buffers_.clear(); | |
641 std::queue<std::unique_ptr<base::SharedMemory>>().swap(input_buffers_); | |
642 | |
643 for (int i = 0; i < kVEAEncoderOutputBufferCount; ++i) { | |
644 std::unique_ptr<base::SharedMemory> shm = | |
645 gpu_factories_->CreateSharedMemory(output_buffer_size); | |
646 if (shm) | |
647 output_buffers_.push_back(base::WrapUnique(shm.release())); | |
648 } | |
649 | |
650 for (size_t i = 0; i < output_buffers_.size(); ++i) | |
651 UseOutputBitstreamBufferId(i); | |
652 } | |
653 | |
654 void VEAEncoder::BitstreamBufferReady(int32_t bitstream_buffer_id, | |
655 size_t payload_size, | |
656 bool keyframe, | |
657 base::TimeDelta timestamp) { | |
658 DVLOG(3) << __func__; | |
659 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
660 | |
661 base::SharedMemory* output_buffer = | |
662 output_buffers_[bitstream_buffer_id].get(); | |
663 | |
664 std::unique_ptr<std::string> data(new std::string); | |
665 data->append(reinterpret_cast<char*>(output_buffer->memory()), payload_size); | |
666 | |
667 const auto front_frame = frames_in_encode_.front(); | |
668 frames_in_encode_.pop(); | |
669 origin_task_runner_->PostTask( | |
670 FROM_HERE, base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_, | |
671 front_frame.first, base::Passed(&data), nullptr, | |
672 front_frame.second, keyframe)); | |
673 UseOutputBitstreamBufferId(bitstream_buffer_id); | |
674 } | |
675 | |
676 void VEAEncoder::NotifyError(media::VideoEncodeAccelerator::Error error) { | |
677 DVLOG(3) << __func__; | |
678 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
679 on_error_callback_.Run(); | |
680 error_notified_ = true; | |
681 } | |
682 | |
683 void VEAEncoder::UseOutputBitstreamBufferId(int32_t bitstream_buffer_id) { | |
684 DVLOG(3) << __func__; | |
685 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
686 | |
687 video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer( | |
688 bitstream_buffer_id, output_buffers_[bitstream_buffer_id]->handle(), | |
689 output_buffers_[bitstream_buffer_id]->mapped_size())); | |
690 } | |
691 | |
692 void VEAEncoder::FrameFinished(std::unique_ptr<base::SharedMemory> shm) { | |
693 DVLOG(3) << __func__; | |
694 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
695 input_buffers_.push(std::move(shm)); | |
696 } | |
697 | |
698 void VEAEncoder::EncodeOnEncodingTaskRunner( | |
699 scoped_refptr<VideoFrame> frame, | |
700 base::TimeTicks capture_timestamp) { | |
701 DVLOG(3) << __func__; | |
702 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
703 | |
704 if (input_visible_size_ != frame->visible_rect().size() && video_encoder_) | |
705 video_encoder_.reset(); | |
706 | |
707 if (!video_encoder_) | |
708 ConfigureEncoderOnEncodingTaskRunner(frame->visible_rect().size()); | |
709 | |
710 if (error_notified_) { | |
711 DVLOG(3) << "An error occurred in VEA encoder"; | |
712 return; | |
713 } | |
714 | |
715 // Drop frames if there is no output buffers available. | |
716 if (output_buffers_.empty()) { | |
717 // TODO(emircan): Investigate if resetting encoder would help. | |
718 DVLOG(3) << "Might drop frame."; | |
719 last_frame_.reset( | |
720 new std::pair<scoped_refptr<VideoFrame>, base::TimeTicks>( | |
721 frame, capture_timestamp)); | |
722 return; | |
723 } | |
724 | |
725 // If first frame hasn't been encoded, do it first. | |
726 if (last_frame_) { | |
727 std::unique_ptr<VideoFrameAndTimestamp> last_frame(last_frame_.release()); | |
728 EncodeOnEncodingTaskRunner(last_frame->first, last_frame->second); | |
729 } | |
730 | |
731 // Lower resolutions may fall back to SW encoder in some platforms, i.e. Mac. | |
732 // In that case, the encoder expects more frames before returning result. | |
733 // Therefore, a copy is necessary to release the current frame. | |
734 // Only STORAGE_SHMEM backed frames can be shared with GPU process, therefore | |
735 // a copy is required for other storage types. | |
736 scoped_refptr<media::VideoFrame> video_frame = frame; | |
737 if (video_frame->storage_type() != VideoFrame::STORAGE_SHMEM || | |
738 vea_requested_input_coded_size_ != frame->coded_size() || | |
739 input_visible_size_.width() < kVEAEncoderMinResolutionWidth || | |
740 input_visible_size_.height() < kVEAEncoderMinResolutionHeight) { | |
741 // Create SharedMemory backed input buffers as necessary. These SharedMemory | |
742 // instances will be shared with GPU process. | |
743 std::unique_ptr<base::SharedMemory> input_buffer; | |
744 const size_t desired_mapped_size = media::VideoFrame::AllocationSize( | |
745 media::PIXEL_FORMAT_I420, vea_requested_input_coded_size_); | |
746 if (input_buffers_.empty()) { | |
747 input_buffer = gpu_factories_->CreateSharedMemory(desired_mapped_size); | |
748 } else { | |
749 do { | |
750 input_buffer = std::move(input_buffers_.front()); | |
751 input_buffers_.pop(); | |
752 } while (!input_buffers_.empty() && | |
753 input_buffer->mapped_size() < desired_mapped_size); | |
754 if (!input_buffer || input_buffer->mapped_size() < desired_mapped_size) | |
755 return; | |
756 } | |
757 | |
758 video_frame = media::VideoFrame::WrapExternalSharedMemory( | |
759 media::PIXEL_FORMAT_I420, vea_requested_input_coded_size_, | |
760 gfx::Rect(input_visible_size_), input_visible_size_, | |
761 reinterpret_cast<uint8_t*>(input_buffer->memory()), | |
762 input_buffer->mapped_size(), input_buffer->handle(), 0, | |
763 frame->timestamp()); | |
764 video_frame->AddDestructionObserver(media::BindToCurrentLoop( | |
765 base::Bind(&VEAEncoder::FrameFinished, this, | |
766 base::Passed(std::move(input_buffer))))); | |
767 libyuv::I420Copy(frame->visible_data(media::VideoFrame::kYPlane), | |
768 frame->stride(media::VideoFrame::kYPlane), | |
769 frame->visible_data(media::VideoFrame::kUPlane), | |
770 frame->stride(media::VideoFrame::kUPlane), | |
771 frame->visible_data(media::VideoFrame::kVPlane), | |
772 frame->stride(media::VideoFrame::kVPlane), | |
773 video_frame->visible_data(media::VideoFrame::kYPlane), | |
774 video_frame->stride(media::VideoFrame::kYPlane), | |
775 video_frame->visible_data(media::VideoFrame::kUPlane), | |
776 video_frame->stride(media::VideoFrame::kUPlane), | |
777 video_frame->visible_data(media::VideoFrame::kVPlane), | |
778 video_frame->stride(media::VideoFrame::kVPlane), | |
779 input_visible_size_.width(), input_visible_size_.height()); | |
780 } | |
781 frames_in_encode_.push(std::make_pair( | |
782 media::WebmMuxer::VideoParameters(frame), capture_timestamp)); | |
783 | |
784 video_encoder_->Encode(video_frame, false); | |
785 } | |
786 | |
787 void VEAEncoder::ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) { | |
788 DVLOG(3) << __func__; | |
789 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
790 DCHECK(gpu_factories_->GetTaskRunner()->BelongsToCurrentThread()); | |
791 DCHECK_GT(bits_per_second_, 0); | |
792 | |
793 input_visible_size_ = size; | |
794 video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator(); | |
795 if (!video_encoder_ || | |
796 !video_encoder_->Initialize(media::PIXEL_FORMAT_I420, input_visible_size_, | |
797 codec_, bits_per_second_, this)) { | |
798 NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError); | |
799 } | |
800 } | |
801 | |
// Releases the hardware encoder on the encoding thread, then signals
// |async_waiter| to unblock whichever thread is waiting for the teardown to
// complete.
void VEAEncoder::DestroyOnEncodingTaskRunner(
    base::WaitableEvent* async_waiter) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());
  video_encoder_.reset();
  async_waiter->Signal();
}
808 | 306 |
// static
// Joins and destroys |encoding_thread| together with the libvpx context.
// Posted to the main thread by ~VpxEncoder(), since a thread cannot Stop()
// (join) itself.
void VpxEncoder::ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
                                 ScopedVpxCodecCtxPtr encoder) {
  DCHECK(encoding_thread->IsRunning());
  encoding_thread->Stop();
  // Both |encoding_thread| and |encoder| will be destroyed at end-of-scope.
}
816 | |
VpxEncoder::VpxEncoder(
    bool use_vp9,
    const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
    int32_t bits_per_second)
    : Encoder(on_encoded_video_callback, bits_per_second),
      use_vp9_(use_vp9) {
  // A zero timebase denominator is the sentinel that IsInitialized() uses to
  // detect a not-yet-configured codec; ConfigureEncoderOnEncodingTaskRunner()
  // later installs a real (non-zero) denominator.
  codec_config_.g_timebase.den = 0;        // Not initialized.
  alpha_codec_config_.g_timebase.den = 0;  // Not initialized.
  // |encoding_thread_| is expected to have been started by the base class.
  DCHECK(encoding_thread_->IsRunning());
}
827 | |
VpxEncoder::~VpxEncoder() {
  // Hand both the encoding thread and the codec context over to
  // ShutdownEncoder() on the main thread: Stop()/join of |encoding_thread_|
  // must not run on the encoding thread itself.
  main_task_runner_->PostTask(FROM_HERE,
                              base::Bind(&VpxEncoder::ShutdownEncoder,
                                         base::Passed(&encoding_thread_),
                                         base::Passed(&encoder_)));
}
834 | |
// Encodes one frame with libvpx, lazily (re)configuring the encoder(s) when
// the frame size changes. Frames with an alpha plane (YV12A) are encoded as
// two streams: the YUV planes through |encoder_| and the alpha plane through
// |alpha_encoder_| (with dummy chroma planes). Results are posted back to the
// origin task runner.
void VpxEncoder::EncodeOnEncodingTaskRunner(
    scoped_refptr<VideoFrame> frame,
    base::TimeTicks capture_timestamp) {
  TRACE_EVENT0("video", "VpxEncoder::EncodeOnEncodingTaskRunner");
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  const gfx::Size frame_size = frame->visible_rect().size();
  const base::TimeDelta duration = EstimateFrameDuration(frame);
  const media::WebmMuxer::VideoParameters video_params(frame);

  // (Re)configure the main encoder on first use or on frame-size change.
  if (!IsInitialized(codec_config_) ||
      gfx::Size(codec_config_.g_w, codec_config_.g_h) != frame_size) {
    ConfigureEncoderOnEncodingTaskRunner(frame_size, &codec_config_, &encoder_);
  }

  const bool frame_has_alpha = frame->format() == media::PIXEL_FORMAT_YV12A;
  if (frame_has_alpha && (!IsInitialized(alpha_codec_config_) ||
                          gfx::Size(alpha_codec_config_.g_w,
                                    alpha_codec_config_.g_h) != frame_size)) {
    ConfigureEncoderOnEncodingTaskRunner(frame_size, &alpha_codec_config_,
                                         &alpha_encoder_);
    // The alpha plane is fed to the encoder as the Y plane of an I420 image;
    // precompute strides/offsets for the dummy U/V planes that accompany it.
    u_plane_stride_ = media::VideoFrame::RowBytes(
        VideoFrame::kUPlane, frame->format(), frame_size.width());
    v_plane_stride_ = media::VideoFrame::RowBytes(
        VideoFrame::kVPlane, frame->format(), frame_size.width());
    v_plane_offset_ = media::VideoFrame::PlaneSize(
                          frame->format(), VideoFrame::kUPlane, frame_size)
                          .GetArea();
    alpha_dummy_planes_.resize(
        v_plane_offset_ + media::VideoFrame::PlaneSize(
                              frame->format(), VideoFrame::kVPlane, frame_size)
                              .GetArea());
    // It is more expensive to encode 0x00, so use 0x80 instead.
    std::fill(alpha_dummy_planes_.begin(), alpha_dummy_planes_.end(), 0x80);
  }
  // If we introduced a new alpha frame, force keyframe.
  const bool force_keyframe = frame_has_alpha && !last_frame_had_alpha_;
  last_frame_had_alpha_ = frame_has_alpha;

  // Encode the YUV planes; |keyframe| reflects what libvpx actually emitted.
  std::unique_ptr<std::string> data(new std::string);
  bool keyframe = false;
  DoEncode(encoder_.get(), frame_size, frame->data(VideoFrame::kYPlane),
           frame->visible_data(VideoFrame::kYPlane),
           frame->stride(VideoFrame::kYPlane),
           frame->visible_data(VideoFrame::kUPlane),
           frame->stride(VideoFrame::kUPlane),
           frame->visible_data(VideoFrame::kVPlane),
           frame->stride(VideoFrame::kVPlane), duration, force_keyframe,
           data.get(), &keyframe);

  // Encode the alpha plane with dummy chroma, forcing its keyframe decision
  // to match the YUV stream so both stay in sync.
  std::unique_ptr<std::string> alpha_data(new std::string);
  if (frame_has_alpha) {
    bool alpha_keyframe = false;
    DoEncode(alpha_encoder_.get(), frame_size, frame->data(VideoFrame::kAPlane),
             frame->visible_data(VideoFrame::kAPlane),
             frame->stride(VideoFrame::kAPlane), alpha_dummy_planes_.data(),
             u_plane_stride_, alpha_dummy_planes_.data() + v_plane_offset_,
             v_plane_stride_, duration, keyframe, alpha_data.get(),
             &alpha_keyframe);
    DCHECK_EQ(keyframe, alpha_keyframe);
  }
  // Drop our reference to the frame before posting the result.
  frame = nullptr;

  origin_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_,
                 video_params, base::Passed(&data), base::Passed(&alpha_data),
                 capture_timestamp, keyframe));
}
904 | |
// Feeds one I420 image to |encoder| and copies the resulting compressed frame
// (if any) into |*output_data|, reporting via |*keyframe| whether it was a
// keyframe. The plane pointers/strides may reference the real frame planes or
// dummy data (the latter is used for the alpha-channel encode).
void VpxEncoder::DoEncode(vpx_codec_ctx_t* const encoder,
                          const gfx::Size& frame_size,
                          uint8_t* const data,
                          uint8_t* const y_plane,
                          int y_stride,
                          uint8_t* const u_plane,
                          int u_stride,
                          uint8_t* const v_plane,
                          int v_stride,
                          const base::TimeDelta& duration,
                          bool force_keyframe,
                          std::string* const output_data,
                          bool* const keyframe) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  // Wrap the caller's buffers in a vpx_image_t (no copy), then override the
  // plane pointers/strides to point at the visible data explicitly.
  vpx_image_t vpx_image;
  vpx_image_t* const result =
      vpx_img_wrap(&vpx_image, VPX_IMG_FMT_I420, frame_size.width(),
                   frame_size.height(), 1 /* align */, data);
  DCHECK_EQ(result, &vpx_image);
  vpx_image.planes[VPX_PLANE_Y] = y_plane;
  vpx_image.planes[VPX_PLANE_U] = u_plane;
  vpx_image.planes[VPX_PLANE_V] = v_plane;
  vpx_image.stride[VPX_PLANE_Y] = y_stride;
  vpx_image.stride[VPX_PLANE_U] = u_stride;
  vpx_image.stride[VPX_PLANE_V] = v_stride;

  const vpx_codec_flags_t flags = force_keyframe ? VPX_EFLAG_FORCE_KF : 0;
  // Encode the frame. The presentation time stamp argument here is fixed to
  // zero to force the encoder to base its single-frame bandwidth calculations
  // entirely on |predicted_frame_duration|.
  const vpx_codec_err_t ret =
      vpx_codec_encode(encoder, &vpx_image, 0 /* pts */,
                       duration.InMicroseconds(), flags, VPX_DL_REALTIME);
  DCHECK_EQ(ret, VPX_CODEC_OK)
      << vpx_codec_err_to_string(ret) << ", #" << vpx_codec_error(encoder)
      << " -" << vpx_codec_error_detail(encoder);

  // Pull out the first (and, with g_lag_in_frames == 0, only) compressed
  // frame packet; other packet kinds (e.g. stats) are skipped.
  *keyframe = false;
  vpx_codec_iter_t iter = NULL;
  const vpx_codec_cx_pkt_t* pkt = NULL;
  while ((pkt = vpx_codec_get_cx_data(encoder, &iter)) != NULL) {
    if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
      continue;
    output_data->assign(static_cast<char*>(pkt->data.frame.buf),
                        pkt->data.frame.sz);
    *keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
    break;
  }
}
955 | |
956 void VpxEncoder::ConfigureEncoderOnEncodingTaskRunner( | |
957 const gfx::Size& size, | |
958 vpx_codec_enc_cfg_t* codec_config, | |
959 ScopedVpxCodecCtxPtr* encoder) { | |
960 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
961 if (IsInitialized(*codec_config)) { | |
962 // TODO(mcasas) VP8 quirk/optimisation: If the new |size| is strictly less- | |
963 // than-or-equal than the old size, in terms of area, the existing encoder | |
964 // instance could be reused after changing |codec_config->{g_w,g_h}|. | |
965 DVLOG(1) << "Destroying/Re-Creating encoder for new frame size: " | |
966 << gfx::Size(codec_config->g_w, codec_config->g_h).ToString() | |
967 << " --> " << size.ToString() << (use_vp9_ ? " vp9" : " vp8"); | |
968 encoder->reset(); | |
969 } | |
970 | |
971 const vpx_codec_iface_t* codec_interface = | |
972 use_vp9_ ? vpx_codec_vp9_cx() : vpx_codec_vp8_cx(); | |
973 vpx_codec_err_t result = vpx_codec_enc_config_default( | |
974 codec_interface, codec_config, 0 /* reserved */); | |
975 DCHECK_EQ(VPX_CODEC_OK, result); | |
976 | |
977 DCHECK_EQ(320u, codec_config->g_w); | |
978 DCHECK_EQ(240u, codec_config->g_h); | |
979 DCHECK_EQ(256u, codec_config->rc_target_bitrate); | |
980 // Use the selected bitrate or adjust default bit rate to account for the | |
981 // actual size. Note: |rc_target_bitrate| units are kbit per second. | |
982 if (bits_per_second_ > 0) { | |
983 codec_config->rc_target_bitrate = bits_per_second_ / 1000; | |
984 } else { | |
985 codec_config->rc_target_bitrate = size.GetArea() * | |
986 codec_config->rc_target_bitrate / | |
987 codec_config->g_w / codec_config->g_h; | |
988 } | |
989 // Both VP8/VP9 configuration should be Variable BitRate by default. | |
990 DCHECK_EQ(VPX_VBR, codec_config->rc_end_usage); | |
991 if (use_vp9_) { | |
992 // Number of frames to consume before producing output. | |
993 codec_config->g_lag_in_frames = 0; | |
994 | |
995 // DCHECK that the profile selected by default is I420 (magic number 0). | |
996 DCHECK_EQ(0u, codec_config->g_profile); | |
997 } else { | |
998 // VP8 always produces frames instantaneously. | |
999 DCHECK_EQ(0u, codec_config->g_lag_in_frames); | |
1000 } | |
1001 | |
1002 DCHECK(size.width()); | |
1003 DCHECK(size.height()); | |
1004 codec_config->g_w = size.width(); | |
1005 codec_config->g_h = size.height(); | |
1006 codec_config->g_pass = VPX_RC_ONE_PASS; | |
1007 | |
1008 // Timebase is the smallest interval used by the stream, can be set to the | |
1009 // frame rate or to e.g. microseconds. | |
1010 codec_config->g_timebase.num = 1; | |
1011 codec_config->g_timebase.den = base::Time::kMicrosecondsPerSecond; | |
1012 | |
1013 // Let the encoder decide where to place the Keyframes, between min and max. | |
1014 // In VPX_KF_AUTO mode libvpx will sometimes emit keyframes regardless of min/ | |
1015 // max distance out of necessity. | |
1016 // Note that due to http://crbug.com/440223, it might be necessary to force a | |
1017 // key frame after 10,000frames since decoding fails after 30,000 non-key | |
1018 // frames. | |
1019 // Forcing a keyframe in regular intervals also allows seeking in the | |
1020 // resulting recording with decent performance. | |
1021 codec_config->kf_mode = VPX_KF_AUTO; | |
1022 codec_config->kf_min_dist = 0; | |
1023 codec_config->kf_max_dist = 100; | |
1024 | |
1025 codec_config->g_threads = GetNumberOfThreadsForEncoding(); | |
1026 | |
1027 // Number of frames to consume before producing output. | |
1028 codec_config->g_lag_in_frames = 0; | |
1029 | |
1030 encoder->reset(new vpx_codec_ctx_t); | |
1031 const vpx_codec_err_t ret = vpx_codec_enc_init( | |
1032 encoder->get(), codec_interface, codec_config, 0 /* flags */); | |
1033 DCHECK_EQ(VPX_CODEC_OK, ret); | |
1034 | |
1035 if (use_vp9_) { | |
1036 // Values of VP8E_SET_CPUUSED greater than 0 will increase encoder speed at | |
1037 // the expense of quality up to a maximum value of 8 for VP9, by tuning the | |
1038 // target time spent encoding the frame. Go from 8 to 5 (values for real | |
1039 // time encoding) depending on the amount of cores available in the system. | |
1040 const int kCpuUsed = | |
1041 std::max(5, 8 - base::SysInfo::NumberOfProcessors() / 2); | |
1042 result = vpx_codec_control(encoder->get(), VP8E_SET_CPUUSED, kCpuUsed); | |
1043 DLOG_IF(WARNING, VPX_CODEC_OK != result) << "VP8E_SET_CPUUSED failed"; | |
1044 } | |
1045 } | |
1046 | |
// Returns true once |codec_config| has been filled in by
// ConfigureEncoderOnEncodingTaskRunner(); the constructor zeroes the timebase
// denominator as the "not configured yet" sentinel.
bool VpxEncoder::IsInitialized(const vpx_codec_enc_cfg_t& codec_config) const {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());
  return codec_config.g_timebase.den != 0;
}
1051 | |
1052 base::TimeDelta VpxEncoder::EstimateFrameDuration( | |
1053 const scoped_refptr<VideoFrame>& frame) { | |
1054 DCHECK(encoding_task_runner_->BelongsToCurrentThread()); | |
1055 | |
1056 using base::TimeDelta; | |
1057 TimeDelta predicted_frame_duration; | |
1058 if (!frame->metadata()->GetTimeDelta(VideoFrameMetadata::FRAME_DURATION, | |
1059 &predicted_frame_duration) || | |
1060 predicted_frame_duration <= TimeDelta()) { | |
1061 // The source of the video frame did not provide the frame duration. Use | |
1062 // the actual amount of time between the current and previous frame as a | |
1063 // prediction for the next frame's duration. | |
1064 // TODO(mcasas): This duration estimation could lead to artifacts if the | |
1065 // cadence of the received stream is compromised (e.g. camera freeze, pause, | |
1066 // remote packet loss). Investigate using GetFrameRate() in this case. | |
1067 predicted_frame_duration = frame->timestamp() - last_frame_timestamp_; | |
1068 } | |
1069 last_frame_timestamp_ = frame->timestamp(); | |
1070 // Make sure |predicted_frame_duration| is in a safe range of values. | |
1071 const TimeDelta kMaxFrameDuration = TimeDelta::FromSecondsD(1.0 / 8); | |
1072 const TimeDelta kMinFrameDuration = TimeDelta::FromMilliseconds(1); | |
1073 return std::min(kMaxFrameDuration, std::max(predicted_frame_duration, | |
1074 kMinFrameDuration)); | |
1075 } | |
1076 | |
1077 #if BUILDFLAG(RTC_USE_H264) | |
1078 | |
// static
// Joins and destroys |encoding_thread| together with the OpenH264 encoder.
// Posted to the main thread by ~H264Encoder(), since a thread cannot Stop()
// (join) itself.
void H264Encoder::ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
                                  ScopedISVCEncoderPtr encoder) {
  DCHECK(encoding_thread->IsRunning());
  encoding_thread->Stop();
  // Both |encoding_thread| and |encoder| will be destroyed at end-of-scope.
}
1086 | |
H264Encoder::H264Encoder(
    const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
    int32_t bits_per_second)
    : Encoder(on_encoded_video_callback, bits_per_second) {
  // |encoding_thread_| is expected to have been started by the base class.
  DCHECK(encoding_thread_->IsRunning());
}
1093 | |
H264Encoder::~H264Encoder() {
  // Hand both the encoding thread and the OpenH264 encoder over to
  // ShutdownEncoder() on the main thread: Stop()/join of |encoding_thread_|
  // must not run on the encoding thread itself.
  main_task_runner_->PostTask(FROM_HERE,
                              base::Bind(&H264Encoder::ShutdownEncoder,
                                         base::Passed(&encoding_thread_),
                                         base::Passed(&openh264_encoder_)));
}
1100 | |
// Encodes one frame with OpenH264, lazily (re)configuring the encoder when
// the frame size changes, then posts the Annex-B bitstream (NAL units with
// start codes) back to the origin task runner.
void H264Encoder::EncodeOnEncodingTaskRunner(
    scoped_refptr<VideoFrame> frame,
    base::TimeTicks capture_timestamp) {
  TRACE_EVENT0("video", "H264Encoder::EncodeOnEncodingTaskRunner");
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  const gfx::Size frame_size = frame->visible_rect().size();
  if (!openh264_encoder_ || configured_size_ != frame_size) {
    ConfigureEncoderOnEncodingTaskRunner(frame_size);
    // Timestamps fed to OpenH264 are relative to the first frame after
    // (re)configuration.
    first_frame_timestamp_ = capture_timestamp;
  }

  // Describe the frame's I420 planes to OpenH264 without copying them.
  SSourcePicture picture = {};
  picture.iPicWidth = frame_size.width();
  picture.iPicHeight = frame_size.height();
  picture.iColorFormat = EVideoFormatType::videoFormatI420;
  picture.uiTimeStamp =
      (capture_timestamp - first_frame_timestamp_).InMilliseconds();
  picture.iStride[0] = frame->stride(VideoFrame::kYPlane);
  picture.iStride[1] = frame->stride(VideoFrame::kUPlane);
  picture.iStride[2] = frame->stride(VideoFrame::kVPlane);
  picture.pData[0] = frame->visible_data(VideoFrame::kYPlane);
  picture.pData[1] = frame->visible_data(VideoFrame::kUPlane);
  picture.pData[2] = frame->visible_data(VideoFrame::kVPlane);

  SFrameBSInfo info = {};
  if (openh264_encoder_->EncodeFrame(&picture, &info) != cmResultSuccess) {
    NOTREACHED() << "OpenH264 encoding failed";
    return;
  }
  const media::WebmMuxer::VideoParameters video_params(frame);
  // Drop our reference to the frame before assembling/posting the result.
  frame = nullptr;

  // Concatenate all layers' NAL units (start codes included) into |data|.
  std::unique_ptr<std::string> data(new std::string);
  const uint8_t kNALStartCode[4] = {0, 0, 0, 1};
  for (int layer = 0; layer < info.iLayerNum; ++layer) {
    const SLayerBSInfo& layerInfo = info.sLayerInfo[layer];
    // Iterate NAL units making up this layer, noting fragments.
    size_t layer_len = 0;
    for (int nal = 0; nal < layerInfo.iNalCount; ++nal) {
      // The following DCHECKs make sure that the header of each NAL unit is OK.
      DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
      DCHECK_EQ(kNALStartCode[0], layerInfo.pBsBuf[layer_len+0]);
      DCHECK_EQ(kNALStartCode[1], layerInfo.pBsBuf[layer_len+1]);
      DCHECK_EQ(kNALStartCode[2], layerInfo.pBsBuf[layer_len+2]);
      DCHECK_EQ(kNALStartCode[3], layerInfo.pBsBuf[layer_len+3]);

      layer_len += layerInfo.pNalLengthInByte[nal];
    }
    // Copy the entire layer's data (including NAL start codes).
    data->append(reinterpret_cast<char*>(layerInfo.pBsBuf), layer_len);
  }

  // IDR frames are the H.264 equivalent of keyframes.
  const bool is_key_frame = info.eFrameType == videoFrameTypeIDR;
  origin_task_runner_->PostTask(
      FROM_HERE, base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_,
                            video_params, base::Passed(&data), nullptr,
                            capture_timestamp, is_key_frame));
}
1160 | |
// Creates and initializes a fresh OpenH264 encoder for frames of |size|,
// configured for real-time camera encoding with a single spatial layer and a
// single thread. Replaces any previously configured encoder.
void H264Encoder::ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) {
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());
  ISVCEncoder* temp_encoder = nullptr;
  if (WelsCreateSVCEncoder(&temp_encoder) != 0) {
    NOTREACHED() << "Failed to create OpenH264 encoder";
    return;
  }
  openh264_encoder_.reset(temp_encoder);
  configured_size_ = size;

#if DCHECK_IS_ON()
  // More verbose encoder logging in debug builds only.
  int trace_level = WELS_LOG_INFO;
  openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
#endif

  SEncParamExt init_params;
  openh264_encoder_->GetDefaultParams(&init_params);
  init_params.iUsageType = CAMERA_VIDEO_REAL_TIME;

  // Sanity-check the library defaults this configuration relies on.
  DCHECK_EQ(AUTO_REF_PIC_COUNT, init_params.iNumRefFrame);
  DCHECK(!init_params.bSimulcastAVC);

  init_params.uiIntraPeriod = 100;  // Same as for VpxEncoder.
  init_params.iPicWidth = size.width();
  init_params.iPicHeight = size.height();

  DCHECK_EQ(RC_QUALITY_MODE, init_params.iRCMode);
  DCHECK_EQ(0, init_params.iPaddingFlag);
  DCHECK_EQ(UNSPECIFIED_BIT_RATE, init_params.iTargetBitrate);
  DCHECK_EQ(UNSPECIFIED_BIT_RATE, init_params.iMaxBitrate);
  // Bitrate-controlled when the caller requested a bitrate, otherwise let the
  // encoder run without rate control.
  if (bits_per_second_ > 0) {
    init_params.iRCMode = RC_BITRATE_MODE;
    init_params.iTargetBitrate = bits_per_second_;
  } else {
    init_params.iRCMode = RC_OFF_MODE;
  }

  // Threading model: Set to 1 due to https://crbug.com/583348.
  init_params.iMultipleThreadIdc = 1;

  // TODO(mcasas): consider reducing complexity if there are few CPUs available.
  init_params.iComplexityMode = MEDIUM_COMPLEXITY;
  DCHECK(!init_params.bEnableDenoise);
  DCHECK(init_params.bEnableFrameSkip);

  // The base spatial layer 0 is the only one we use.
  DCHECK_EQ(1, init_params.iSpatialLayerNum);
  init_params.sSpatialLayers[0].iVideoWidth = init_params.iPicWidth;
  init_params.sSpatialLayers[0].iVideoHeight = init_params.iPicHeight;
  init_params.sSpatialLayers[0].iSpatialBitrate = init_params.iTargetBitrate;

  // When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto design
  // it with cpu core number.
  // TODO(sprang): Set to 0 when we understand why the rate controller borks
  // when uiSliceNum > 1. See https://github.com/cisco/openh264/issues/2591
  init_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
  init_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
      SM_FIXEDSLCNUM_SLICE;

  if (openh264_encoder_->InitializeExt(&init_params) != cmResultSuccess) {
    NOTREACHED() << "Failed to initialize OpenH264 encoder";
    return;
  }

  // Input frames are handed over as I420 (see EncodeOnEncodingTaskRunner()).
  int pixel_format = EVideoFormatType::videoFormatI420;
  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &pixel_format);
}
1228 #endif //#if BUILDFLAG(RTC_USE_H264) | |
1229 | |
1230 } // anonymous namespace | |
1231 | |
// static
// Delegates to the codec enumerator to pick the preferred codec for
// recording.
VideoTrackRecorder::CodecId VideoTrackRecorder::GetPreferredCodecId() {
  return GetCodecEnumerator()->GetPreferredCodecId();
}
1236 | 311 |
1237 VideoTrackRecorder::VideoTrackRecorder( | 312 VideoTrackRecorder::VideoTrackRecorder( |
1238 CodecId codec, | 313 CodecId codec, |
1239 const blink::WebMediaStreamTrack& track, | 314 const blink::WebMediaStreamTrack& track, |
1240 const OnEncodedVideoCB& on_encoded_video_callback, | 315 const OnEncodedVideoCB& on_encoded_video_callback, |
1241 int32_t bits_per_second) | 316 int32_t bits_per_second) |
1242 : track_(track), | 317 : track_(track), |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1359 // thread. | 434 // thread. |
1360 MediaStreamVideoSink::DisconnectFromTrack(); | 435 MediaStreamVideoSink::DisconnectFromTrack(); |
1361 encoder_ = nullptr; | 436 encoder_ = nullptr; |
1362 MediaStreamVideoSink::ConnectToTrack( | 437 MediaStreamVideoSink::ConnectToTrack( |
1363 track_, | 438 track_, |
1364 media::BindToCurrentLoop(base::Bind(initialize_encoder_callback_, | 439 media::BindToCurrentLoop(base::Bind(initialize_encoder_callback_, |
1365 false /*allow_vea_encoder*/)), | 440 false /*allow_vea_encoder*/)), |
1366 false); | 441 false); |
1367 } | 442 } |
1368 | 443 |
// Test-only accessor: reports whether the currently-selected encoder supports
// an alpha channel. Requires an encoder to have been created already.
bool VideoTrackRecorder::CanEncodeAlphaChannelForTesting() {
  DCHECK(encoder_);
  return encoder_->CanEncodeAlphaChannel();
}
1373 | |
1374 } // namespace content | 444 } // namespace content |
OLD | NEW |