Chromium Code Reviews| Index: content/renderer/media/video_track_recorder.cc |
| diff --git a/content/renderer/media/video_track_recorder.cc b/content/renderer/media/video_track_recorder.cc |
| index 8da8f3084922aa6e5cd6d32e9b3a918183e05ddf..7624737a7f4bd3abf39d281cf314895c89617226 100644 |
| --- a/content/renderer/media/video_track_recorder.cc |
| +++ b/content/renderer/media/video_track_recorder.cc |
| @@ -10,11 +10,16 @@ |
| #include "base/logging.h" |
| #include "base/macros.h" |
| #include "base/sys_info.h" |
| +#include "base/task_runner_util.h" |
| #include "base/threading/thread.h" |
| #include "base/time/time.h" |
| #include "base/trace_event/trace_event.h" |
| +#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h" |
| +#include "content/renderer/render_thread_impl.h" |
| +#include "media/base/bind_to_current_loop.h" |
| #include "media/base/video_frame.h" |
| #include "media/base/video_util.h" |
| +#include "third_party/libyuv/include/libyuv/convert.h" |
| #include "ui/gfx/geometry/size.h" |
| #if BUILDFLAG(RTC_USE_H264) |
| @@ -34,6 +39,47 @@ extern "C" { |
| using media::VideoFrame; |
| using media::VideoFrameMetadata; |
| +namespace { |
|
mcasas
2016/05/18 22:22:58
IIRC, you can move this anonymous namespace
inside
emircan
2016/05/21 00:01:17
Done.
|
| + |
// Below this resolution the platform encoder may fall back to a software
// path on some platforms (e.g. Mac) — see the input-copy logic in
// VEAEncoder::EncodeOnEncodingTaskRunner().
const int kVEAEncoderMinResolutionWidth = 640;
const int kVEAEncoderMinResolutionHeight = 480;
// Number of shared-memory output (bitstream) buffers registered with the VEA
// in RequireBitstreamBuffers().
const int kVEAEncoderOutputBufferCount = 4;
| + |
| +// Returns the preferred codec profile from VEA supported profiles. If no |
| +// profile is found, returns VIDEO_CODEC_PROFILE_UNKNOWN. |
| +media::VideoCodecProfile GetVEASupportedProfile( |
|
mcasas
2016/05/18 22:22:58
nit: s/GetVEASupportedProfile/CodecIdToVEAProfile/
emircan
2016/05/21 00:01:17
Done.
|
| + content::VideoTrackRecorder::CodecId codec) { |
| + content::RenderThreadImpl* render_thread_impl = |
| + content::RenderThreadImpl::current(); |
| + if (!render_thread_impl) |
| + return media::VIDEO_CODEC_PROFILE_UNKNOWN; |
| + |
| + media::GpuVideoAcceleratorFactories* gpu_factories = |
| + content::RenderThreadImpl::current()->GetGpuFactories(); |
| + if (!gpu_factories || !gpu_factories->IsGpuVideoAcceleratorEnabled()) { |
| + DVLOG(3) << "Couldn't initialize GpuVideoAcceleratorFactories"; |
| + return media::VIDEO_CODEC_PROFILE_UNKNOWN; |
| + } |
| + |
| + const media::VideoEncodeAccelerator::SupportedProfiles& profiles = |
| + gpu_factories->GetVideoEncodeAcceleratorSupportedProfiles(); |
| + for (const auto& profile : profiles) { |
| + if ((codec == content::VideoTrackRecorder::CodecId::VP8 && |
| + profile.profile >= media::VP8PROFILE_MIN && |
| + profile.profile <= media::VP8PROFILE_MAX) || |
| + (codec == content::VideoTrackRecorder::CodecId::VP9 && |
| + profile.profile >= media::VP9PROFILE_MIN && |
| + profile.profile <= media::VP9PROFILE_MAX) || |
| + (codec == content::VideoTrackRecorder::CodecId::H264 && |
| + profile.profile >= media::H264PROFILE_MIN && |
| + profile.profile <= media::H264PROFILE_MAX)) |
| + return profile.profile; |
| + } |
| + return media::VIDEO_CODEC_PROFILE_UNKNOWN; |
|
mcasas
2016/05/18 22:22:58
Just some side nit, if you could refactor this blo
emircan
2016/05/21 00:01:17
Done.
|
| +} |
| + |
| +} // anonymous namespace |
| + |
| namespace content { |
| // Base class to describe a generic Encoder, encapsulating all actual encoder |
| @@ -62,7 +108,7 @@ class VideoTrackRecorder::Encoder : public base::RefCountedThreadSafe<Encoder> { |
| on_encoded_video_callback_(on_encoded_video_callback), |
| bits_per_second_(bits_per_second) { |
| DCHECK(!on_encoded_video_callback_.is_null()); |
| - if (encoding_thread_) |
| + if (encoding_task_runner_) |
| return; |
| encoding_thread_.reset(new base::Thread("EncodingThread")); |
| encoding_thread_->Start(); |
| @@ -179,6 +225,69 @@ static int GetNumberOfThreadsForEncoding() { |
| return std::min(8, (base::SysInfo::NumberOfProcessors() + 1) / 2); |
| } |
// Class encapsulating VideoEncodeAccelerator interactions.
// Threading: constructed on the render thread; every other method (the
// VEA::Client callbacks, the *OnEncodingTaskRunner() overrides and the
// private helpers) DCHECKs that it runs on |encoding_task_runner_|, which is
// the GpuVideoAcceleratorFactories task runner handed to the base Encoder.
class VEAEncoder final : public VideoTrackRecorder::Encoder,
                         public media::VideoEncodeAccelerator::Client {
 public:
  VEAEncoder(
      const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
      int32_t bits_per_second,
      media::VideoCodecProfile codec);

  // media::VideoEncodeAccelerator::Client implementation.
  void RequireBitstreamBuffers(unsigned int input_count,
                               const gfx::Size& input_coded_size,
                               size_t output_buffer_size) override;
  void BitstreamBufferReady(int32_t bitstream_buffer_id,
                            size_t payload_size,
                            bool key_frame) override;
  void NotifyError(media::VideoEncodeAccelerator::Error error) override;

 private:
  // Hands output buffer |bitstream_buffer_id| (back) to the VEA for filling
  // and bumps |output_buffers_free_count_|.
  void UseOutputBitstreamBufferId(int32_t bitstream_buffer_id);
  // Destruction observer for wrapped input frames: recycles |shm| into
  // |input_buffers_|.
  void FrameFinished(std::unique_ptr<base::SharedMemory> shm);

  // VideoTrackRecorder::Encoder implementation.
  ~VEAEncoder() override;
  void EncodeOnEncodingTaskRunner(const scoped_refptr<VideoFrame>& frame,
                                  base::TimeTicks capture_timestamp) override;
  void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) override;

  // DCHECKed non-null in the constructor.
  media::GpuVideoAcceleratorFactories* const gpu_factories_;

  // Profile the VEA is initialized with; set once in the constructor.
  media::VideoCodecProfile codec_;

  // The underlying VEA to perform encoding on.
  std::unique_ptr<media::VideoEncodeAccelerator> video_encoder_;

  // Shared memory buffers for output with the VEA.
  ScopedVector<base::SharedMemory> output_buffers_;

  // Shared memory *input* buffers for the VEA, recycled as a FIFO via
  // FrameFinished(). (Original comment said "output", which was wrong.)
  std::queue<std::unique_ptr<base::SharedMemory>> input_buffers_;

  // The number of output buffers ready to be filled.
  int output_buffers_free_count_;

  // Tracks error status. Set by NotifyError(); never cleared in this class.
  bool error_notified_;

  // First frame seen after (re)configuration, held back until the VEA has
  // provided output buffers; see EncodeOnEncodingTaskRunner().
  std::unique_ptr<std::pair<scoped_refptr<VideoFrame>, base::TimeTicks>>
      first_frame_;
  // Whether the held-back first frame has been submitted to the VEA.
  bool first_frame_encoded_;

  // Size used to initialize encoder.
  gfx::Size input_size_;

  // Coded size that encoder requests as input.
  gfx::Size vea_requested_input_size_;

  // Frames and corresponding timestamps in encode as FIFO.
  std::queue<std::pair<scoped_refptr<VideoFrame>, base::TimeTicks>>
      frames_in_encode_;
};
| + |
| // Class encapsulating all libvpx interactions for VP8/VP9 encoding. |
| class VpxEncoder final : public VideoTrackRecorder::Encoder { |
| public: |
| @@ -264,6 +373,210 @@ class H264Encoder final : public VideoTrackRecorder::Encoder { |
| #endif // #if BUILDFLAG(RTC_USE_H264) |
VEAEncoder::VEAEncoder(
    const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
    int32_t bits_per_second,
    media::VideoCodecProfile codec)
    : Encoder(on_encoded_video_callback,
              bits_per_second,
              // All encoding work runs on the GpuVideoAcceleratorFactories
              // task runner, used as |encoding_task_runner_| by the base.
              RenderThreadImpl::current()->GetGpuFactories()->GetTaskRunner()),
      gpu_factories_(RenderThreadImpl::current()->GetGpuFactories()),
      codec_(codec),
      output_buffers_free_count_(0),
      error_notified_(false),
      first_frame_encoded_(false) {
  // VEAEncoder is only instantiated when GetVEASupportedProfile() succeeded,
  // so the factories are expected to exist.
  DCHECK(gpu_factories_);
}
| + |
| +VEAEncoder::~VEAEncoder() { |
| + encoding_task_runner_->PostTask( |
| + FROM_HERE, base::Bind(&media::VideoEncodeAccelerator::Destroy, |
| + base::Unretained(video_encoder_.release()))); |
| +} |
| + |
| +void VEAEncoder::RequireBitstreamBuffers(unsigned int input_count, |
|
mcasas
2016/05/18 22:22:58
Shouldn't we at least
DCHECK_GE(kVEAEncoderOutputB
emircan
2016/05/21 00:01:17
Not really as those two aren't related. I am comme
|
| + const gfx::Size& input_coded_size, |
| + size_t output_buffer_size) { |
| + DVLOG(3) << __FUNCTION__; |
| + DCHECK(encoding_task_runner_->BelongsToCurrentThread()); |
| + |
| + vea_requested_input_size_ = input_coded_size; |
| + output_buffers_.clear(); |
| + output_buffers_free_count_ = 0; |
| + std::queue<std::unique_ptr<base::SharedMemory>>().swap(input_buffers_); |
| + |
| + for (int i = 0; i < kVEAEncoderOutputBufferCount; ++i) { |
| + DCHECK(gpu_factories_); |
|
mcasas
2016/05/18 22:22:58
Needed? It's already checked in ctor l.388?
emircan
2016/05/21 00:01:18
Done.
|
| + std::unique_ptr<base::SharedMemory> shm = |
| + gpu_factories_->CreateSharedMemory(output_buffer_size); |
| + if (!shm) { |
| + NOTREACHED(); |
| + return; |
| + } |
| + output_buffers_.push_back(shm.release()); |
| + } |
| + |
| + for (size_t i = 0; i < output_buffers_.size(); ++i) { |
| + video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer( |
| + i, output_buffers_[i]->handle(), output_buffers_[i]->mapped_size())); |
| + output_buffers_free_count_++; |
| + } |
| +} |
| + |
| +void VEAEncoder::BitstreamBufferReady(int32_t bitstream_buffer_id, |
| + size_t payload_size, |
| + bool keyframe) { |
| + DVLOG(3) << __FUNCTION__; |
| + DCHECK(encoding_task_runner_->BelongsToCurrentThread()); |
| + |
| + output_buffers_free_count_--; |
| + base::SharedMemory* output_buffer = output_buffers_[bitstream_buffer_id]; |
| + |
| + std::unique_ptr<std::string> data(new std::string); |
| + data->append(reinterpret_cast<char*>(output_buffer->memory()), payload_size); |
| + |
| + const auto front_frame = frames_in_encode_.front(); |
| + frames_in_encode_.pop(); |
| + origin_task_runner_->PostTask( |
| + FROM_HERE, base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_, |
| + front_frame.first, base::Passed(&data), |
| + front_frame.second, keyframe)); |
| + UseOutputBitstreamBufferId(bitstream_buffer_id); |
| +} |
| + |
void VEAEncoder::NotifyError(media::VideoEncodeAccelerator::Error error) {
  DVLOG(3) << __FUNCTION__;
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());

  // Latch the error: EncodeOnEncodingTaskRunner() checks this flag and drops
  // all subsequent frames. NOTE(review): there is currently no callback to
  // surface the error to the owner — only this sticky flag.
  error_notified_ = true;
}
| + |
| +void VEAEncoder::UseOutputBitstreamBufferId(int32_t bitstream_buffer_id) { |
| + DVLOG(3) << __FUNCTION__; |
| + DCHECK(encoding_task_runner_->BelongsToCurrentThread()); |
| + |
| + video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer( |
| + bitstream_buffer_id, output_buffers_[bitstream_buffer_id]->handle(), |
| + output_buffers_[bitstream_buffer_id]->mapped_size())); |
| + output_buffers_free_count_++; |
| +} |
| + |
void VEAEncoder::FrameFinished(std::unique_ptr<base::SharedMemory> shm) {
  DVLOG(3) << __FUNCTION__;
  DCHECK(encoding_task_runner_->BelongsToCurrentThread());
  // The wrapping VideoFrame has been destroyed (this runs as its destruction
  // observer), so its backing shared memory can be recycled for a future
  // input frame.
  input_buffers_.push(std::move(shm));
}
| + |
| +void VEAEncoder::EncodeOnEncodingTaskRunner( |
| + const scoped_refptr<VideoFrame>& frame, |
| + base::TimeTicks capture_timestamp) { |
| + DVLOG(3) << __FUNCTION__; |
| + DCHECK(encoding_task_runner_->BelongsToCurrentThread()); |
| + |
| + if (input_size_ != frame->visible_rect().size() && video_encoder_) { |
| + video_encoder_->Destroy(); |
| + video_encoder_.reset(); |
| + } |
| + |
| + if (!video_encoder_) { |
| + ConfigureEncoderOnEncodingTaskRunner(frame->visible_rect().size()); |
| + first_frame_.reset( |
|
mcasas
2016/05/18 22:22:58
So, if we hypothetically were to receive
a few |fr
emircan
2016/05/21 00:01:18
Done.
|
| + new std::pair<scoped_refptr<VideoFrame>, base::TimeTicks>( |
| + frame, capture_timestamp)); |
| + } |
| + |
| + if (error_notified_) { |
| + DVLOG(3) << "An error occurred in VEA encoder"; |
|
mcasas
2016/05/18 22:22:58
Would it be a good idea to destroy the
|video_enco
emircan
2016/05/21 00:01:18
We had that logic in place for decoder. I will lea
|
| + return; |
| + } |
| + |
| + // Drop frames if there is no output buffers available. |
| + if (output_buffers_free_count_ == 0) { |
| + DVLOG(3) << "Dropped frame."; |
| + return; |
| + } |
| + |
| + // If first frame hasn't been encoded, do it first. |
| + if (!first_frame_encoded_ && first_frame_) { |
| + std::unique_ptr<std::pair<scoped_refptr<VideoFrame>, base::TimeTicks>> |
| + first_frame(first_frame_.release()); |
| + EncodeOnEncodingTaskRunner(first_frame->first, first_frame->second); |
|
mcasas
2016/05/18 22:22:58
I don't understand, we recursively call
ourselves
emircan
2016/05/21 00:01:17
Second time around it wouldn't fall here since |fi
|
| + first_frame_encoded_ = true; |
|
mcasas
2016/05/18 22:22:58
Couldn't we drop |first_frame_encoded_| and
instea
emircan
2016/05/21 00:01:17
Done.
|
| + } |
| + |
| + // Lower resolutions may fall back to SW encoder in some platforms, i.e. Mac. |
| + // In that case, the encoder expects more frames before returning result. |
| + // Therefore, a copy is necessary to release the current frame. |
| + scoped_refptr<media::VideoFrame> video_frame = frame; |
| + if (vea_requested_input_size_ != input_size_ || |
| + input_size_.width() < kVEAEncoderMinResolutionWidth || |
| + input_size_.height() < kVEAEncoderMinResolutionHeight) { |
| + // Create SharedMemory backed input buffers as necessary. These SharedMemory |
| + // instances will be shared with GPU process. |
| + if (input_buffers_.empty()) { |
| + std::unique_ptr<base::SharedMemory> shm = |
| + gpu_factories_->CreateSharedMemory(media::VideoFrame::AllocationSize( |
| + media::PIXEL_FORMAT_I420, vea_requested_input_size_)); |
| + input_buffers_.push(std::move(shm)); |
| + } |
| + std::unique_ptr<base::SharedMemory> input_buffer = |
| + std::move(input_buffers_.front()); |
| + input_buffers_.pop(); |
|
mcasas
2016/05/18 22:22:58
Suggestion of alternative for l.516-524 ?
st
emircan
2016/05/21 00:01:17
Done.
|
| + while (input_buffer->mapped_size() < |
| + media::VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, |
| + vea_requested_input_size_)) { |
| + input_buffer.reset(input_buffers_.front().release()); |
| + input_buffers_.pop(); |
| + } |
| + video_frame = media::VideoFrame::WrapExternalSharedMemory( |
| + media::PIXEL_FORMAT_I420, vea_requested_input_size_, |
| + gfx::Rect(input_size_), input_size_, |
| + reinterpret_cast<uint8_t*>(input_buffer->memory()), |
| + input_buffer->mapped_size(), input_buffer->handle(), 0, |
| + frame->timestamp()); |
| + video_frame->AddDestructionObserver(media::BindToCurrentLoop( |
| + base::Bind(&VEAEncoder::FrameFinished, this, |
| + base::Passed(std::move(input_buffer))))); |
| + libyuv::I420Copy(frame->visible_data(media::VideoFrame::kYPlane), |
| + frame->stride(media::VideoFrame::kYPlane), |
| + frame->visible_data(media::VideoFrame::kUPlane), |
| + frame->stride(media::VideoFrame::kUPlane), |
| + frame->visible_data(media::VideoFrame::kVPlane), |
| + frame->stride(media::VideoFrame::kVPlane), |
| + video_frame->visible_data(media::VideoFrame::kYPlane), |
| + video_frame->stride(media::VideoFrame::kYPlane), |
| + video_frame->visible_data(media::VideoFrame::kUPlane), |
| + video_frame->stride(media::VideoFrame::kUPlane), |
| + video_frame->visible_data(media::VideoFrame::kVPlane), |
| + video_frame->stride(media::VideoFrame::kVPlane), |
| + input_size_.width(), input_size_.height()); |
| + } |
| + frames_in_encode_.push(std::make_pair(video_frame, capture_timestamp)); |
| + |
| + encoding_task_runner_->PostTask( |
| + FROM_HERE, base::Bind(&media::VideoEncodeAccelerator::Encode, |
| + base::Unretained(video_encoder_.get()), video_frame, |
| + first_frame_encoded_ ? false : true)); |
| +} |
| + |
| +void VEAEncoder::ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) { |
| + DVLOG(3) << __FUNCTION__; |
| + DCHECK(encoding_task_runner_->BelongsToCurrentThread()); |
| + DCHECK(gpu_factories_->GetTaskRunner()->BelongsToCurrentThread()); |
| + |
| + input_size_ = size; |
| + video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator(); |
| + if (!video_encoder_) { |
| + NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError); |
| + return; |
| + } |
| + if (!video_encoder_->Initialize(media::PIXEL_FORMAT_I420, input_size_, |
|
mcasas
2016/05/18 22:22:58
Bundle these two if()s ?
emircan
2016/05/21 00:01:17
Done.
|
| + codec_, bits_per_second_, this)) { |
| + NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError); |
| + } |
| + first_frame_encoded_ = false; |
| +} |
| + |
| // static |
| void VpxEncoder::ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread, |
| ScopedVpxCodecCtxPtr encoder) { |
| @@ -626,19 +939,26 @@ VideoTrackRecorder::VideoTrackRecorder( |
| DCHECK(!track_.isNull()); |
| DCHECK(track_.getExtraData()); |
| - switch (codec) { |
| + const auto& vea_supported_profile = GetVEASupportedProfile(codec); |
| + // TODO(emircan): Prioritize software based encoders in lower resolutions. |
| + if (vea_supported_profile != media::VIDEO_CODEC_PROFILE_UNKNOWN) { |
| + encoder_ = new VEAEncoder(on_encoded_video_callback, bits_per_second, |
| + vea_supported_profile); |
| + } else { |
| + switch (codec) { |
| #if BUILDFLAG(RTC_USE_H264) |
| - case CodecId::H264: |
| - encoder_ = new H264Encoder(on_encoded_video_callback, bits_per_second); |
| - break; |
| + case CodecId::H264: |
| + encoder_ = new H264Encoder(on_encoded_video_callback, bits_per_second); |
| + break; |
| #endif |
| - case CodecId::VP8: |
| - case CodecId::VP9: |
| - encoder_ = new VpxEncoder(codec == CodecId::VP9, |
| - on_encoded_video_callback, bits_per_second); |
| - break; |
| - default: |
| - NOTREACHED() << "Unsupported codec"; |
| + case CodecId::VP8: |
| + case CodecId::VP9: |
| + encoder_ = new VpxEncoder(codec == CodecId::VP9, |
| + on_encoded_video_callback, bits_per_second); |
| + break; |
| + default: |
| + NOTREACHED() << "Unsupported codec"; |
| + } |
| } |
| // StartFrameEncode() will be called on Render IO thread. |