| Index: webrtc/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc
|
| diff --git a/webrtc/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..934aed4aed8a1f97250935f9e912b7918d4c4aa5
|
| --- /dev/null
|
| +++ b/webrtc/modules/video_coding/codecs/stereo/stereo_encoder_adapter.cc
|
| @@ -0,0 +1,237 @@
|
| +/*
|
| + * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
| + *
|
| + * Use of this source code is governed by a BSD-style license
|
| + * that can be found in the LICENSE file in the root of the source
|
| + * tree. An additional intellectual property rights grant can be found
|
| + * in the file PATENTS. All contributing project authors may
|
| + * be found in the AUTHORS file in the root of the source tree.
|
| + */
|
| +
|
| +#include "webrtc/modules/video_coding/codecs/stereo/include/stereo_encoder_adapter.h"
|
| +
|
| +#include <algorithm>
|
| +#include <iterator>
|
| +#include <tuple>
|
| +#include <utility>
|
| +
|
| +#include "webrtc/base/checks.h"
|
| +#include "webrtc/base/constructormagic.h"
|
| +#include "webrtc/base/keep_ref_until_done.h"
|
| +#include "webrtc/base/logging.h"
|
| +#include "webrtc/common_video/include/video_frame.h"
|
| +#include "webrtc/common_video/include/video_frame_buffer.h"
|
| +#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
|
| +#include "webrtc/modules/include/module_common_types.h"
|
| +
|
| +namespace webrtc {
|
| +
|
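| +// Per-stream callback wrapper: forwards each underlying encoder's output to
|
| +// the adapter, tagged with the stream (YUV or AXX) it came from.
|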
| +class StereoEncoderAdapter::AdapterEncodedImageCallback
|
| + : public webrtc::EncodedImageCallback {
|
| + public:
|
| + AdapterEncodedImageCallback(webrtc::StereoEncoderAdapter* adapter,
|
| + StereoCodecStream stream_idx)
|
| + : adapter_(adapter), stream_idx_(stream_idx) {}
|
| +
|
| + EncodedImageCallback::Result OnEncodedImage(
|
| + const EncodedImage& encoded_image,
|
| + const CodecSpecificInfo* codec_specific_info,
|
| + const RTPFragmentationHeader* fragmentation) override {
|
| + if (!adapter_)
|
| + return webrtc::EncodedImageCallback::Result::OK;
|
| + return adapter_->OnEncodedImage(stream_idx_, encoded_image,
|
| + codec_specific_info, fragmentation);
|
| + }
|
| +
|
| + private:
|
| + StereoEncoderAdapter* adapter_;
|
| + const StereoCodecStream stream_idx_;
|
| +};
|
| +
|
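| +// Holds the encoded image of one stream until the image for the other
|
| +// stream of the same frame arrives, so both can be delivered together.
|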
| +struct StereoEncoderAdapter::EncodedImageData {
|
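| +  // Placeholder stashed when a frame has no alpha plane: the AXX image is
|
| +  // left zero-length so the YUV image is later sent on its own.
|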
| + explicit EncodedImageData(StereoCodecStream stream_idx)
|
| + : stream_idx_(stream_idx) {
|
| + RTC_DCHECK_EQ(kAXXStream, stream_idx);
|
| + encodedImage_._length = 0;
|
| + }
|
| + EncodedImageData(StereoCodecStream stream_idx,
|
| + const EncodedImage& encodedImage,
|
| + const CodecSpecificInfo* codecSpecificInfo,
|
| + const RTPFragmentationHeader* fragmentation)
|
| + : stream_idx_(stream_idx),
|
| + encodedImage_(encodedImage),
|
| + codecSpecificInfo_(*codecSpecificInfo) {
|
| + fragmentation_.CopyFrom(*fragmentation);
|
| + }
|
| + const StereoCodecStream stream_idx_;
|
| + EncodedImage encodedImage_;
|
| + const CodecSpecificInfo codecSpecificInfo_;
|
| + RTPFragmentationHeader fragmentation_;
|
| +
|
| + private:
|
| + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedImageData);
|
| +};
|
| +
|
| +StereoEncoderAdapter::StereoEncoderAdapter(VideoEncoderFactory* factory)
|
| + : factory_(factory), encoded_complete_callback_(nullptr) {}
|
| +
|
| +StereoEncoderAdapter::~StereoEncoderAdapter() {
|
| + Release();
|
| +}
|
| +
|
| +int StereoEncoderAdapter::InitEncode(const VideoCodec* inst,
|
| + int number_of_cores,
|
| + size_t max_payload_size) {
|
| + const size_t buffer_size =
|
| + CalcBufferSize(VideoType::kI420, inst->width, inst->height);
|
| + stereo_dummy_planes_.resize(buffer_size);
|
| + // It is more expensive to encode 0x00, so use 0x80 instead.
|
| + std::fill(stereo_dummy_planes_.begin(), stereo_dummy_planes_.end(), 0x80);
|
| +
|
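| +  // Create one underlying encoder per stream (YUV and AXX) and route its
|
| +  // output back through a per-stream callback.
|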
| + for (size_t i = 0; i < kStereoCodecStreams; ++i) {
|
| + VideoEncoder* encoder = factory_->Create();
|
| + const int rv = encoder->InitEncode(inst, number_of_cores, max_payload_size);
|
| + if (rv)
|
| + return rv;
|
| + encoders_.push_back(encoder);
|
| + adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(
|
| + this, static_cast<StereoCodecStream>(i)));
|
| + encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get());
|
| + }
|
| + return WEBRTC_VIDEO_CODEC_OK;
|
| +}
|
| +
|
| +int StereoEncoderAdapter::Encode(const VideoFrame& input_image,
|
| + const CodecSpecificInfo* codec_specific_info,
|
| + const std::vector<FrameType>* frame_types) {
|
| + if (!encoded_complete_callback_) {
|
| + return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
|
| + }
|
| +
|
| +  // Encode AXX: when the frame carries an alpha plane, wrap that plane as
|
| +  // the Y plane of a synthetic I420 frame (with the 0x80 dummy planes as
|
| +  // chroma) and feed it to the AXX encoder.
|
| + rtc::scoped_refptr<I420BufferInterface> yuva_buffer =
|
| + input_image.video_frame_buffer()->ToI420();
|
| + if (yuva_buffer->HasAlpha()) {
|
| + rtc::scoped_refptr<WrappedI420Buffer> alpha_buffer(
|
| + new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
|
| + input_image.width(), input_image.height(), yuva_buffer->DataA(),
|
| + yuva_buffer->StrideA(), stereo_dummy_planes_.data(),
|
| + yuva_buffer->StrideU(), stereo_dummy_planes_.data(),
|
| + yuva_buffer->StrideV(),
|
| + rtc::KeepRefUntilDone(input_image.video_frame_buffer())));
|
| + VideoFrame alpha_image(alpha_buffer, input_image.timestamp(),
|
| + input_image.render_time_ms(),
|
| + input_image.rotation());
|
| +    const int rv = encoders_[kAXXStream]->Encode(alpha_image,
|
| +                                                 codec_specific_info,
|
| +                                                 frame_types);
|
| +    if (rv)
|
| +      return rv;
|
| + } else {
|
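| +    // No alpha plane: stash an empty AXX entry keyed by timestamp so the
|
| +    // YUV image is sent on its own when it arrives.
|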
| + RTC_DCHECK(encoded_data_.find(input_image.timestamp()) ==
|
| + encoded_data_.end());
|
| + encoded_data_.emplace(std::piecewise_construct,
|
| + std::forward_as_tuple(input_image.timestamp()),
|
| + std::forward_as_tuple(kAXXStream));
|
| + }
|
| +
|
| + // Encode YUV
|
| +  return encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
|
| +                                       frame_types);
|
| +}
|
| +
|
| +int StereoEncoderAdapter::RegisterEncodeCompleteCallback(
|
| + EncodedImageCallback* callback) {
|
| + encoded_complete_callback_ = callback;
|
| + return WEBRTC_VIDEO_CODEC_OK;
|
| +}
|
| +
|
| +int StereoEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
|
| + int64_t rtt) {
|
| + for (auto encoder : encoders_) {
|
| + const int rv = encoder->SetChannelParameters(packet_loss, rtt);
|
| + if (rv)
|
| + return rv;
|
| + }
|
| + return WEBRTC_VIDEO_CODEC_OK;
|
| +}
|
| +
|
| +int StereoEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
|
| + uint32_t new_framerate) {
|
| + for (auto encoder : encoders_) {
|
| + const int rv = encoder->SetRateAllocation(bitrate, new_framerate);
|
| + if (rv)
|
| + return rv;
|
| + }
|
| + return WEBRTC_VIDEO_CODEC_OK;
|
| +}
|
| +
|
| +int StereoEncoderAdapter::Release() {
|
| + for (auto encoder : encoders_) {
|
| + const int rv = encoder->Release();
|
| + if (rv)
|
| + return rv;
|
| + factory_->Destroy(encoder);
|
| + }
|
| + encoders_.clear();
|
| + adapter_callbacks_.clear();
|
| + return WEBRTC_VIDEO_CODEC_OK;
|
| +}
|
| +
|
| +EncodedImageCallback::Result StereoEncoderAdapter::OnEncodedImage(
|
| + StereoCodecStream stream_idx,
|
| + const EncodedImage& encodedImage,
|
| + const CodecSpecificInfo* codecSpecificInfo,
|
| + const RTPFragmentationHeader* fragmentation) {
|
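| +  // If the other stream's image for this timestamp is already stashed,
|
| +  // deliver both together; otherwise stash this one and wait for its pair.
|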
| +  const auto other_encoded_data_it =
|
| +      encoded_data_.find(encodedImage._timeStamp);
|
| + if (other_encoded_data_it != encoded_data_.end()) {
|
| + const auto& other_image_data = other_encoded_data_it->second;
|
| + EncodedImageCallback::Result res = EncodedImageCallback::Result::OK;
|
| + if (stream_idx == kYUVStream) {
|
| + RTC_DCHECK_EQ(kAXXStream, other_image_data.stream_idx_);
|
| + res = SendEncodedImages(encodedImage, codecSpecificInfo, fragmentation,
|
| + other_image_data.encodedImage_,
|
| + &other_image_data.codecSpecificInfo_,
|
| + &other_image_data.fragmentation_);
|
| + } else {
|
| + RTC_DCHECK_EQ(kYUVStream, other_image_data.stream_idx_);
|
| + RTC_DCHECK_EQ(kAXXStream, stream_idx);
|
| + res = SendEncodedImages(other_image_data.encodedImage_,
|
| + &other_image_data.codecSpecificInfo_,
|
| + &other_image_data.fragmentation_, encodedImage,
|
| + codecSpecificInfo, fragmentation);
|
| + }
|
| +    // Drop the stashed entry just consumed, and any stale older entries.
|
| +    encoded_data_.erase(encoded_data_.begin(),
|
| +                        std::next(other_encoded_data_it));
|
| + return res;
|
| + }
|
| + RTC_DCHECK(encoded_data_.find(encodedImage._timeStamp) ==
|
| + encoded_data_.end());
|
| + encoded_data_.emplace(
|
| + std::piecewise_construct, std::forward_as_tuple(encodedImage._timeStamp),
|
| + std::forward_as_tuple(stream_idx, encodedImage, codecSpecificInfo,
|
| + fragmentation));
|
| + return webrtc::EncodedImageCallback::Result::OK;
|
| +}
|
| +
|
| +EncodedImageCallback::Result StereoEncoderAdapter::SendEncodedImages(
|
| + const EncodedImage& encoded_image,
|
| + const CodecSpecificInfo* codec_specific_info,
|
| + const RTPFragmentationHeader* fragmentation,
|
| + const EncodedImage& stereo_encoded_image,
|
| + const CodecSpecificInfo* stereo_codec_specific_info,
|
| + const RTPFragmentationHeader* stereo_fragmentation) {
|
| + const bool has_alpha = stereo_encoded_image._length != 0;
|
| +
|
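| +  // Rewrite the codec-specific info in place (via const_cast) to mark the
|
| +  // image as a stereo frame and, when present, attach the AXX payload.
|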
| + CodecSpecificInfo* yuv_codec =
|
| + const_cast<CodecSpecificInfo*>(codec_specific_info);
|
| + yuv_codec->codecType = kVideoCodecStereo;
|
| + yuv_codec->codec_name = "stereo-vp9";
|
| + yuv_codec->stereoInfo.stereoCodecType = kVideoCodecVP9;
|
| + if (!has_alpha) {
|
| + yuv_codec->stereoInfo.num_frames = 0;
|
| + return encoded_complete_callback_->OnEncodedImage(encoded_image, yuv_codec,
|
| + fragmentation);
|
| + }
|
| +
|
| + yuv_codec->stereoInfo.num_frames = 1;
|
| + yuv_codec->stereoInfo.encoded_images[0] = &encoded_image;
|
| + yuv_codec->stereoInfo.codec_specific_infos[0] = stereo_codec_specific_info;
|
| + yuv_codec->stereoInfo.fragmentations[0] = stereo_fragmentation;
|
| + return encoded_complete_callback_->OnEncodedImage(encoded_image, yuv_codec,
|
| + fragmentation);
|
| +}
|
| +
|
| +} // namespace webrtc
|
|
|