| Index: content/renderer/media/rtc_video_encoder.cc
|
| diff --git a/content/renderer/media/rtc_video_encoder.cc b/content/renderer/media/rtc_video_encoder.cc
|
| index 1b33fc54c90abe6881a48565c6d9fbb4aa58b513..380fde736545a2abc1f3bde04cd422f57ccab1bc 100644
|
| --- a/content/renderer/media/rtc_video_encoder.cc
|
| +++ b/content/renderer/media/rtc_video_encoder.cc
|
| @@ -16,6 +16,7 @@
|
| #include "media/base/video_frame.h"
|
| #include "media/base/video_util.h"
|
| #include "media/filters/gpu_video_accelerator_factories.h"
|
| +#include "media/filters/h264_parser.h"
|
| #include "media/video/video_encode_accelerator.h"
|
| #include "third_party/webrtc/system_wrappers/interface/tick_util.h"
|
|
|
| @@ -27,6 +28,41 @@
|
|
|
| namespace content {
|
|
|
| +namespace {
|
| +
|
| +// Populates struct webrtc::RTPFragmentationHeader for H264 codec.
|
| +// Each entry specifies the offset and length (excluding start code) of a NALU.
|
| +void GetRTPFragmentationHeaderH264(
|
| + webrtc::RTPFragmentationHeader& header, uint8_t* data, uint32_t length) {
|
| + media::H264Parser parser;
|
| + parser.SetStream(data, length);
|
| +
|
| + std::vector<media::H264NALU> nalu_vector;
|
| + while (true) {
|
| + media::H264NALU nalu;
|
| + media::H264Parser::Result result;
|
| + result = parser.AdvanceToNextNALU(&nalu);
|
| + if (result == media::H264Parser::kOk) {
|
| + nalu_vector.push_back(nalu);
|
| + } else if (result == media::H264Parser::kEOStream) {
|
| + break;
|
| + } else {
|
| + DLOG(ERROR) << "Unexpected H264 parser result";
|
| + break;
|
| + }
|
| + }
|
| +
|
| + header.VerifyAndAllocateFragmentationHeader(nalu_vector.size());
|
| + for (size_t i = 0; i < nalu_vector.size(); ++i) {
|
| + header.fragmentationOffset[i] = nalu_vector[i].data - data;
|
| + header.fragmentationLength[i] = nalu_vector[i].size;
|
| + header.fragmentationPlType[i] = 0;
|
| + header.fragmentationTimeDiff[i] = 0;
|
| + }
|
| +}
|
| +
|
| +} // namespace
|
| +
|
| // This private class of RTCVideoEncoder does the actual work of communicating
|
| // with a media::VideoEncodeAccelerator for handling video encoding. It can
|
| // be created on any thread, but should subsequently be posted to (and Destroy()
|
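The GetRTPFragmentationHeaderH264() helper added above walks the Annex B bitstream with media::H264Parser and records, for each NALU, an offset and length measured from the start of the encoded buffer with the start code excluded. As a rough illustration of what those offset/length pairs end up describing, here is a self-contained sketch; the ScanAnnexB scanner, the Fragment struct, and the test buffer are hypothetical stand-ins for illustration only, not the parser the patch actually uses.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for one fragmentation entry: offset/length of a NALU
// payload measured from the start of the encoded buffer, start code excluded.
struct Fragment {
  size_t offset;
  size_t length;
};

// Naive Annex B scanner for 3-byte (00 00 01) and 4-byte (00 00 00 01) start
// codes. The patch relies on media::H264Parser instead; this only illustrates
// the offsets/lengths that end up in the fragmentation header.
std::vector<Fragment> ScanAnnexB(const uint8_t* data, size_t length) {
  std::vector<Fragment> fragments;
  size_t i = 0;
  while (i + 3 <= length) {
    // Detect a start code at position i.
    size_t start_code_size = 0;
    if (i + 4 <= length && data[i] == 0 && data[i + 1] == 0 &&
        data[i + 2] == 0 && data[i + 3] == 1) {
      start_code_size = 4;
    } else if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1) {
      start_code_size = 3;
    }
    if (start_code_size == 0) {
      ++i;
      continue;
    }
    const size_t payload_start = i + start_code_size;
    // The NALU payload runs until the next start code or the end of buffer.
    size_t j = payload_start;
    while (j + 3 <= length &&
           !(data[j] == 0 && data[j + 1] == 0 &&
             (data[j + 2] == 1 || (j + 4 <= length && data[j + 2] == 0 &&
                                   data[j + 3] == 1)))) {
      ++j;
    }
    const size_t payload_end = (j + 3 <= length) ? j : length;
    fragments.push_back({payload_start, payload_end - payload_start});
    i = payload_end;
  }
  return fragments;
}

int main() {
  // Two fake NALUs behind a 4-byte and a 3-byte start code.
  const uint8_t stream[] = {0x00, 0x00, 0x00, 0x01, 0x67, 0xAA, 0xBB,
                            0x00, 0x00, 0x01, 0x65, 0xCC};
  const std::vector<Fragment> fragments = ScanAnnexB(stream, sizeof(stream));
  assert(fragments.size() == 2);
  assert(fragments[0].offset == 4 && fragments[0].length == 3);
  assert(fragments[1].offset == 10 && fragments[1].length == 2);
  for (const Fragment& f : fragments)
    printf("offset=%zu length=%zu\n", f.offset, f.length);
  return 0;
}
```

Running the sketch prints offset=4 length=3 and offset=10 length=2, i.e. the NALU payload positions with the 4-byte and 3-byte start codes stripped, which is the same shape of data the patched helper writes into fragmentationOffset and fragmentationLength.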
| @@ -648,6 +684,25 @@ void RTCVideoEncoder::ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
|
| if (!encoded_image_callback_)
|
| return;
|
|
|
| + webrtc::RTPFragmentationHeader header;
|
| + memset(&header, 0, sizeof(header));
|
| + switch (video_codec_type_) {
|
| + case webrtc::kVideoCodecVP8:
|
| + case webrtc::kVideoCodecGeneric:
|
| + header.VerifyAndAllocateFragmentationHeader(1);
|
| + header.fragmentationOffset[0] = 0;
|
| + header.fragmentationLength[0] = image->_length;
|
| + header.fragmentationPlType[0] = 0;
|
| + header.fragmentationTimeDiff[0] = 0;
|
| + break;
|
| + case webrtc::kVideoCodecH264:
|
| + GetRTPFragmentationHeaderH264(header, image->_buffer, image->_length);
|
| + break;
|
| + default:
|
| + NOTREACHED() << "Invalid video codec type";
|
| + return;
|
| + }
|
| +
|
| webrtc::CodecSpecificInfo info;
|
| memset(&info, 0, sizeof(info));
|
| info.codecType = video_codec_type_;
|
| @@ -657,15 +712,6 @@ void RTCVideoEncoder::ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
|
| info.codecSpecific.VP8.keyIdx = -1;
|
| }
|
|
|
| - // Generate a header describing a single fragment.
|
| - webrtc::RTPFragmentationHeader header;
|
| - memset(&header, 0, sizeof(header));
|
| - header.VerifyAndAllocateFragmentationHeader(1);
|
| - header.fragmentationOffset[0] = 0;
|
| - header.fragmentationLength[0] = image->_length;
|
| - header.fragmentationPlType[0] = 0;
|
| - header.fragmentationTimeDiff[0] = 0;
|
| -
|
| int32_t retval = encoded_image_callback_->Encoded(*image, &info, &header);
|
| if (retval < 0) {
|
| DVLOG(2) << "ReturnEncodedImage(): encoded_image_callback_ returned "
|
|
|
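Taken together, the new switch in ReturnEncodedImage() and the removal of the old single-fragment block replace an unconditional one-fragment header with a per-codec dispatch: VP8 and generic payloads still get a single fragment spanning the whole encoded image, while H264 frames get one fragment per NALU so the RTP packetizer can split the bitstream at NALU boundaries. Below is a minimal standalone sketch of that dispatch; FragmentationHeader, CodecType, BuildHeader, and the hard-coded NALU table are hypothetical stand-ins for the WebRTC types and for the output of a NALU scan, not the patch's actual code.

```cpp
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical stand-in for webrtc::RTPFragmentationHeader; the patch uses
// the real WebRTC type and its VerifyAndAllocateFragmentationHeader() helper.
struct FragmentationHeader {
  std::vector<size_t> offsets;
  std::vector<size_t> lengths;
};

// Hypothetical stand-in for webrtc::VideoCodecType.
enum class CodecType { kVP8, kGeneric, kH264 };

// Mirrors the dispatch added to ReturnEncodedImage(): VP8/generic payloads
// are described as one fragment covering the whole encoded image, while H264
// gets one fragment per NALU (offset/length excluding the start code).
// `nalus` stands in for the result of a NALU scan such as the
// GetRTPFragmentationHeaderH264() helper in the patch.
FragmentationHeader BuildHeader(
    CodecType type, size_t image_length,
    const std::vector<std::pair<size_t, size_t>>& nalus) {
  FragmentationHeader header;
  switch (type) {
    case CodecType::kVP8:
    case CodecType::kGeneric:
      header.offsets = {0};
      header.lengths = {image_length};
      break;
    case CodecType::kH264:
      for (const auto& nalu : nalus) {
        header.offsets.push_back(nalu.first);
        header.lengths.push_back(nalu.second);
      }
      break;
  }
  return header;
}

int main() {
  // Fake NALU table: two NALUs at offsets 4 and 10, start codes excluded.
  const std::vector<std::pair<size_t, size_t>> nalus = {{4, 3}, {10, 2}};
  const FragmentationHeader vp8 = BuildHeader(CodecType::kVP8, 12, nalus);
  const FragmentationHeader h264 = BuildHeader(CodecType::kH264, 12, nalus);
  printf("VP8: %zu fragment(s), H264: %zu fragment(s)\n",
         vp8.offsets.size(), h264.offsets.size());
  return 0;
}
```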