| Index: media/cast/sender/h264_vt_encoder.cc
|
| diff --git a/media/cast/sender/h264_vt_encoder.cc b/media/cast/sender/h264_vt_encoder.cc
|
| index 49211fba2ad652756261abe3347c0079e492000f..33e7366d80a3e534653e8b7cfd94c530fac3f28d 100644
|
| --- a/media/cast/sender/h264_vt_encoder.cc
|
| +++ b/media/cast/sender/h264_vt_encoder.cc
|
| @@ -43,6 +43,172 @@
|
| frame_encoded_callback(callback) {}
|
| };
|
|
|
| +base::ScopedCFTypeRef<CFDictionaryRef>
|
| +DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) {
|
| + return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate(
|
| + kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks,
|
| + &kCFTypeDictionaryValueCallBacks));
|
| +}
|
| +
|
| +base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key,
|
| + CFTypeRef value) {
|
| + CFTypeRef keys[1] = {key};
|
| + CFTypeRef values[1] = {value};
|
| + return DictionaryWithKeysAndValues(keys, values, 1);
|
| +}
|
| +
|
| +base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) {
|
| + std::vector<CFNumberRef> numbers;
|
| + numbers.reserve(size);
|
| + for (const int* end = v + size; v < end; ++v)
|
| + numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v));
|
| + base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
|
| + kCFAllocatorDefault, reinterpret_cast<const void**>(numbers.data()),
|
| + numbers.size(), &kCFTypeArrayCallBacks));
|
| + for (auto& number : numbers) {
|
| + CFRelease(number);
|
| + }
|
| + return array;
|
| +}
|
| +
|
| +template <typename NalSizeType>
|
| +void CopyNalsToAnnexB(char* avcc_buffer,
|
| + const size_t avcc_size,
|
| + std::string* annexb_buffer) {
|
| + static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
|
| + sizeof(NalSizeType) == 4,
|
| + "NAL size type has unsupported size");
|
| + static const char startcode_3[3] = {0, 0, 1};
|
| + DCHECK(avcc_buffer);
|
| + DCHECK(annexb_buffer);
|
| + size_t bytes_left = avcc_size;
|
| + while (bytes_left > 0) {
|
| + DCHECK_GT(bytes_left, sizeof(NalSizeType));
|
| + NalSizeType nal_size;
|
| + base::ReadBigEndian(avcc_buffer, &nal_size);
|
| + bytes_left -= sizeof(NalSizeType);
|
| + avcc_buffer += sizeof(NalSizeType);
|
| +
|
| + DCHECK_GE(bytes_left, nal_size);
|
| + annexb_buffer->append(startcode_3, sizeof(startcode_3));
|
| + annexb_buffer->append(avcc_buffer, nal_size);
|
| + bytes_left -= nal_size;
|
| + avcc_buffer += nal_size;
|
| + }
|
| +}
|
| +
|
| +// Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
|
| +// parameter sets for keyframes before the frame data as well.
|
| +void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
|
| + std::string* annexb_buffer,
|
| + bool keyframe) {
|
| + // Perform two pass, one to figure out the total output size, and another to
|
| + // copy the data after having performed a single output allocation. Note that
|
| + // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
|
| + // video NALs.
|
| +
|
| + OSStatus status;
|
| +
|
| + // Get the sample buffer's block buffer and format description.
|
| + auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
|
| + DCHECK(bb);
|
| + auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
|
| + DCHECK(fdesc);
|
| +
|
| + size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
|
| + size_t total_bytes = bb_size;
|
| +
|
| + size_t pset_count;
|
| + int nal_size_field_bytes;
|
| + status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
|
| + fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
|
| + if (status ==
|
| + CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
|
| + DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
|
| + pset_count = 2;
|
| + nal_size_field_bytes = 4;
|
| + } else if (status != noErr) {
|
| + DLOG(ERROR)
|
| + << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
|
| + << status;
|
| + return;
|
| + }
|
| +
|
| + if (keyframe) {
|
| + const uint8_t* pset;
|
| + size_t pset_size;
|
| + for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
|
| + status =
|
| + CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
|
| + fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
|
| + if (status != noErr) {
|
| + DLOG(ERROR)
|
| + << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
|
| + << status;
|
| + return;
|
| + }
|
| + total_bytes += pset_size + 4;  // 4-byte Annex B start code per set.
|
| + }
|
| + }
|
| +
|
| + annexb_buffer->reserve(total_bytes);
|
| +
|
| + // Copy all parameter sets before keyframes.
|
| + if (keyframe) {
|
| + const uint8_t* pset;
|
| + size_t pset_size;
|
| + for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
|
| + status =
|
| + CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
|
| + fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
|
| + if (status != noErr) {
|
| + DLOG(ERROR)
|
| + << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
|
| + << status;
|
| + return;
|
| + }
|
| + static const char startcode_4[4] = {0, 0, 0, 1};
|
| + annexb_buffer->append(startcode_4, sizeof(startcode_4));
|
| + annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
|
| + }
|
| + }
|
| +
|
| + // Block buffers can be composed of non-contiguous chunks. For the sake of
|
| + // keeping this code simple, flatten non-contiguous block buffers.
|
| + base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
|
| + bb, base::scoped_policy::RETAIN);
|
| + if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
|
| + contiguous_bb.reset();
|
| + status = CoreMediaGlue::CMBlockBufferCreateContiguous(
|
| + kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0,
|
| + contiguous_bb.InitializeInto());
|
| + if (status != noErr) {
|
| + DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
|
| + return;
|
| + }
|
| + }
|
| +
|
| + // Copy all the NAL units. In the process convert them from AVCC format
|
| + // (length header) to AnnexB format (start code).
|
| + char* bb_data;
|
| + status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr,
|
| + nullptr, &bb_data);
|
| + if (status != noErr) {
|
| + DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
|
| + return;
|
| + }
|
| +
|
| + if (nal_size_field_bytes == 1) {
|
| + CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
|
| + } else if (nal_size_field_bytes == 2) {
|
| + CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
|
| + } else if (nal_size_field_bytes == 4) {
|
| + CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
|
| + } else {
|
| + NOTREACHED();
|
| + }
|
| +}
|
| +
|
| } // namespace
|
|
|
| class H264VideoToolboxEncoder::VideoFrameFactoryImpl
|
| @@ -225,7 +391,7 @@
|
| // all configurations (some of which are used for testing).
|
| base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
|
| #if !defined(OS_IOS)
|
| - encoder_spec = video_toolbox::DictionaryWithKeyValue(
|
| + encoder_spec = DictionaryWithKeyValue(
|
| videotoolbox_glue_
|
| ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
|
| kCFBooleanTrue);
|
| @@ -245,14 +411,13 @@
|
| CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey,
|
| kCVBufferPropagatedAttachmentsKey};
|
| CFTypeRef buffer_attributes_values[] = {
|
| - video_toolbox::ArrayWithIntegers(format, arraysize(format)).release(),
|
| - video_toolbox::DictionaryWithKeysAndValues(
|
| - attachments_keys, attachments_values, arraysize(attachments_keys))
|
| - .release()};
|
| + ArrayWithIntegers(format, arraysize(format)).release(),
|
| + DictionaryWithKeysAndValues(attachments_keys, attachments_values,
|
| + arraysize(attachments_keys)).release()};
|
| const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
|
| - video_toolbox::DictionaryWithKeysAndValues(
|
| - buffer_attributes_keys, buffer_attributes_values,
|
| - arraysize(buffer_attributes_keys));
|
| + DictionaryWithKeysAndValues(buffer_attributes_keys,
|
| + buffer_attributes_values,
|
| + arraysize(buffer_attributes_keys));
|
| for (auto& v : buffer_attributes_values)
|
| CFRelease(v);
|
|
|
| @@ -299,42 +464,40 @@
|
| }
|
|
|
| void H264VideoToolboxEncoder::ConfigureCompressionSession() {
|
| - video_toolbox::SessionPropertySetter session_property_setter(
|
| - compression_session_, videotoolbox_glue_);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
|
| videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
|
| - session_property_setter.Set(
|
| - videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
|
| + true);
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
|
| false);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_
|
| ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
|
| 240);
|
| // TODO(jfroy): implement better bitrate control
|
| // https://crbug.com/425352
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
|
| (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
|
| video_config_.max_frame_rate);
|
| // Keep these attachment settings in-sync with those in Initialize().
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
|
| kCVImageBufferColorPrimaries_ITU_R_709_2);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
|
| kCVImageBufferTransferFunction_ITU_R_709_2);
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
|
| kCVImageBufferYCbCrMatrix_ITU_R_709_2);
|
| if (video_config_.max_number_of_video_buffers_used > 0) {
|
| - session_property_setter.Set(
|
| + SetSessionProperty(
|
| videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
|
| video_config_.max_number_of_video_buffers_used);
|
| }
|
| @@ -403,7 +566,7 @@
|
|
|
| // Convert the frame timestamp to CMTime.
|
| auto timestamp_cm = CoreMediaGlue::CMTimeMake(
|
| - video_frame->timestamp().InMicroseconds(), USEC_PER_SEC);
|
| + (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
|
|
|
| // Wrap information we'll need after the frame is encoded in a heap object.
|
| // We'll get the pointer back from the VideoToolbox completion callback.
|
| @@ -414,7 +577,7 @@
|
| // Build a suitable frame properties dictionary for keyframes.
|
| base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
|
| if (encode_next_frame_as_keyframe_) {
|
| - frame_props = video_toolbox::DictionaryWithKeyValue(
|
| + frame_props = DictionaryWithKeyValue(
|
| videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
|
| kCFBooleanTrue);
|
| encode_next_frame_as_keyframe_ = false;
|
| @@ -508,6 +671,26 @@
|
| VLOG(1) << "OnResume: Resetting compression session.";
|
| ResetCompressionSession();
|
| }
|
| +}
|
| +
|
| +bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
|
| + int32_t value) {
|
| + base::ScopedCFTypeRef<CFNumberRef> cfvalue(
|
| + CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
|
| + return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
|
| + cfvalue) == noErr;
|
| +}
|
| +
|
| +bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
|
| + CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
|
| + return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
|
| + cfvalue) == noErr;
|
| +}
|
| +
|
| +bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
|
| + CFStringRef value) {
|
| + return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
|
| + value) == noErr;
|
| }
|
|
|
| void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
|
| @@ -568,10 +751,8 @@
|
| encoded_frame->referenced_frame_id = frame_id - 1;
|
| }
|
|
|
| - if (has_frame_data) {
|
| - video_toolbox::CopySampleBufferToAnnexBBuffer(sbuf, keyframe,
|
| - &encoded_frame->data);
|
| - }
|
| + if (has_frame_data)
|
| + CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
|
|
|
| // TODO(miu): Compute and populate the |deadline_utilization| and
|
| // |lossy_utilization| performance metrics in |encoded_frame|.
|
|
|