Chromium Code Reviews

Index: media/cast/sender/h264_vt_encoder.cc
diff --git a/media/cast/sender/h264_vt_encoder.cc b/media/cast/sender/h264_vt_encoder.cc
index 33e7366d80a3e534653e8b7cfd94c530fac3f28d..21022251777d4d7c7142735d505e54166f4fd5d1 100644
--- a/media/cast/sender/h264_vt_encoder.cc
+++ b/media/cast/sender/h264_vt_encoder.cc
@@ -43,172 +43,6 @@ struct InProgressFrameEncode {
frame_encoded_callback(callback) {}
};
-base::ScopedCFTypeRef<CFDictionaryRef>
-DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) {
- return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate(
- kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks));
-}
-
-base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key,
- CFTypeRef value) {
- CFTypeRef keys[1] = {key};
- CFTypeRef values[1] = {value};
- return DictionaryWithKeysAndValues(keys, values, 1);
-}
-
-base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) {
- std::vector<CFNumberRef> numbers;
- numbers.reserve(size);
- for (const int* end = v + size; v < end; ++v)
- numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v));
- base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate(
- kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]),
- numbers.size(), &kCFTypeArrayCallBacks));
- for (auto& number : numbers) {
- CFRelease(number);
- }
- return array;
-}
-
-template <typename NalSizeType>
-void CopyNalsToAnnexB(char* avcc_buffer,
- const size_t avcc_size,
- std::string* annexb_buffer) {
- static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
- sizeof(NalSizeType) == 4,
- "NAL size type has unsupported size");
- static const char startcode_3[3] = {0, 0, 1};
- DCHECK(avcc_buffer);
- DCHECK(annexb_buffer);
- size_t bytes_left = avcc_size;
- while (bytes_left > 0) {
- DCHECK_GT(bytes_left, sizeof(NalSizeType));
- NalSizeType nal_size;
- base::ReadBigEndian(avcc_buffer, &nal_size);
- bytes_left -= sizeof(NalSizeType);
- avcc_buffer += sizeof(NalSizeType);
-
- DCHECK_GE(bytes_left, nal_size);
- annexb_buffer->append(startcode_3, sizeof(startcode_3));
- annexb_buffer->append(avcc_buffer, nal_size);
- bytes_left -= nal_size;
- avcc_buffer += nal_size;
- }
-}
-
-// Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
-// parameter sets for keyframes before the frame data as well.
-void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
- std::string* annexb_buffer,
- bool keyframe) {
- // Perform two pass, one to figure out the total output size, and another to
- // copy the data after having performed a single output allocation. Note that
- // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
- // video NALs.
-
- OSStatus status;
-
- // Get the sample buffer's block buffer and format description.
- auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
- DCHECK(bb);
- auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
- DCHECK(fdesc);
-
- size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
- size_t total_bytes = bb_size;
-
- size_t pset_count;
- int nal_size_field_bytes;
- status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
- fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
- if (status ==
- CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
- DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
- pset_count = 2;
- nal_size_field_bytes = 4;
- } else if (status != noErr) {
- DLOG(ERROR)
- << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
- << status;
- return;
- }
-
- if (keyframe) {
- const uint8_t* pset;
- size_t pset_size;
- for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
- status =
- CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
- fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
- if (status != noErr) {
- DLOG(ERROR)
- << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
- << status;
- return;
- }
- total_bytes += pset_size + nal_size_field_bytes;
- }
- }
-
- annexb_buffer->reserve(total_bytes);
-
- // Copy all parameter sets before keyframes.
- if (keyframe) {
- const uint8_t* pset;
- size_t pset_size;
- for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
- status =
- CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
- fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
- if (status != noErr) {
- DLOG(ERROR)
- << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
- << status;
- return;
- }
- static const char startcode_4[4] = {0, 0, 0, 1};
- annexb_buffer->append(startcode_4, sizeof(startcode_4));
- annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
- }
- }
-
- // Block buffers can be composed of non-contiguous chunks. For the sake of
- // keeping this code simple, flatten non-contiguous block buffers.
- base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
- bb, base::scoped_policy::RETAIN);
- if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
- contiguous_bb.reset();
- status = CoreMediaGlue::CMBlockBufferCreateContiguous(
- kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0,
- contiguous_bb.InitializeInto());
- if (status != noErr) {
- DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
- return;
- }
- }
-
- // Copy all the NAL units. In the process convert them from AVCC format
- // (length header) to AnnexB format (start code).
- char* bb_data;
- status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr,
- nullptr, &bb_data);
- if (status != noErr) {
- DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
- return;
- }
-
- if (nal_size_field_bytes == 1) {
- CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
- } else if (nal_size_field_bytes == 2) {
- CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
- } else if (nal_size_field_bytes == 4) {
- CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
- } else {
- NOTREACHED();
- }
-}
-
} // namespace

class H264VideoToolboxEncoder::VideoFrameFactoryImpl
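The block removed above contained the AVCC-to-Annex-B conversion helpers: each NAL unit in the VideoToolbox output carries a big-endian length prefix, which is rewritten as a 00 00 01 start code, and SPS/PPS parameter sets are prepended for keyframes. The call sites below now reach the relocated helpers through the shared video_toolbox namespace. For illustration only, a minimal self-contained sketch of the same length-prefix rewrite (plain C++, fixed 4-byte length fields, none of the Chromium/CoreMedia types or DCHECKs used above):

#include <cstdint>
#include <string>

// Rewrites 4-byte-length-prefixed NAL units (AVCC framing) into Annex B
// framing by emitting a 00 00 01 start code before each NAL unit.
std::string AvccToAnnexB(const uint8_t* avcc, size_t size) {
  static const char kStartCode[3] = {0, 0, 1};
  std::string annexb;
  size_t pos = 0;
  while (pos + 4 <= size) {
    // The length prefix is stored big-endian.
    const uint32_t nal_size = (uint32_t{avcc[pos]} << 24) |
                              (uint32_t{avcc[pos + 1]} << 16) |
                              (uint32_t{avcc[pos + 2]} << 8) |
                              uint32_t{avcc[pos + 3]};
    pos += 4;
    if (nal_size > size - pos)
      break;  // Truncated input; the removed code DCHECKs instead.
    annexb.append(kStartCode, sizeof(kStartCode));
    annexb.append(reinterpret_cast<const char*>(avcc + pos), nal_size);
    pos += nal_size;
  }
  return annexb;
}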
@@ -391,7 +225,7 @@ void H264VideoToolboxEncoder::ResetCompressionSession() {
// all configurations (some of which are used for testing).
base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
#if !defined(OS_IOS)
- encoder_spec = DictionaryWithKeyValue(
+ encoder_spec = video_toolbox::DictionaryWithKeyValue(
videotoolbox_glue_
->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
kCFBooleanTrue);
@@ -411,13 +245,14 @@ void H264VideoToolboxEncoder::ResetCompressionSession() {
CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey,
kCVBufferPropagatedAttachmentsKey};
CFTypeRef buffer_attributes_values[] = {
- ArrayWithIntegers(format, arraysize(format)).release(),
- DictionaryWithKeysAndValues(attachments_keys, attachments_values,
- arraysize(attachments_keys)).release()};
+ video_toolbox::ArrayWithIntegers(format, arraysize(format)).release(),
+ video_toolbox::DictionaryWithKeysAndValues(
+ attachments_keys, attachments_values, arraysize(attachments_keys))
+ .release()};
const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
- DictionaryWithKeysAndValues(buffer_attributes_keys,
- buffer_attributes_values,
- arraysize(buffer_attributes_keys));
+ video_toolbox::DictionaryWithKeysAndValues(
+ buffer_attributes_keys, buffer_attributes_values,
+ arraysize(buffer_attributes_keys));
for (auto& v : buffer_attributes_values)
CFRelease(v);
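A note on the .release()/CFRelease pairing above: the helpers return a ScopedCFTypeRef holding a +1 reference, .release() hands that reference over as a raw CFTypeRef for the values array, CFDictionaryCreate (with kCFTypeDictionaryValueCallBacks) retains the values again, and the trailing loop drops the original references once the dictionary owns its own. A minimal sketch of that Core Foundation ownership pattern without the Chromium wrappers (the key name here is purely illustrative):

#include <CoreFoundation/CoreFoundation.h>

// Builds a one-entry CFDictionary and balances the Create/retain counts.
CFDictionaryRef MakeExampleAttributes() {
  int fps = 30;
  CFNumberRef value =
      CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &fps);  // +1
  const void* keys[] = {CFSTR("ExampleFrameRate")};  // Hypothetical key.
  const void* values[] = {value};
  CFDictionaryRef dict = CFDictionaryCreate(
      kCFAllocatorDefault, keys, values, 1, &kCFTypeDictionaryKeyCallBacks,
      &kCFTypeDictionaryValueCallBacks);  // Retains |value| internally.
  CFRelease(value);  // Drop the Create reference; |dict| keeps its own.
  return dict;       // Caller owns the returned dictionary.
}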
@@ -464,40 +299,43 @@ void H264VideoToolboxEncoder::ResetCompressionSession() {
}
void H264VideoToolboxEncoder::ConfigureCompressionSession() {
- SetSessionProperty(
+ scoped_ptr<video_toolbox::SessionPropertySetter> session_property_setter(
|
miu  2016/02/10 21:04:56:
Please don't allocate on the heap here. It should be on the stack.

emircan  2016/02/11 09:22:10:
My bad, it makes total sense to put it on the stack.
+ new video_toolbox::SessionPropertySetter(compression_session_,
+ videotoolbox_glue_));
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
- SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
- true);
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
+ videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
false);
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240);
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_
->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
240);
// TODO(jfroy): implement better bitrate control
// https://crbug.com/425352
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
(video_config_.min_bitrate + video_config_.max_bitrate) / 2);
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
video_config_.max_frame_rate);
// Keep these attachment settings in-sync with those in Initialize().
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
kCVImageBufferColorPrimaries_ITU_R_709_2);
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
kCVImageBufferTransferFunction_ITU_R_709_2);
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
kCVImageBufferYCbCrMatrix_ITU_R_709_2);
if (video_config_.max_number_of_video_buffers_used > 0) {
- SetSessionProperty(
+ session_property_setter->SetSessionProperty(
videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
video_config_.max_number_of_video_buffers_used);
}
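Following up on the inline comments above: nothing in this patch requires the setter to live on the heap. A sketch of how the start of ConfigureCompressionSession() might read with a stack-allocated setter, assuming only the video_toolbox::SessionPropertySetter interface already used in this diff:

  video_toolbox::SessionPropertySetter session_property_setter(
      compression_session_, videotoolbox_glue_);
  session_property_setter.SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
      videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
  session_property_setter.SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
  // ...and so on for the remaining properties shown above.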
@@ -566,7 +404,7 @@ bool H264VideoToolboxEncoder::EncodeVideoFrame(
// Convert the frame timestamp to CMTime.
auto timestamp_cm = CoreMediaGlue::CMTimeMake(
- (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
+ video_frame->timestamp().InMicroseconds(), USEC_PER_SEC);
// Wrap information we'll need after the frame is encoded in a heap object.
// We'll get the pointer back from the VideoToolbox completion callback.
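The change above swaps the wall-clock-derived offset for the frame's own media timestamp; either way the value is expressed in microseconds against a timescale of USEC_PER_SEC (1,000,000). A small illustration of the rational time being built here, calling CoreMedia's CMTimeMake directly rather than the CoreMediaGlue wrapper:

#include <CoreMedia/CMTime.h>
#include <cstdint>

// A frame stamped at 33,366 microseconds becomes CMTime{33366, 1000000},
// i.e. roughly 0.0334 seconds of media time.
CMTime TimestampToCMTime(int64_t timestamp_us) {
  return CMTimeMake(timestamp_us, 1000000 /* USEC_PER_SEC */);
}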
@@ -577,7 +415,7 @@ bool H264VideoToolboxEncoder::EncodeVideoFrame(
// Build a suitable frame properties dictionary for keyframes.
base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
if (encode_next_frame_as_keyframe_) {
- frame_props = DictionaryWithKeyValue(
+ frame_props = video_toolbox::DictionaryWithKeyValue(
videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
kCFBooleanTrue);
encode_next_frame_as_keyframe_ = false;
@@ -673,26 +511,6 @@ void H264VideoToolboxEncoder::OnResume() {
}
}
-bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
- int32_t value) {
- base::ScopedCFTypeRef<CFNumberRef> cfvalue(
- CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
- return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
- cfvalue) == noErr;
-}
-
-bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
- CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
- return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
- cfvalue) == noErr;
-}
-
-bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
- CFStringRef value) {
- return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
- value) == noErr;
-}
-
void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
void* request_opaque,
OSStatus status,
@@ -751,8 +569,10 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
encoded_frame->referenced_frame_id = frame_id - 1;
}
- if (has_frame_data)
- CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
+ if (has_frame_data) {
+ video_toolbox::CopySampleBufferToAnnexBBuffer(sbuf, keyframe,
+ &encoded_frame->data);
+ }
// TODO(miu): Compute and populate the |deadline_utilization| and
// |lossy_utilization| performance metrics in |encoded_frame|.