OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/cast/sender/h264_vt_encoder.h" | 5 #include "media/cast/sender/h264_vt_encoder.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 | 8 |
9 #include <string> | 9 #include <string> |
10 #include <vector> | 10 #include <vector> |
(...skipping 25 matching lines...) Expand all Loading... |
36 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; | 36 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; |
37 | 37 |
38 InProgressFrameEncode(RtpTimeTicks rtp, | 38 InProgressFrameEncode(RtpTimeTicks rtp, |
39 base::TimeTicks r_time, | 39 base::TimeTicks r_time, |
40 VideoEncoder::FrameEncodedCallback callback) | 40 VideoEncoder::FrameEncodedCallback callback) |
41 : rtp_timestamp(rtp), | 41 : rtp_timestamp(rtp), |
42 reference_time(r_time), | 42 reference_time(r_time), |
43 frame_encoded_callback(callback) {} | 43 frame_encoded_callback(callback) {} |
44 }; | 44 }; |
45 | 45 |
| 46 base::ScopedCFTypeRef<CFDictionaryRef> |
| 47 DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) { |
| 48 return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate( |
| 49 kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks, |
| 50 &kCFTypeDictionaryValueCallBacks)); |
| 51 } |
| 52 |
| 53 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, |
| 54 CFTypeRef value) { |
| 55 CFTypeRef keys[1] = {key}; |
| 56 CFTypeRef values[1] = {value}; |
| 57 return DictionaryWithKeysAndValues(keys, values, 1); |
| 58 } |
| 59 |
| 60 base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) { |
| 61 std::vector<CFNumberRef> numbers; |
| 62 numbers.reserve(size); |
| 63 for (const int* end = v + size; v < end; ++v) |
| 64 numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v)); |
| 65 base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( |
| 66 kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), |
| 67 numbers.size(), &kCFTypeArrayCallBacks)); |
| 68 for (auto& number : numbers) { |
| 69 CFRelease(number); |
| 70 } |
| 71 return array; |
| 72 } |
| 73 |
| 74 template <typename NalSizeType> |
| 75 void CopyNalsToAnnexB(char* avcc_buffer, |
| 76 const size_t avcc_size, |
| 77 std::string* annexb_buffer) { |
| 78 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || |
| 79 sizeof(NalSizeType) == 4, |
| 80 "NAL size type has unsupported size"); |
| 81 static const char startcode_3[3] = {0, 0, 1}; |
| 82 DCHECK(avcc_buffer); |
| 83 DCHECK(annexb_buffer); |
| 84 size_t bytes_left = avcc_size; |
| 85 while (bytes_left > 0) { |
| 86 DCHECK_GT(bytes_left, sizeof(NalSizeType)); |
| 87 NalSizeType nal_size; |
| 88 base::ReadBigEndian(avcc_buffer, &nal_size); |
| 89 bytes_left -= sizeof(NalSizeType); |
| 90 avcc_buffer += sizeof(NalSizeType); |
| 91 |
| 92 DCHECK_GE(bytes_left, nal_size); |
| 93 annexb_buffer->append(startcode_3, sizeof(startcode_3)); |
| 94 annexb_buffer->append(avcc_buffer, nal_size); |
| 95 bytes_left -= nal_size; |
| 96 avcc_buffer += nal_size; |
| 97 } |
| 98 } |
| 99 |
// Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
// parameter sets for keyframes before the frame data as well.
// On any CoreMedia error, logs and returns, leaving |annexb_buffer| with
// whatever was appended so far.
void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
                                    std::string* annexb_buffer,
                                    bool keyframe) {
  // Perform two pass, one to figure out the total output size, and another to
  // copy the data after having performed a single output allocation. Note that
  // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
  // video NALs.

  OSStatus status;

  // Get the sample buffer's block buffer and format description.
  auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
  DCHECK(bb);
  auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
  DCHECK(fdesc);

  size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
  size_t total_bytes = bb_size;

  // Query the parameter-set count and the width of the AVCC NAL length
  // prefix. Index 0 with null out-params: only the count/size outputs are
  // wanted here.
  size_t pset_count;
  int nal_size_field_bytes;
  status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
      fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
  if (status ==
      CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
    // Fall back to a typical layout (SPS + PPS, 4-byte length prefix).
    // NOTE(review): assumed to match what the compression session emits —
    // confirm.
    DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
    pset_count = 2;
    nal_size_field_bytes = 4;
  } else if (status != noErr) {
    DLOG(ERROR)
        << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
        << status;
    return;
  }

  // First pass (sizing): add the parameter sets' contribution for keyframes.
  if (keyframe) {
    const uint8_t* pset;
    size_t pset_size;
    for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
      status =
          CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
              fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
      if (status != noErr) {
        DLOG(ERROR)
            << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
            << status;
        return;
      }
      total_bytes += pset_size + nal_size_field_bytes;
    }
  }

  // Single allocation for the whole output (see two-pass comment above).
  annexb_buffer->reserve(total_bytes);

  // Copy all parameter sets before keyframes.
  if (keyframe) {
    const uint8_t* pset;
    size_t pset_size;
    for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
      status =
          CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
              fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
      if (status != noErr) {
        DLOG(ERROR)
            << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
            << status;
        return;
      }
      // Parameter sets get the 4-byte start code (00 00 00 01).
      static const char startcode_4[4] = {0, 0, 0, 1};
      annexb_buffer->append(startcode_4, sizeof(startcode_4));
      annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
    }
  }

  // Block buffers can be composed of non-contiguous chunks. For the sake of
  // keeping this code simple, flatten non-contiguous block buffers.
  base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
      bb, base::scoped_policy::RETAIN);
  if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
    contiguous_bb.reset();
    status = CoreMediaGlue::CMBlockBufferCreateContiguous(
        kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0,
        contiguous_bb.InitializeInto());
    if (status != noErr) {
      DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
      return;
    }
  }

  // Copy all the NAL units. In the process convert them from AVCC format
  // (length header) to AnnexB format (start code).
  char* bb_data;
  status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr,
                                                      nullptr, &bb_data);
  if (status != noErr) {
    DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
    return;
  }

  // Dispatch on the length-prefix width reported (or assumed) above.
  if (nal_size_field_bytes == 1) {
    CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
  } else if (nal_size_field_bytes == 2) {
    CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
  } else if (nal_size_field_bytes == 4) {
    CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
  } else {
    NOTREACHED();
  }
}
| 211 |
46 } // namespace | 212 } // namespace |
47 | 213 |
48 class H264VideoToolboxEncoder::VideoFrameFactoryImpl | 214 class H264VideoToolboxEncoder::VideoFrameFactoryImpl |
49 : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>, | 215 : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>, |
50 public VideoFrameFactory { | 216 public VideoFrameFactory { |
51 public: | 217 public: |
52 // Type that proxies the VideoFrameFactory interface to this class. | 218 // Type that proxies the VideoFrameFactory interface to this class. |
53 class Proxy; | 219 class Proxy; |
54 | 220 |
55 VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder, | 221 VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder, |
(...skipping 162 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
218 CastEnvironment::MAIN, FROM_HERE, | 384 CastEnvironment::MAIN, FROM_HERE, |
219 base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING)); | 385 base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING)); |
220 | 386 |
221 // Destroy the current session, if any. | 387 // Destroy the current session, if any. |
222 DestroyCompressionSession(); | 388 DestroyCompressionSession(); |
223 | 389 |
224 // On OS X, allow the hardware encoder. Don't require it, it does not support | 390 // On OS X, allow the hardware encoder. Don't require it, it does not support |
225 // all configurations (some of which are used for testing). | 391 // all configurations (some of which are used for testing). |
226 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec; | 392 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec; |
227 #if !defined(OS_IOS) | 393 #if !defined(OS_IOS) |
228 encoder_spec = video_toolbox::DictionaryWithKeyValue( | 394 encoder_spec = DictionaryWithKeyValue( |
229 videotoolbox_glue_ | 395 videotoolbox_glue_ |
230 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder()
, | 396 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder()
, |
231 kCFBooleanTrue); | 397 kCFBooleanTrue); |
232 #endif | 398 #endif |
233 | 399 |
234 // Force 420v so that clients can easily use these buffers as GPU textures. | 400 // Force 420v so that clients can easily use these buffers as GPU textures. |
235 const int format[] = { | 401 const int format[] = { |
236 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; | 402 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; |
237 | 403 |
238 // Keep these attachment settings in-sync with those in ConfigureSession(). | 404 // Keep these attachment settings in-sync with those in ConfigureSession(). |
239 CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey, | 405 CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey, |
240 kCVImageBufferTransferFunctionKey, | 406 kCVImageBufferTransferFunctionKey, |
241 kCVImageBufferYCbCrMatrixKey}; | 407 kCVImageBufferYCbCrMatrixKey}; |
242 CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2, | 408 CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2, |
243 kCVImageBufferTransferFunction_ITU_R_709_2, | 409 kCVImageBufferTransferFunction_ITU_R_709_2, |
244 kCVImageBufferYCbCrMatrix_ITU_R_709_2}; | 410 kCVImageBufferYCbCrMatrix_ITU_R_709_2}; |
245 CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey, | 411 CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey, |
246 kCVBufferPropagatedAttachmentsKey}; | 412 kCVBufferPropagatedAttachmentsKey}; |
247 CFTypeRef buffer_attributes_values[] = { | 413 CFTypeRef buffer_attributes_values[] = { |
248 video_toolbox::ArrayWithIntegers(format, arraysize(format)).release(), | 414 ArrayWithIntegers(format, arraysize(format)).release(), |
249 video_toolbox::DictionaryWithKeysAndValues( | 415 DictionaryWithKeysAndValues(attachments_keys, attachments_values, |
250 attachments_keys, attachments_values, arraysize(attachments_keys)) | 416 arraysize(attachments_keys)).release()}; |
251 .release()}; | |
252 const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes = | 417 const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes = |
253 video_toolbox::DictionaryWithKeysAndValues( | 418 DictionaryWithKeysAndValues(buffer_attributes_keys, |
254 buffer_attributes_keys, buffer_attributes_values, | 419 buffer_attributes_values, |
255 arraysize(buffer_attributes_keys)); | 420 arraysize(buffer_attributes_keys)); |
256 for (auto& v : buffer_attributes_values) | 421 for (auto& v : buffer_attributes_values) |
257 CFRelease(v); | 422 CFRelease(v); |
258 | 423 |
259 // Create the compression session. | 424 // Create the compression session. |
260 | 425 |
261 // Note that the encoder object is given to the compression session as the | 426 // Note that the encoder object is given to the compression session as the |
262 // callback context using a raw pointer. The C API does not allow us to use a | 427 // callback context using a raw pointer. The C API does not allow us to use a |
263 // smart pointer, nor is this encoder ref counted. However, this is still | 428 // smart pointer, nor is this encoder ref counted. However, this is still |
264 // safe, because we 1) we own the compression session and 2) we tear it down | 429 // safe, because we 1) we own the compression session and 2) we tear it down |
265 // safely. When destructing the encoder, the compression session is flushed | 430 // safely. When destructing the encoder, the compression session is flushed |
(...skipping 26 matching lines...) Expand all Loading... |
292 base::scoped_policy::RETAIN); | 457 base::scoped_policy::RETAIN); |
293 video_frame_factory_->Update(pool, frame_size_); | 458 video_frame_factory_->Update(pool, frame_size_); |
294 | 459 |
295 // Notify that reinitialization is done. | 460 // Notify that reinitialization is done. |
296 cast_environment_->PostTask( | 461 cast_environment_->PostTask( |
297 CastEnvironment::MAIN, FROM_HERE, | 462 CastEnvironment::MAIN, FROM_HERE, |
298 base::Bind(status_change_cb_, STATUS_INITIALIZED)); | 463 base::Bind(status_change_cb_, STATUS_INITIALIZED)); |
299 } | 464 } |
300 | 465 |
// Applies the encoder configuration to the current compression session.
// SetSessionProperty() return values are deliberately ignored: configuration
// is best-effort, and the session keeps its defaults for any rejected key.
void H264VideoToolboxEncoder::ConfigureCompressionSession() {
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
      videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
  // Real-time mode: prioritize encode latency for streaming.
  SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
                     true);
  // No frame reordering, so the output has no forward (B-frame style)
  // dependencies; see the frame-dependency comment in CompressionCallback().
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
      false);
  // Force a keyframe at least every 240 frames / 240 time units.
  // NOTE(review): frames vs. seconds semantics taken from VideoToolbox docs —
  // confirm.
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240);
  SetSessionProperty(
      videotoolbox_glue_
          ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
      240);
  // TODO(jfroy): implement better bitrate control
  // https://crbug.com/425352
  // Until then, target the midpoint of the configured bitrate range.
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
      (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
      video_config_.max_frame_rate);
  // Keep these attachment settings in-sync with those in Initialize().
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
      kCVImageBufferColorPrimaries_ITU_R_709_2);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
      kCVImageBufferTransferFunction_ITU_R_709_2);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
      kCVImageBufferYCbCrMatrix_ITU_R_709_2);
  if (video_config_.max_number_of_video_buffers_used > 0) {
    SetSessionProperty(
        videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
        video_config_.max_number_of_video_buffers_used);
  }
}
342 | 505 |
343 void H264VideoToolboxEncoder::DestroyCompressionSession() { | 506 void H264VideoToolboxEncoder::DestroyCompressionSession() { |
344 DCHECK(thread_checker_.CalledOnValidThread()); | 507 DCHECK(thread_checker_.CalledOnValidThread()); |
345 | 508 |
346 // If the compression session exists, invalidate it. This blocks until all | 509 // If the compression session exists, invalidate it. This blocks until all |
347 // pending output callbacks have returned and any internal threads have | 510 // pending output callbacks have returned and any internal threads have |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
396 // frame into memory visible by the hardware encoder. The VideoFrame's | 559 // frame into memory visible by the hardware encoder. The VideoFrame's |
397 // lifetime is extended for the lifetime of the returned CVPixelBuffer. | 560 // lifetime is extended for the lifetime of the returned CVPixelBuffer. |
398 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame); | 561 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame); |
399 if (!pixel_buffer) { | 562 if (!pixel_buffer) { |
400 DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed."; | 563 DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed."; |
401 return false; | 564 return false; |
402 } | 565 } |
403 | 566 |
404 // Convert the frame timestamp to CMTime. | 567 // Convert the frame timestamp to CMTime. |
405 auto timestamp_cm = CoreMediaGlue::CMTimeMake( | 568 auto timestamp_cm = CoreMediaGlue::CMTimeMake( |
406 video_frame->timestamp().InMicroseconds(), USEC_PER_SEC); | 569 (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC); |
407 | 570 |
408 // Wrap information we'll need after the frame is encoded in a heap object. | 571 // Wrap information we'll need after the frame is encoded in a heap object. |
409 // We'll get the pointer back from the VideoToolbox completion callback. | 572 // We'll get the pointer back from the VideoToolbox completion callback. |
410 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode( | 573 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode( |
411 RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency), | 574 RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency), |
412 reference_time, frame_encoded_callback)); | 575 reference_time, frame_encoded_callback)); |
413 | 576 |
414 // Build a suitable frame properties dictionary for keyframes. | 577 // Build a suitable frame properties dictionary for keyframes. |
415 base::ScopedCFTypeRef<CFDictionaryRef> frame_props; | 578 base::ScopedCFTypeRef<CFDictionaryRef> frame_props; |
416 if (encode_next_frame_as_keyframe_) { | 579 if (encode_next_frame_as_keyframe_) { |
417 frame_props = video_toolbox::DictionaryWithKeyValue( | 580 frame_props = DictionaryWithKeyValue( |
418 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(), | 581 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(), |
419 kCFBooleanTrue); | 582 kCFBooleanTrue); |
420 encode_next_frame_as_keyframe_ = false; | 583 encode_next_frame_as_keyframe_ = false; |
421 } | 584 } |
422 | 585 |
423 // Submit the frame to the compression session. The function returns as soon | 586 // Submit the frame to the compression session. The function returns as soon |
424 // as the frame has been enqueued. | 587 // as the frame has been enqueued. |
425 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame( | 588 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame( |
426 compression_session_, pixel_buffer, timestamp_cm, | 589 compression_session_, pixel_buffer, timestamp_cm, |
427 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props, | 590 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props, |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
503 // Reset the compression session only if the frame size is not zero (which | 666 // Reset the compression session only if the frame size is not zero (which |
504 // will obviously fail). It is possible for the frame size to be zero if no | 667 // will obviously fail). It is possible for the frame size to be zero if no |
505 // frame was submitted for encoding or requested from the video frame factory | 668 // frame was submitted for encoding or requested from the video frame factory |
506 // before suspension. | 669 // before suspension. |
507 if (!frame_size_.IsEmpty()) { | 670 if (!frame_size_.IsEmpty()) { |
508 VLOG(1) << "OnResume: Resetting compression session."; | 671 VLOG(1) << "OnResume: Resetting compression session."; |
509 ResetCompressionSession(); | 672 ResetCompressionSession(); |
510 } | 673 } |
511 } | 674 } |
512 | 675 |
| 676 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, |
| 677 int32_t value) { |
| 678 base::ScopedCFTypeRef<CFNumberRef> cfvalue( |
| 679 CFNumberCreate(nullptr, kCFNumberSInt32Type, &value)); |
| 680 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key, |
| 681 cfvalue) == noErr; |
| 682 } |
| 683 |
| 684 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) { |
| 685 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse; |
| 686 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key, |
| 687 cfvalue) == noErr; |
| 688 } |
| 689 |
| 690 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, |
| 691 CFStringRef value) { |
| 692 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key, |
| 693 value) == noErr; |
| 694 } |
| 695 |
513 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, | 696 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, |
514 void* request_opaque, | 697 void* request_opaque, |
515 OSStatus status, | 698 OSStatus status, |
516 VTEncodeInfoFlags info, | 699 VTEncodeInfoFlags info, |
517 CMSampleBufferRef sbuf) { | 700 CMSampleBufferRef sbuf) { |
518 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque); | 701 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque); |
519 const scoped_ptr<InProgressFrameEncode> request( | 702 const scoped_ptr<InProgressFrameEncode> request( |
520 reinterpret_cast<InProgressFrameEncode*>(request_opaque)); | 703 reinterpret_cast<InProgressFrameEncode*>(request_opaque)); |
521 bool keyframe = false; | 704 bool keyframe = false; |
522 bool has_frame_data = false; | 705 bool has_frame_data = false; |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
561 // doesn't support the concept of forward-referencing frame dependencies or | 744 // doesn't support the concept of forward-referencing frame dependencies or |
562 // multiple frame dependencies; so pretend that all frames are only | 745 // multiple frame dependencies; so pretend that all frames are only |
563 // decodable after their immediately preceding frame is decoded. This will | 746 // decodable after their immediately preceding frame is decoded. This will |
564 // ensure a Cast receiver only attempts to decode the frames sequentially | 747 // ensure a Cast receiver only attempts to decode the frames sequentially |
565 // and in order. Furthermore, the encoder is configured to never use forward | 748 // and in order. Furthermore, the encoder is configured to never use forward |
566 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There | 749 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There |
567 // is no way to prevent multiple reference frames. | 750 // is no way to prevent multiple reference frames. |
568 encoded_frame->referenced_frame_id = frame_id - 1; | 751 encoded_frame->referenced_frame_id = frame_id - 1; |
569 } | 752 } |
570 | 753 |
571 if (has_frame_data) { | 754 if (has_frame_data) |
572 video_toolbox::CopySampleBufferToAnnexBBuffer(sbuf, keyframe, | 755 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe); |
573 &encoded_frame->data); | |
574 } | |
575 | 756 |
576 // TODO(miu): Compute and populate the |deadline_utilization| and | 757 // TODO(miu): Compute and populate the |deadline_utilization| and |
577 // |lossy_utilization| performance metrics in |encoded_frame|. | 758 // |lossy_utilization| performance metrics in |encoded_frame|. |
578 | 759 |
579 encoded_frame->encode_completion_time = | 760 encoded_frame->encode_completion_time = |
580 encoder->cast_environment_->Clock()->NowTicks(); | 761 encoder->cast_environment_->Clock()->NowTicks(); |
581 encoder->cast_environment_->PostTask( | 762 encoder->cast_environment_->PostTask( |
582 CastEnvironment::MAIN, FROM_HERE, | 763 CastEnvironment::MAIN, FROM_HERE, |
583 base::Bind(request->frame_encoded_callback, | 764 base::Bind(request->frame_encoded_callback, |
584 base::Passed(&encoded_frame))); | 765 base::Passed(&encoded_frame))); |
585 } | 766 } |
586 | 767 |
587 } // namespace cast | 768 } // namespace cast |
588 } // namespace media | 769 } // namespace media |
OLD | NEW |