OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "media/cast/sender/h264_vt_encoder.h" | |
6 | |
7 #include <algorithm> | |
8 #include <string> | |
9 | |
10 #include "base/big_endian.h" | |
11 #include "base/bind.h" | |
12 #include "base/bind_helpers.h" | |
13 #include "base/location.h" | |
14 #include "base/logging.h" | |
15 #include "media/base/mac/corevideo_glue.h" | |
16 | |
17 namespace media { | |
18 namespace cast { | |
19 | |
20 namespace { | |
21 | |
// Per-frame state that must outlive the asynchronous encode. A heap-allocated
// FrameContext is handed to VideoToolbox as the frame's opaque context pointer
// in EncodeVideoFrame() and reclaimed in CompressionCallback().
struct FrameContext {
  // Capture time of the source frame; carried through to the EncodedFrame.
  base::TimeTicks capture_time;
  // Run (via a task posted to the MAIN cast thread) once the frame is encoded.
  media::cast::VideoEncoder::FrameEncodedCallback frame_encoded_callback;
};
26 | |
27 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, | |
28 CFTypeRef value) { | |
29 CFTypeRef keys[1] = {key}; | |
30 CFTypeRef values[1] = {value}; | |
31 return base::ScopedCFTypeRef<CFDictionaryRef>( | |
32 CFDictionaryCreate(kCFAllocatorDefault, | |
33 keys, | |
34 values, | |
35 1, | |
36 &kCFTypeDictionaryKeyCallBacks, | |
37 &kCFTypeDictionaryValueCallBacks)); | |
38 } | |
39 | |
// CVPixelBuffer release callback for buffers wrapping a media::VideoFrame.
// Invoked when the wrapping CVPixelBuffer is destroyed: frees the calloc'd
// dummy plane descriptor passed as |data| and balances the manual AddRef()
// performed on the VideoFrame in WrapVideoFrame().
void VideoFramePixelBufferReleaseCallback(void* frame_ref,
                                          const void* data,
                                          size_t size,
                                          size_t num_planes,
                                          const void* planes[]) {
  // |data| is the heap-allocated planar descriptor from WrapVideoFrame().
  free(const_cast<void*>(data));
  // Drop the reference taken on the wrapped VideoFrame.
  reinterpret_cast<media::VideoFrame*>(frame_ref)->Release();
}
49 | |
50 template <typename NalSizeType> | |
51 void CopyNalsToAnnexB(char* avcc_buffer, | |
52 const size_t avcc_size, | |
53 std::string* annexb_buffer) { | |
54 COMPILE_ASSERT(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || | |
55 sizeof(NalSizeType) == 4, | |
56 "NAL size type has unsupported size"); | |
57 static const char startcode_3[3] = {0, 0, 1}; | |
58 DCHECK(avcc_buffer); | |
59 DCHECK(annexb_buffer); | |
60 size_t bytes_left = avcc_size; | |
61 while (bytes_left > 0) { | |
62 DCHECK_GT(bytes_left, sizeof(NalSizeType)); | |
63 NalSizeType nal_size; | |
64 base::ReadBigEndian(avcc_buffer, &nal_size); | |
65 bytes_left -= sizeof(NalSizeType); | |
66 avcc_buffer += sizeof(NalSizeType); | |
67 | |
68 DCHECK_GE(bytes_left, nal_size); | |
69 annexb_buffer->append(startcode_3, sizeof(startcode_3)); | |
70 annexb_buffer->append(avcc_buffer, nal_size); | |
71 bytes_left -= nal_size; | |
72 avcc_buffer += nal_size; | |
73 } | |
74 } | |
75 | |
// Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
// parameter sets for keyframes before the frame data as well. On any
// CoreMedia error the function logs and returns, leaving |annexb_buffer|
// possibly partially filled.
void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
                                    std::string* annexb_buffer,
                                    bool keyframe) {
  // Perform two pass, one to figure out the total output size, and another to
  // copy the data after having performed a single output allocation. Note that
  // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
  // video NALs.

  OSStatus status;

  // Get the sample buffer's block buffer and format description.
  auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
  DCHECK(bb);
  auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
  DCHECK(fdesc);

  size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
  size_t total_bytes = bb_size;

  // First pass: query the parameter-set count and the width of the AVCC NAL
  // length header so the output size can be estimated.
  size_t pset_count;
  int nal_size_field_bytes;
  status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
      fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
  if (status ==
      CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
    // NOTE(review): InvalidParameter is treated as recoverable — presumably
    // some OS versions reject the query-only form of this call; the fallback
    // matches typical VideoToolbox output (SPS + PPS, 4-byte lengths). Confirm.
    DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
    pset_count = 2;
    nal_size_field_bytes = 4;
  } else if (status != noErr) {
    DLOG(ERROR)
        << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
        << status;
    return;
  }

  // Account for the parameter sets that will be prepended to keyframes.
  if (keyframe) {
    const uint8_t* pset;
    size_t pset_size;
    for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
      status =
          CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
              fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
      if (status != noErr) {
        DLOG(ERROR)
            << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
            << status;
        return;
      }
      total_bytes += pset_size + nal_size_field_bytes;
    }
  }

  // Single allocation for the whole output (see the two-pass note above).
  annexb_buffer->reserve(total_bytes);

  // Copy all parameter sets before keyframes.
  if (keyframe) {
    const uint8_t* pset;
    size_t pset_size;
    for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
      status =
          CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
              fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
      if (status != noErr) {
        DLOG(ERROR)
            << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
            << status;
        return;
      }
      // Parameter sets get 4-byte Annex B start codes.
      static const char startcode_4[4] = {0, 0, 0, 1};
      annexb_buffer->append(startcode_4, sizeof(startcode_4));
      annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
    }
  }

  // Block buffers can be composed of non-contiguous chunks. For the sake of
  // keeping this code simple, flatten non-contiguous block buffers.
  base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
      bb, base::scoped_policy::RETAIN);
  if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
    contiguous_bb.reset();
    status = CoreMediaGlue::CMBlockBufferCreateContiguous(
        kCFAllocatorDefault,
        bb,
        kCFAllocatorDefault,
        nullptr,
        0,
        0,
        0,
        contiguous_bb.InitializeInto());
    if (status != noErr) {
      DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
      return;
    }
  }

  // Copy all the NAL units. In the process convert them from AVCC format
  // (length header) to AnnexB format (start code).
  char* bb_data;
  status = CoreMediaGlue::CMBlockBufferGetDataPointer(
      contiguous_bb, 0, nullptr, nullptr, &bb_data);
  if (status != noErr) {
    DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
    return;
  }

  // Dispatch on the length-header width discovered (or assumed) above.
  if (nal_size_field_bytes == 1) {
    CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
  } else if (nal_size_field_bytes == 2) {
    CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
  } else if (nal_size_field_bytes == 4) {
    CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
  } else {
    NOTREACHED();
  }
}
193 | |
194 } // namespace | |
195 | |
// Constructs the encoder and attempts to create/configure a VideoToolbox
// compression session. The resulting CastInitializationStatus is always
// reported asynchronously through |initialization_cb| on the MAIN cast thread.
H264VideoToolboxEncoder::H264VideoToolboxEncoder(
    scoped_refptr<CastEnvironment> cast_environment,
    const VideoSenderConfig& video_config,
    const CastInitializationCallback& initialization_cb)
    : cast_environment_(cast_environment),
      videotoolbox_glue_(VideoToolboxGlue::Get()),
      frame_id_(kStartFrameId),
      encode_next_frame_as_keyframe_(false) {
  DCHECK(!initialization_cb.is_null());
  CastInitializationStatus initialization_status;
  if (videotoolbox_glue_) {
    initialization_status = (Initialize(video_config))
                                ? STATUS_VIDEO_INITIALIZED
                                : STATUS_INVALID_VIDEO_CONFIGURATION;
  } else {
    // |videotoolbox_glue_| is null when VideoToolbox could not be loaded —
    // presumably an OS version without the framework; TODO confirm.
    LOG(ERROR) << " VideoToolbox is not available";
    initialization_status = STATUS_HW_VIDEO_ENCODER_NOT_SUPPORTED;
  }
  cast_environment_->PostTask(
      CastEnvironment::MAIN,
      FROM_HERE,
      base::Bind(initialization_cb, initialization_status));
}
219 | |
// Tears down the compression session; see Teardown() for the invalidation
// semantics that make destruction safe while encodes may be in flight.
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
  Teardown();
}
223 | |
// Returns the compression session's pixel buffer pool. Clients can allocate
// frames from this pool so EncodeVideoFrame() can use the frame's own
// CVPixelBuffer directly (see the opt-in note there). Requires a live session.
CVPixelBufferPoolRef H264VideoToolboxEncoder::GetCvPixelBufferPool() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(compression_session_);
  return videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool(
      compression_session_);
}
230 | |
// Creates the VideoToolbox compression session for |video_config| and applies
// the session configuration. Returns false if session creation fails.
bool H264VideoToolboxEncoder::Initialize(
    const VideoSenderConfig& video_config) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(!compression_session_);

  // Note that the encoder object is given to the compression session as the
  // callback context using a raw pointer. The C API does not allow us to use
  // a smart pointer, nor is this encoder ref counted. However, this is still
  // safe, because we 1) we own the compression session and 2) we tear it down
  // safely. When destructing the encoder, the compression session is flushed
  // and invalidated. Internally, VideoToolbox will join all of its threads
  // before returning to the client. Therefore, when control returns to us, we
  // are guaranteed that the output callback will not execute again.

  // On OS X, allow the hardware encoder. Don't require it, it does not support
  // all configurations (some of which are used for testing).
  base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
#if !defined(OS_IOS)
  encoder_spec = DictionaryWithKeyValue(
      videotoolbox_glue_
          ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
      kCFBooleanTrue);
#endif

  VTCompressionSessionRef session;
  OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
      kCFAllocatorDefault,
      video_config.width,
      video_config.height,
      CoreMediaGlue::kCMVideoCodecType_H264,
      encoder_spec,
      nullptr /* sourceImageBufferAttributes */,
      nullptr /* compressedDataAllocator */,
      &H264VideoToolboxEncoder::CompressionCallback,
      reinterpret_cast<void*>(this),
      &session);
  if (status != noErr) {
    DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
    return false;
  }
  // Transfer ownership of the raw session into the scoped holder.
  compression_session_.reset(session);

  ConfigureSession(video_config);

  return true;
}
277 | |
// Applies the cast video configuration to the compression session. Return
// values of SetSessionProperty are intentionally ignored: encoding proceeds
// with VideoToolbox defaults for any property that could not be set.
void H264VideoToolboxEncoder::ConfigureSession(
    const VideoSenderConfig& video_config) {
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
      videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
  SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
                     true);
  // Cast models frame dependencies as strictly sequential, so frame
  // reordering must be off (see the note in CompressionCallback).
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
      false);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240);
  SetSessionProperty(
      videotoolbox_glue_
          ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
      240);
  // TODO(jfroy): implement better bitrate control.
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
      (video_config.min_bitrate + video_config.max_bitrate) / 2);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
      video_config.max_frame_rate);
  // Tag the output stream with BT.709 colorimetry.
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
      kCVImageBufferColorPrimaries_ITU_R_709_2);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
      kCVImageBufferTransferFunction_ITU_R_709_2);
  SetSessionProperty(
      videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
      kCVImageBufferYCbCrMatrix_ITU_R_709_2);
}
311 | |
// Destroys the compression session, if one exists. Safe to call repeatedly;
// called from the destructor.
void H264VideoToolboxEncoder::Teardown() {
  DCHECK(thread_checker_.CalledOnValidThread());

  // If the compression session exists, invalidate it. This blocks until all
  // pending output callbacks have returned and any internal threads have
  // joined, ensuring no output callback ever sees a dangling encoder pointer.
  if (compression_session_) {
    videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
    compression_session_.reset();
  }
}
323 | |
324 bool H264VideoToolboxEncoder::EncodeVideoFrame( | |
325 const scoped_refptr<media::VideoFrame>& video_frame, | |
326 const base::TimeTicks& capture_time, | |
327 const FrameEncodedCallback& frame_encoded_callback) { | |
328 DCHECK(thread_checker_.CalledOnValidThread()); | |
329 DCHECK(!capture_time.is_null()); | |
330 | |
331 if (!compression_session_) { | |
332 DLOG(ERROR) << " compression session is null"; | |
333 return false; | |
334 } | |
335 | |
336 // Clients can opt-in to an optimization whereby frames are stored in pixel | |
337 // buffers owned by the encoder. This can eliminate a data copy on some | |
338 // hardware configurations. When this optimization is used, the VideoFrame | |
339 // will wrap a CVPixelBuffer which we attempt to get first. If that fails, | |
340 // then we must wrap the VideoFrame in a CVPixelBuffer as an adapter to the | |
341 // VideoToolbox API. The WrapVideoFrame function performs this operation. The | |
342 // VideoFrame reference count is incremented and the resulting CVPixelBuffer | |
343 // will release the VideoFrame when it itself is destroyed. Because encoding | |
344 // is asynchronous and the encoder can reference CVPixelBuffers for a period | |
345 // of time in order to perform inter-frame compression, CVPixelBuffers must | |
346 // "own" the VideoFrame they wrap, not the other way around. | |
347 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer( | |
348 video_frame->cv_pixel_buffer(), base::scoped_policy::RETAIN); | |
349 if (!pixel_buffer) { | |
350 pixel_buffer = WrapVideoFrame(video_frame); | |
351 if (!pixel_buffer) { | |
352 return false; | |
353 } | |
354 } | |
355 | |
356 auto timestamp_cm = CoreMediaGlue::CMTimeMake( | |
357 (capture_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC); | |
358 | |
359 scoped_ptr<FrameContext> frame_context(new FrameContext()); | |
360 frame_context->capture_time = capture_time; | |
361 frame_context->frame_encoded_callback = frame_encoded_callback; | |
362 | |
363 base::ScopedCFTypeRef<CFDictionaryRef> frame_props; | |
364 if (encode_next_frame_as_keyframe_) { | |
365 frame_props = DictionaryWithKeyValue( | |
366 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(), | |
367 kCFBooleanTrue); | |
368 encode_next_frame_as_keyframe_ = false; | |
369 } | |
370 | |
371 VTEncodeInfoFlags info; | |
372 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame( | |
373 compression_session_, | |
374 pixel_buffer, | |
375 timestamp_cm, | |
376 CoreMediaGlue::CMTime{0, 0, 0, 0}, | |
377 frame_props, | |
378 reinterpret_cast<void*>(frame_context.release()), | |
379 &info); | |
380 if (status != noErr) { | |
381 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status; | |
382 return false; | |
383 } | |
384 if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) { | |
385 DLOG(ERROR) << " frame dropped"; | |
386 return false; | |
387 } | |
388 | |
389 return true; | |
390 } | |
391 | |
// Part of the VideoEncoder interface; intentionally a no-op here.
void H264VideoToolboxEncoder::SetBitRate(int new_bit_rate) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // VideoToolbox does not seem to support bitrate reconfiguration.
}
396 | |
// Requests that the next submitted frame be force-encoded as a keyframe
// (applied in EncodeVideoFrame via kVTEncodeFrameOptionKey_ForceKeyFrame).
void H264VideoToolboxEncoder::GenerateKeyFrame() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(compression_session_);

  encode_next_frame_as_keyframe_ = true;
}
403 | |
// Part of the VideoEncoder interface; a no-op because reference-frame
// selection cannot be controlled through VideoToolbox.
void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
  // Not supported by VideoToolbox in any meaningful manner.
}
407 | |
408 base::ScopedCFTypeRef<CVPixelBufferRef> H264VideoToolboxEncoder::WrapVideoFrame( | |
409 const scoped_refptr<media::VideoFrame>& frame) { | |
410 static const size_t MAX_PLANES = 3; | |
411 | |
412 media::VideoFrame::Format video_frame_format = frame->format(); | |
413 size_t num_planes = media::VideoFrame::NumPlanes(video_frame_format); | |
414 DCHECK_LE(num_planes, MAX_PLANES); | |
415 gfx::Size coded_size = frame->coded_size(); | |
416 | |
417 // media::VideoFrame only supports YUV formats, so there is no way to | |
418 // leverage VideoToolbox's ability to convert RGBA formats automatically. In | |
419 // addition, most of the media::VideoFrame formats are YVU, which VT does not | |
420 // support. Finally, media::VideoFrame formats do not carry any information | |
421 // about the color space, transform or any other colorimetric information | |
422 // that is generally needed to fully specify the input data. So essentially | |
423 // require that the input be YCbCr 4:2:0 (either planar or biplanar) and | |
424 // assume the standard video dynamic range for samples (although most modern | |
425 // HDTVs support full-range video these days). | |
426 OSType cv_format; | |
427 if (video_frame_format == media::VideoFrame::Format::I420) { | |
428 cv_format = kCVPixelFormatType_420YpCbCr8Planar; | |
429 } else if (video_frame_format == media::VideoFrame::Format::NV12) { | |
430 // TODO(jfroy): Use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange when the | |
431 // minimum OS X and iOS SDKs permits it. | |
432 cv_format = '420v'; | |
433 } else { | |
434 DLOG(ERROR) << " unsupported frame format: " << video_frame_format; | |
435 return base::ScopedCFTypeRef<CVPixelBufferRef>(nullptr); | |
436 } | |
437 | |
438 // TODO(jfroy): Support extended pixels (i.e. padding). | |
439 if (frame->coded_size() != frame->visible_rect().size()) { | |
440 DLOG(ERROR) << " frame with extended pixels not supported: " | |
441 << " coded_size: " << coded_size.ToString() | |
442 << ", visible_rect: " << frame->visible_rect().ToString(); | |
443 return base::ScopedCFTypeRef<CVPixelBufferRef>(nullptr); | |
444 } | |
445 | |
446 void* plane_ptrs[MAX_PLANES]; | |
447 size_t plane_widths[MAX_PLANES]; | |
448 size_t plane_heights[MAX_PLANES]; | |
449 size_t plane_bytes_per_row[MAX_PLANES]; | |
450 for (size_t plane_i = 0; plane_i < num_planes; ++plane_i) { | |
451 plane_ptrs[plane_i] = frame->data(plane_i); | |
452 gfx::Size plane_size = | |
453 media::VideoFrame::PlaneSize(video_frame_format, plane_i, coded_size); | |
454 plane_widths[plane_i] = plane_size.width(); | |
455 plane_heights[plane_i] = plane_size.height(); | |
456 plane_bytes_per_row[plane_i] = frame->stride(plane_i); | |
457 } | |
458 | |
459 // CVPixelBufferCreateWithPlanarBytes needs a dummy plane descriptor or the | |
460 // release callback will not execute. The descriptor is freed in the callback. | |
461 void* descriptor = calloc( | |
462 1, | |
463 std::max(sizeof(CVPlanarPixelBufferInfo_YCbCrPlanar), | |
464 sizeof(CoreVideoGlue::CVPlanarPixelBufferInfo_YCbCrBiPlanar))); | |
465 | |
466 // Wrap the frame's data in a CVPixelBuffer. Because this is a C API, we can't | |
467 // give it a smart pointer to the frame, so instead pass a raw pointer and | |
468 // increment the frame's reference count manually. | |
469 CVPixelBufferRef pixel_buffer; | |
470 CVReturn result = | |
471 CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault, | |
472 coded_size.width(), | |
473 coded_size.height(), | |
474 cv_format, | |
475 descriptor, | |
476 0, | |
477 num_planes, | |
478 plane_ptrs, | |
479 plane_widths, | |
480 plane_heights, | |
481 plane_bytes_per_row, | |
482 &VideoFramePixelBufferReleaseCallback, | |
483 frame.get(), | |
484 nullptr, | |
485 &pixel_buffer); | |
486 if (result != kCVReturnSuccess) { | |
487 DLOG(ERROR) << " CVPixelBufferCreateWithPlanarBytes failed: " << result; | |
488 return base::ScopedCFTypeRef<CVPixelBufferRef>(nullptr); | |
489 } | |
490 | |
491 // The CVPixelBuffer now references the data of the frame, so increment its | |
492 // reference count manually. The release callback set on the pixel buffer will | |
493 // release the frame. | |
494 frame->AddRef(); | |
495 | |
496 return base::ScopedCFTypeRef<CVPixelBufferRef>(pixel_buffer); | |
497 } | |
498 | |
499 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, | |
500 int32_t value) { | |
501 base::ScopedCFTypeRef<CFNumberRef> cfvalue( | |
502 CFNumberCreate(nullptr, kCFNumberSInt32Type, &value)); | |
503 return videotoolbox_glue_->VTSessionSetProperty( | |
504 compression_session_, key, cfvalue) == noErr; | |
505 } | |
506 | |
507 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) { | |
508 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse; | |
509 return videotoolbox_glue_->VTSessionSetProperty( | |
510 compression_session_, key, cfvalue) == noErr; | |
511 } | |
512 | |
513 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, | |
514 CFStringRef value) { | |
515 return videotoolbox_glue_->VTSessionSetProperty( | |
516 compression_session_, key, value) == noErr; | |
517 } | |
518 | |
// VideoToolbox compression output callback. VideoToolbox invokes it serially
// (see the frame-id note below), on a VideoToolbox-managed thread. Takes
// ownership of the FrameContext allocated in EncodeVideoFrame() via
// |frame_opaque|; the context is freed when this function returns.
void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
                                                  void* frame_opaque,
                                                  OSStatus status,
                                                  VTEncodeInfoFlags info,
                                                  CMSampleBufferRef sbuf) {
  auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
  // Reclaim ownership of the per-frame context so it is freed on every path.
  scoped_ptr<FrameContext> frame_context(
      reinterpret_cast<FrameContext*>(frame_opaque));

  // Failed or dropped frames produce no output; the client callback is never
  // run for them.
  if (status != noErr) {
    DLOG(ERROR) << " encoding failed: " << status;
    return;
  }
  if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
    DVLOG(2) << " frame dropped";
    return;
  }

  // Inspect the first (only) sample's attachments to classify the frame.
  auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
      CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0));

  // If the NotSync key is not present, it implies Sync, which indicates a
  // keyframe (at least I think, VT documentation is, erm, sparse). Could
  // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
  bool keyframe =
      !CFDictionaryContainsKey(sample_attachments,
                               CoreMediaGlue::kCMSampleAttachmentKey_NotSync());

  // Increment the encoder-scoped frame id and assign the new value to this
  // frame. VideoToolbox calls the output callback serially, so this is safe.
  uint32 frame_id = ++encoder->frame_id_;

  scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
  encoded_frame->frame_id = frame_id;
  encoded_frame->reference_time = frame_context->capture_time;
  encoded_frame->rtp_timestamp =
      GetVideoRtpTimestamp(frame_context->capture_time);
  if (keyframe) {
    // Keyframes reference themselves.
    encoded_frame->dependency = EncodedFrame::KEY;
    encoded_frame->referenced_frame_id = frame_id;
  } else {
    encoded_frame->dependency = EncodedFrame::DEPENDENT;
    // H.264 supports complex frame reference schemes (multiple reference
    // frames, slice references, backward and forward references, etc). Cast
    // doesn't support the concept of forward-referencing frame dependencies or
    // multiple frame dependencies; so pretend that all frames are only
    // decodable after their immediately preceding frame is decoded. This will
    // ensure a Cast receiver only attempts to decode the frames sequentially
    // and in order. Furthermore, the encoder is configured to never use forward
    // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There
    // is no way to prevent multiple reference frames.
    encoded_frame->referenced_frame_id = frame_id - 1;
  }

  // Convert the sample's AVCC payload to Annex B bytes for transport.
  CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);

  // Deliver the result to the client on the MAIN cast thread.
  encoder->cast_environment_->PostTask(
      CastEnvironment::MAIN,
      FROM_HERE,
      base::Bind(frame_context->frame_encoded_callback,
                 base::Passed(&encoded_frame)));
}
581 | |
582 } // namespace cast | |
583 } // namespace media | |
OLD | NEW |