Chromium Code Reviews

Side by Side Diff: media/cast/sender/h264_vt_encoder.cc

Issue 450693006: VideoToolbox encoder for cast senders. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase build files on top of media_for_cast_ios component defined in 581803003. Created 6 years, 3 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/cast/sender/h264_vt_encoder.h"
6
7 #include <algorithm>
8
9 #include "base/big_endian.h"
10 #include "base/bind.h"
11 #include "base/bind_helpers.h"
12 #include "base/location.h"
13 #include "base/logging.h"
14 #include "media/base/mac/corevideo_glue.h"
15
16 namespace media {
17 namespace cast {
18
19 namespace {
20
21 struct FrameContext {
22 base::TimeTicks capture_time;
23 media::cast::VideoEncoder::FrameEncodedCallback frame_encoded_callback;
24 };
25
26 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key,
27 CFTypeRef value) {
28 CFTypeRef keys[1] = {key};
29 CFTypeRef values[1] = {value};
30 return base::ScopedCFTypeRef<CFDictionaryRef>(
31 CFDictionaryCreate(kCFAllocatorDefault,
32 keys,
33 values,
34 1,
35 &kCFTypeDictionaryKeyCallBacks,
36 &kCFTypeDictionaryValueCallBacks));
37 }
38
39 template <typename NalSizeType>
40 void CopyNalsToAnnexB(char* avcc_buffer,
41 const size_t avcc_size,
42 std::string* annexb_buffer) {
43 COMPILE_ASSERT(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
44 sizeof(NalSizeType) == 4,
45 "NAL size type has unsupported size");
46 static const char startcode_3[3] = {0, 0, 1};
47 DCHECK(avcc_buffer);
48 DCHECK(annexb_buffer);
49 size_t bytes_left = avcc_size;
50 while (bytes_left > 0) {
51 DCHECK_GT(bytes_left, sizeof(NalSizeType));
52 NalSizeType nal_size;
53 base::ReadBigEndian(avcc_buffer, &nal_size);
54 bytes_left -= sizeof(NalSizeType);
55 avcc_buffer += sizeof(NalSizeType);
56
57 DCHECK_GE(bytes_left, nal_size);
58 annexb_buffer->append(startcode_3, sizeof(startcode_3));
59 annexb_buffer->append(avcc_buffer, nal_size);
60 bytes_left -= nal_size;
61 avcc_buffer += nal_size;
62 }
63 }
64
65 // Copy an H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies
66 // parameter sets for keyframes before the frame data as well.
67 void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf,
68 std::string* annexb_buffer,
69 bool keyframe) {
70 // Perform two passes: one to figure out the total output size, and another to
71 // copy the data after having performed a single output allocation. Note that
72 // we'll allocate a bit more because we'll count 4 bytes instead of 3 for
73 // video NALs.
74
75 OSStatus status;
76
77 // Get the sample buffer's block buffer and format description.
78 CoreMediaGlue::CMBlockBufferRef bb =
79 CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf);
80 DCHECK(bb);
81 CoreMediaGlue::CMFormatDescriptionRef fdesc =
82 CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf);
83 DCHECK(fdesc);
84
85 size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb);
86 size_t total_bytes = bb_size;
87
88 size_t pset_count;
89 int nal_size_field_bytes;
90 status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
91 fdesc, 0, NULL, NULL, &pset_count, &nal_size_field_bytes);
92 if (status ==
93 CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) {
94 DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header";
95 pset_count = 2;
96 nal_size_field_bytes = 4;
97 } else if (status != noErr) {
98 DLOG(ERROR)
99 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
100 << status;
101 return;
102 }
103
104 if (keyframe) {
105 const uint8_t* pset;
106 size_t pset_size;
107 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
108 status =
109 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
110 fdesc, pset_i, &pset, &pset_size, NULL, NULL);
111 if (status != noErr) {
112 DLOG(ERROR)
113 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
114 << status;
115 return;
116 }
117 total_bytes += pset_size + nal_size_field_bytes;
118 }
119 }
120
121 annexb_buffer->reserve(total_bytes);
122
123 // Copy all parameter sets before keyframes.
124 if (keyframe) {
125 const uint8_t* pset;
126 size_t pset_size;
127 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
128 status =
129 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
130 fdesc, pset_i, &pset, &pset_size, NULL, NULL);
131 if (status != noErr) {
132 DLOG(ERROR)
133 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
134 << status;
135 return;
136 }
137 static const char startcode_4[4] = {0, 0, 0, 1};
138 annexb_buffer->append(startcode_4, sizeof(startcode_4));
139 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
140 }
141 }
142
143 // Block buffers can be composed of non-contiguous chunks. For the sake of
144 // keeping this code simple, flatten non-contiguous block buffers.
145 base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb(
146 bb, base::scoped_policy::RETAIN);
147 if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
148 contiguous_bb.reset();
149 status = CoreMediaGlue::CMBlockBufferCreateContiguous(
150 kCFAllocatorDefault,
151 bb,
152 kCFAllocatorDefault,
153 NULL,
154 0,
155 0,
156 0,
157 contiguous_bb.InitializeInto());
158 if (status != noErr) {
159 DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
160 return;
161 }
162 }
163
164 // Copy all the NAL units. In the process convert them from AVCC format
165 // (length header) to AnnexB format (start code).
166 char* bb_data;
167 status = CoreMediaGlue::CMBlockBufferGetDataPointer(
168 contiguous_bb, 0, NULL, NULL, &bb_data);
169 if (status != noErr) {
170 DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
171 return;
172 }
173
174 if (nal_size_field_bytes == 1) {
175 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
176 } else if (nal_size_field_bytes == 2) {
177 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
178 } else if (nal_size_field_bytes == 4) {
179 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
180 }
miu 2014/09/24 00:10:13 For safety: } else { NOTREACHED(); } Or,
jfroy 2014/09/24 01:00:58 No, those are the only valid values per the H264 spec.
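For reference, a minimal sketch of the defensive dispatch miu suggests above; behavior is identical for the values the H.264/AVCC format allows, and the extra branch only fires if the format description ever reports a NAL length field size other than 1, 2, or 4 bytes:

  if (nal_size_field_bytes == 1) {
    CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
  } else if (nal_size_field_bytes == 2) {
    CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
  } else if (nal_size_field_bytes == 4) {
    CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
  } else {
    // Not expected: the AVCC NAL length field is 1, 2, or 4 bytes wide.
    NOTREACHED();
  }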
181 }
182
183 } // namespace
184
185 H264VideoToolboxEncoder::H264VideoToolboxEncoder(
186 scoped_refptr<CastEnvironment> cast_environment,
187 const VideoSenderConfig& video_config,
188 const CastInitializationCallback& initialization_cb)
189 : cast_environment_(cast_environment),
190 videotoolbox_glue_(VideoToolboxGlue::Get()),
191 frame_id_(kStartFrameId),
192 encode_next_frame_as_keyframe_(false) {
193 DCHECK(!initialization_cb.is_null());
194 CastInitializationStatus initialization_status;
195 if (videotoolbox_glue_) {
196 initialization_status = (Initialize(video_config))
197 ? STATUS_VIDEO_INITIALIZED
198 : STATUS_INVALID_VIDEO_CONFIGURATION;
199 } else {
200 LOG(ERROR) << " VideoToolbox is not available";
201 initialization_status = STATUS_HW_VIDEO_ENCODER_NOT_SUPPORTED;
202 }
203 cast_environment_->PostTask(
204 CastEnvironment::MAIN,
205 FROM_HERE,
206 base::Bind(initialization_cb, initialization_status));
207 }
208
209 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
210 Teardown();
211 }
212
213 CVPixelBufferPoolRef H264VideoToolboxEncoder::cv_pixel_buffer_pool() const {
214 DCHECK(thread_checker_.CalledOnValidThread());
215 DCHECK(compression_session_);
216 return videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool(
217 compression_session_);
218 }
219
220 bool H264VideoToolboxEncoder::Initialize(
221 const VideoSenderConfig& video_config) {
222 DCHECK(thread_checker_.CalledOnValidThread());
223 DCHECK(!compression_session_);
224
225 // Note that the encoder object is given to the compression session as the
226 // callback context using a raw pointer. The C API does not allow us to use
227 // a smart pointer, nor is this encoder ref counted. However, this is still
228 // safe, because 1) we own the compression session and 2) we tear it down
229 // safely. When destructing the encoder, the compression session is flushed
230 // and invalidated. Internally, VideoToolbox will join all of its threads
231 // before returning to the client. Therefore, when control returns to us, we
232 // are guaranteed that the output callback will not execute again.
233
234 // On OS X, allow the hardware encoder. Don't require it; it does not support
235 // all configurations (some of which are used for testing).
236 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
237 #if !defined(OS_IOS)
238 encoder_spec = DictionaryWithKeyValue(
239 videotoolbox_glue_
240 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
241 kCFBooleanTrue);
242 #endif
243
244 VTCompressionSessionRef session;
245 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
246 kCFAllocatorDefault,
247 video_config.width,
248 video_config.height,
249 CoreMediaGlue::kCMVideoCodecType_H264,
250 encoder_spec,
251 NULL /* sourceImageBufferAttributes */,
252 NULL /* compressedDataAllocator */,
253 CompressionCallback,
miu 2014/09/24 00:10:13 Style nit (function pointer): &H264VideoToolboxEncoder::CompressionCallback
254 reinterpret_cast<void*>(this),
255 &session);
256 if (status != noErr) {
257 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
258 return false;
259 }
260 compression_session_.reset(session);
261
262 ConfigureSession(video_config);
263
264 return true;
265 }
266
267 void H264VideoToolboxEncoder::ConfigureSession(
268 const VideoSenderConfig& video_config) {
269 SetSessionProperty(
270 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
271 videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel());
272 SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(),
273 true);
274 SetSessionProperty(
275 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
276 false);
277 SetSessionProperty(
278 videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(),
279 240u);
280 SetSessionProperty(
281 videotoolbox_glue_
282 ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(),
283 240u);
284 SetSessionProperty(
285 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
286 static_cast<uint32_t>(video_config.start_bitrate));
miu 2014/09/24 00:10:13 Since bit rate change is not supported, should thi
jfroy 2014/09/24 01:00:58 Yeah probably. My producer code currently has min
287 SetSessionProperty(
288 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
289 static_cast<uint32_t>(video_config.max_frame_rate));
290 SetSessionProperty(
291 videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
292 kCVImageBufferColorPrimaries_ITU_R_709_2);
293 SetSessionProperty(
294 videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
295 kCVImageBufferTransferFunction_ITU_R_709_2);
296 SetSessionProperty(
297 videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
298 kCVImageBufferYCbCrMatrix_ITU_R_709_2);
299 }
300
301 void H264VideoToolboxEncoder::Teardown() {
302 DCHECK(thread_checker_.CalledOnValidThread());
303
304 // If the compression session exists, invalidate it. This blocks until all
305 // pending output callbacks have returned and any internal threads have
306 // joined, ensuring no output callback ever sees a dangling encoder pointer.
307 if (compression_session_) {
308 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
309 compression_session_.reset();
310 }
311 }
312
313 bool H264VideoToolboxEncoder::EncodeVideoFrame(
314 const scoped_refptr<media::VideoFrame>& video_frame,
315 const base::TimeTicks& capture_time,
316 const FrameEncodedCallback& frame_encoded_callback) {
317 DCHECK(thread_checker_.CalledOnValidThread());
318 DCHECK(!capture_time.is_null());
319
320 if (!compression_session_) {
321 DLOG(ERROR) << " compression session is null";
322 return false;
323 }
324
325 // Clients can opt in to an optimization whereby frames are stored in pixel
326 // buffers owned by the encoder. This can eliminate a data copy on some
327 // hardware configurations. When this optimization is used, the VideoFrame
328 // will wrap a CVPixelBuffer which we attempt to get first. If that fails,
329 // then we must wrap the VideoFrame in a CVPixelBuffer as an adapter to the
330 // VideoToolbox API. The WrapVideoFrame function performs this operation. The
331 // VideoFrame reference count is incremented and the resulting CVPixelBuffer
332 // will release the VideoFrame when it itself is destroyed. Because encoding
333 // is asynchronous and the encoder can reference CVPixelBuffers for a period
334 // of time in order to perform inter-frame compression, CVPixelBuffers must
335 // "own" the VideoFrame they wrap, not the other way around.
336 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer(
337 video_frame->cv_pixel_buffer(), base::scoped_policy::RETAIN);
338 if (!pixel_buffer) {
339 pixel_buffer = WrapVideoFrame(video_frame);
340 if (!pixel_buffer) {
341 return false;
342 }
343 }
344
345 CoreMediaGlue::CMTime timestamp_cm = CoreMediaGlue::CMTimeMake(
346 (capture_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
347
348 scoped_ptr<FrameContext> frame_context(new FrameContext());
349 frame_context->capture_time = capture_time;
350 frame_context->frame_encoded_callback = frame_encoded_callback;
351
352 base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
353 if (encode_next_frame_as_keyframe_) {
354 frame_props = DictionaryWithKeyValue(
355 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
356 kCFBooleanTrue);
357 encode_next_frame_as_keyframe_ = false;
358 }
359
360 VTEncodeInfoFlags info;
361 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
362 compression_session_,
363 pixel_buffer,
364 timestamp_cm,
365 CoreMediaGlue::CMTime{0, 0, 0, 0},
366 frame_props,
367 reinterpret_cast<void*>(frame_context.release()),
368 &info);
369 if (status != noErr) {
370 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
371 return false;
372 }
373 if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
374 DLOG(ERROR) << " frame dropped";
375 return false;
376 }
377
378 return true;
379 }
380
381 void H264VideoToolboxEncoder::SetBitRate(int new_bit_rate) {
382 DCHECK(thread_checker_.CalledOnValidThread());
383 // VideoToolbox does not seem to support bitrate reconfiguration.
384 }
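Purely as an illustration (not part of the patch): if VideoToolbox did honor mid-session updates of kVTCompressionPropertyKey_AverageBitRate, which the comment above says it does not seem to, the existing SetSessionProperty helper could express the change roughly as:

  void H264VideoToolboxEncoder::SetBitRate(int new_bit_rate) {
    DCHECK(thread_checker_.CalledOnValidThread());
    // Hypothetical mid-session update of the average bitrate; the patch
    // intentionally leaves this unimplemented.
    SetSessionProperty(
        videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
        static_cast<uint32_t>(new_bit_rate));
  }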
385
386 void H264VideoToolboxEncoder::GenerateKeyFrame() {
387 DCHECK(thread_checker_.CalledOnValidThread());
388 DCHECK(compression_session_);
389
390 encode_next_frame_as_keyframe_ = true;
391 }
392
393 void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
394 // Not supported by VideoToolbox in any meaningful manner.
395 }
396
397 static void VideoFramePixelBufferReleaseCallback(void* frame_ref,
398 const void* data,
399 size_t size,
400 size_t num_planes,
401 const void* planes[]) {
402 free(const_cast<void*>(data));
403 reinterpret_cast<media::VideoFrame*>(frame_ref)->Release();
404 }
405
406 base::ScopedCFTypeRef<CVPixelBufferRef> H264VideoToolboxEncoder::WrapVideoFrame(
407 const scoped_refptr<media::VideoFrame>& frame) {
408 static const size_t MAX_PLANES = 3;
409
410 media::VideoFrame::Format format = frame->format();
411 size_t num_planes = media::VideoFrame::NumPlanes(format);
412 DCHECK_LE(num_planes, MAX_PLANES);
413 gfx::Size coded_size = frame->coded_size();
414
415 // media::VideoFrame only supports YUV formats, so there is no way to
416 // leverage VideoToolbox's ability to convert RGBA formats automatically. In
417 // addition, most of the media::VideoFrame formats are YVU, which VT does not
418 // support. Finally, media::VideoFrame formats do not carry any information
419 // about the color space, transform or any other colorimetric information
420 // that is generally needed to fully specify the input data. So essentially
421 // require that the input be YCbCr 4:2:0 (either planar or biplanar) and
422 // assume the standard video dynamic range for samples (although most modern
423 // HDTVs support full-range video these days).
424 OSType pixel_format;
425 if (format == media::VideoFrame::Format::I420) {
426 pixel_format = kCVPixelFormatType_420YpCbCr8Planar;
427 } else if (format == media::VideoFrame::Format::NV12) {
428 // TODO(jfroy): Use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange when the
429 // minimum OS X and iOS SDKs permits it.
430 pixel_format = '420v';
431 } else {
432 DLOG(ERROR) << " unsupported frame format: " << format;
433 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL);
434 }
435
436 // TODO(jfroy): Support extended pixels (i.e. padding).
437 if (frame->coded_size() != frame->visible_rect().size()) {
438 DLOG(ERROR) << " frame with extended pixels not supported: "
439 << " coded_size: " << coded_size.ToString()
440 << ", visible_rect: " << frame->visible_rect().ToString();
441 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL);
442 }
443
444 void* plane_ptrs[MAX_PLANES];
445 size_t plane_widths[MAX_PLANES];
446 size_t plane_heights[MAX_PLANES];
447 size_t plane_bytes_per_row[MAX_PLANES];
448 for (size_t plane_i = 0; plane_i < num_planes; ++plane_i) {
449 plane_ptrs[plane_i] = frame->data(plane_i);
450 gfx::Size plane_size =
451 media::VideoFrame::PlaneSize(format, plane_i, coded_size);
452 plane_widths[plane_i] = plane_size.width();
453 plane_heights[plane_i] = plane_size.height();
454 plane_bytes_per_row[plane_i] = frame->stride(plane_i);
455 }
456
457 // CVPixelBufferCreateWithPlanarBytes needs a dummy plane descriptor or the
458 // release callback will not execute. The descriptor is freed in the callback.
459 void* descriptor = calloc(
460 1,
461 std::max(sizeof(CVPlanarPixelBufferInfo_YCbCrPlanar),
462 sizeof(CoreVideoGlue::CVPlanarPixelBufferInfo_YCbCrBiPlanar)));
463
464 // Wrap the frame's data in a CVPixelBuffer. Because this is a C API, we can't
465 // give it a smart pointer to the frame, so instead pass a raw pointer and
466 // increment the frame's reference count manually.
467 CVPixelBufferRef pixel_buffer;
468 CVReturn result =
469 CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault,
470 coded_size.width(),
471 coded_size.height(),
472 format,
miu 2014/09/24 00:10:13 Should this be pixel_format instead?
jfroy 2014/09/24 01:00:58 Wow yeah. Good catch. Thanks for nothing, compiler
473 descriptor,
474 0,
475 num_planes,
476 plane_ptrs,
477 plane_widths,
478 plane_heights,
479 plane_bytes_per_row,
480 VideoFramePixelBufferReleaseCallback,
miu 2014/09/24 00:10:13 nit: &VideoFramePixelBufferReleaseCallback ^^^
481 frame.get(),
482 NULL,
483 &pixel_buffer);
484 if (result != kCVReturnSuccess) {
485 DLOG(ERROR) << " CVPixelBufferCreateWithPlanarBytes failed: " << result;
486 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL);
487 }
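Per the pixel_format exchange above, a sketch of the corrected call; only the pixel format argument (and the explicit address-of on the release callback, per the other nit) changes from the patch as posted:

  CVReturn result =
      CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault,
                                         coded_size.width(),
                                         coded_size.height(),
                                         pixel_format,  // Was |format|.
                                         descriptor,
                                         0,
                                         num_planes,
                                         plane_ptrs,
                                         plane_widths,
                                         plane_heights,
                                         plane_bytes_per_row,
                                         &VideoFramePixelBufferReleaseCallback,
                                         frame.get(),
                                         NULL,
                                         &pixel_buffer);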
488
489 // The CVPixelBuffer now references the data of the frame, so increment its
490 // reference count manually. The release callback set on the pixel buffer will
491 // release the frame.
492 frame->AddRef();
493
494 return base::ScopedCFTypeRef<CVPixelBufferRef>(pixel_buffer);
495 }
496
497 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
498 uint32_t value) {
miu 2014/09/24 00:10:12 This should probably be a signed int32_t instead,
jfroy 2014/09/24 01:00:58 Fair enough.
499 base::ScopedCFTypeRef<CFNumberRef> cfvalue(
500 CFNumberCreate(NULL, kCFNumberSInt32Type, &value));
501 return videotoolbox_glue_->VTSessionSetProperty(
502 compression_session_, key, cfvalue) == noErr;
503 }
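A sketch of the signed overload miu suggests above (assuming the matching declaration in h264_vt_encoder.h is updated as well); it mirrors the existing uint32_t overload, with the value type now matching kCFNumberSInt32Type:

  bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
                                                   int32_t value) {
    base::ScopedCFTypeRef<CFNumberRef> cfvalue(
        CFNumberCreate(NULL, kCFNumberSInt32Type, &value));
    return videotoolbox_glue_->VTSessionSetProperty(
        compression_session_, key, cfvalue) == noErr;
  }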
504
505 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
506 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
507 return videotoolbox_glue_->VTSessionSetProperty(
508 compression_session_, key, cfvalue) == noErr;
509 }
510
511 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
512 CFStringRef value) {
513 return videotoolbox_glue_->VTSessionSetProperty(
514 compression_session_, key, value) == noErr;
515 }
516
517 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
518 void* frame_opaque,
519 OSStatus status,
520 VTEncodeInfoFlags info,
521 CMSampleBufferRef sbuf) {
522 H264VideoToolboxEncoder* encoder =
523 reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
524 scoped_ptr<FrameContext> frame_context(
525 reinterpret_cast<FrameContext*>(frame_opaque));
526
527 if (status != noErr) {
528 DLOG(ERROR) << " encoding failed: " << status;
529 return;
530 }
531 if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
532 DVLOG(2) << " frame dropped";
533 return;
534 }
535
536 CFDictionaryRef sample_attachments =
537 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
538 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
539 0));
540
541 // If the NotSync key is not present, it implies Sync, which indicates a
542 // keyframe (at least I think, VT documentation is, erm, sparse). Could
543 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
544 bool keyframe = CFDictionaryContainsKey(
miu 2014/09/24 00:10:13 nit: bool keyframe = !CFDictionaryContainsKey(...)
545 sample_attachments,
546 CoreMediaGlue::kCMSampleAttachmentKey_NotSync()) == false;
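Equivalent form of miu's nit above, avoiding the trailing == false:

  bool keyframe = !CFDictionaryContainsKey(
      sample_attachments,
      CoreMediaGlue::kCMSampleAttachmentKey_NotSync());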
547
548 // Increment the encoder-scoped frame id and assign the new value to this
549 // frame. VideoToolbox calls the output callback serially, so this is safe.
550 uint32 frame_id = ++encoder->frame_id_;
551
552 scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
553 encoded_frame->frame_id = frame_id;
554 encoded_frame->reference_time = frame_context->capture_time;
555 encoded_frame->rtp_timestamp =
556 GetVideoRtpTimestamp(frame_context->capture_time);
557 if (keyframe) {
558 encoded_frame->dependency = EncodedFrame::KEY;
559 encoded_frame->referenced_frame_id = frame_id;
560 } else {
561 encoded_frame->dependency = EncodedFrame::DEPENDENT;
562 // H.264 supports complex frame reference schemes (multiple reference
563 // frames, slice references, backward and forward references, etc). This
564 // implementation compromises by setting the referenced frame ID to that of
miu 2014/09/24 00:10:13 IMO, for clarity, the last sentence in this commen
jfroy 2014/09/24 01:00:58 Alright. I'm also going to mention that forward re
565 // the previous frame.
566 encoded_frame->referenced_frame_id = frame_id - 1;
567 }
568
569 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
570
571 encoder->cast_environment_->PostTask(
572 CastEnvironment::MAIN,
573 FROM_HERE,
574 base::Bind(frame_context->frame_encoded_callback,
575 base::Passed(&encoded_frame)));
576 }
577
578 } // namespace cast
579 } // namespace media