Chromium Code Reviews

Side by Side Diff: media/cast/sender/h264_vt_encoder.cc

Issue 450693006: VideoToolbox encoder for cast senders. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove pragma marks, debug logging. Created 6 years, 4 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/cast/sender/h264_vt_encoder.h"
6
7 #include <algorithm>
8 #include <vector>
9
10 #include "base/big_endian.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/location.h"
14 #include "base/logging.h"
15
16 namespace media {
17 namespace cast {
18
19 bool SetSessionProperty(VTSessionRef session,
Robert Sesek 2014/08/08 17:24:16 Mark this static, or remove static from all of the others.
jfroy 2014/08/08 22:25:18 Oversight from removing the template parameter.
20 CFStringRef key,
21 CFTypeRef cfvalue) {
22 OSStatus status = VTSessionSetProperty(session, key, cfvalue);
23 return status == noErr;
24 }
25
26 static bool SetSessionProperty(VTSessionRef session,
27 CFStringRef key,
28 uint32_t value) {
29 base::ScopedCFTypeRef<CFNumberRef> cfvalue(
30 CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
31   return SetSessionProperty(session, key, cfvalue);
32 }
33
34 static bool SetSessionProperty(VTSessionRef session,
35 CFStringRef key,
36 bool value) {
37 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
38   return SetSessionProperty(session, key, cfvalue);
39 }
40
41 static bool SetSessionProperty(VTSessionRef session,
42 CFStringRef key,
43 CFStringRef value) {
44   return SetSessionProperty(session, key, static_cast<CFTypeRef>(value));
45 }
46
47 static base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(
48 CFTypeRef key,
49 CFTypeRef value) {
50 CFTypeRef keys[1] = {key};
51 CFTypeRef values[1] = {value};
52 return base::ScopedCFTypeRef<CFDictionaryRef>(
53 CFDictionaryCreate(kCFAllocatorDefault,
54 keys,
55 values,
56 1,
57 &kCFTypeDictionaryKeyCallBacks,
58 &kCFTypeDictionaryValueCallBacks));
59 }
60
61 struct H264VideoToolboxEncoder::FrameContext {
62 base::TimeTicks capture_time;
63 FrameEncodedCallback frame_encoded_callback;
64 };
65
66 H264VideoToolboxEncoder::H264VideoToolboxEncoder(
67 scoped_refptr<CastEnvironment> cast_environment,
68 const VideoSenderConfig& video_config)
69 : cast_environment_(cast_environment),
70 cast_config_(video_config),
71 frame_id_(kStartFrameId),
72 last_keyframe_id_(kStartFrameId),
73 encode_next_frame_as_keyframe_(false) {
Robert Sesek 2014/08/08 17:24:17 using_hardware_ is uninitialized and will remain uninitialized.
jfroy 2014/08/08 22:25:18 I'm just going to remove that member, it is not used.
74 Initialize();
75 }
76
77 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
78 Teardown();
79 }
80
81 CVPixelBufferPoolRef H264VideoToolboxEncoder::cv_pixel_buffer_pool() const {
82 DCHECK(thread_checker_.CalledOnValidThread());
83 DCHECK(compression_session_);
84 return VTCompressionSessionGetPixelBufferPool(compression_session_);
85 }
86
87 void H264VideoToolboxEncoder::Initialize() {
88 DCHECK(thread_checker_.CalledOnValidThread());
89 DCHECK(!compression_session_);
90
91 // Note that the encoder object is given to the compression session as the
92 // callback context using a raw pointer. The C API does not allow us to use
93 // a smart pointer, nor is this encoder ref counted. However, this is still
94 // safe, because 1) we own the compression session and 2) we tear it down
95 // safely. When destructing the encoder, the compression session is flushed
96 // and invalidated. Internally, VideoToolbox will join all of its threads
97 // before returning to the client. Therefore, when control returns to us, we
98 // are guaranteed that the output callback will not execute again.
99
100 // On OS X, allow the hardware encoder. Don't require it, as it does not
101 // support all configurations (some of which are used for testing).
102 DictionaryPtr encoder_spec(nullptr);
103 #if !defined(OS_IOS)
104 encoder_spec = DictionaryWithKeyValue(
105 kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
106 kCFBooleanTrue);
107 #endif
108
109 VTCompressionSessionRef session;
110 OSStatus status =
111 VTCompressionSessionCreate(kCFAllocatorDefault,
112 cast_config_.width,
113 cast_config_.height,
114 kCMVideoCodecType_H264,
115 encoder_spec,
116 nullptr /* sourceImageBufferAttributes */,
117 nullptr /* compressedDataAllocator */,
118 CompressionCallback,
119 reinterpret_cast<void*>(this),
120 &session);
121 if (status != noErr) {
122 DLOG(ERROR) << __func__ << " VTCompressionSessionCreate failed: " << status;
Robert Sesek 2014/08/08 17:24:17 I'd remove __func__ from all your DLOGs. It shouldn't be necessary.
jfroy 2014/08/08 22:25:17 Acknowledged. It was largely inspired by the logging style used elsewhere.
123 return;
124 }
125 compression_session_.reset(session);
126
127 #if defined(OS_IOS)
128 using_hardware_ = true;
129 #else
130 CFBooleanRef using_hardware_cf = nullptr;
131 status = VTSessionCopyProperty(
132 session,
133 kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder,
134 kCFAllocatorDefault,
135 &using_hardware_cf);
136 if (status == noErr) {
137 using_hardware_ = CFBooleanGetValue(using_hardware_cf);
138 CFRelease(using_hardware_cf);
139 }
140 #endif
141
142 ConfigureSession();
143 }
144
145 static void SetConfigurationApplier(CFStringRef key,
146 CFTypeRef value,
147 VTCompressionSessionRef session) {
148   SetSessionProperty(session, key, value);
149 }
150
151 void H264VideoToolboxEncoder::ConfigureSession() {
152 SetSessionProperty(compression_session_,
153 kVTCompressionPropertyKey_ProfileLevel,
154 kVTProfileLevel_H264_Main_AutoLevel);
155 SetSessionProperty(
156 compression_session_, kVTCompressionPropertyKey_RealTime, true);
157 SetSessionProperty(compression_session_,
158 kVTCompressionPropertyKey_AllowFrameReordering,
159 false);
160 SetSessionProperty(compression_session_,
161 kVTCompressionPropertyKey_MaxKeyFrameInterval,
162 240u);
163 SetSessionProperty(compression_session_,
164 kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration,
165 240u);
166 SetSessionProperty(compression_session_,
167 kVTCompressionPropertyKey_AverageBitRate,
168 static_cast<uint32_t>(cast_config_.start_bitrate));
169 SetSessionProperty(compression_session_,
170 kVTCompressionPropertyKey_ExpectedFrameRate,
171 static_cast<uint32_t>(cast_config_.max_frame_rate));
172 SetSessionProperty(compression_session_,
173 kVTCompressionPropertyKey_ColorPrimaries,
174 kCVImageBufferColorPrimaries_ITU_R_709_2);
175 SetSessionProperty(compression_session_,
176 kVTCompressionPropertyKey_TransferFunction,
177 kCVImageBufferTransferFunction_ITU_R_709_2);
178 SetSessionProperty(compression_session_,
179 kVTCompressionPropertyKey_YCbCrMatrix,
180 kCVImageBufferYCbCrMatrix_ITU_R_709_2);
181
182 if (compression_properties_) {
Robert Sesek 2014/08/08 17:24:16 This is always NULL.
jfroy 2014/08/08 22:25:18 A prior prototype implementation used this additional property dictionary.
183 CFDictionaryApplyFunction(
184 compression_properties_,
185 reinterpret_cast<CFDictionaryApplierFunction>(SetConfigurationApplier),
186 compression_session_.get());
187 }
188 }
189
190 void H264VideoToolboxEncoder::Teardown() {
191 DCHECK(thread_checker_.CalledOnValidThread());
192
193 // If the compression session exists, invalidate it. This blocks until all
194 // pending output callbacks have returned and any internal threads have
195 // joined, ensuring no output callback ever sees a dangling encoder pointer.
196 if (compression_session_) {
197 VTCompressionSessionInvalidate(compression_session_);
198 compression_session_.reset();
199 }
200 }
201
202 bool H264VideoToolboxEncoder::EncodeVideoFrame(
203 const scoped_refptr<media::VideoFrame>& video_frame,
204 const base::TimeTicks& capture_time,
205 const FrameEncodedCallback& frame_encoded_callback) {
206 DCHECK(thread_checker_.CalledOnValidThread());
207
208 if (!compression_session_) {
209 DLOG(ERROR) << __func__ << " compression session is null";
210 return false;
211 }
212
213 PixelBufferPtr pixel_buffer(video_frame->cv_pixel_buffer(),
214 base::scoped_policy::RETAIN);
215 if (!pixel_buffer) {
216     pixel_buffer = WrapVideoFrame(video_frame);
217 if (!pixel_buffer) {
218 return false;
219 }
220 }
221
222 CMTime timestamp_cm;
223 if (capture_time.is_null()) {
224 timestamp_cm = kCMTimeInvalid;
225 } else {
226 timestamp_cm = CMTimeMake(capture_time.ToInternalValue(), USEC_PER_SEC);
227 }
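  // Note (illustration, not part of the patch): base::TimeTicks stores its
  // value in microseconds, so pairing ToInternalValue() with a timescale of
  // USEC_PER_SEC expresses the capture time exactly; e.g. an internal value
  // of 2500000 becomes a CMTime of 2.5 seconds.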
228
229 FrameContext* fc = new FrameContext();
230 fc->capture_time = capture_time;
231 fc->frame_encoded_callback = frame_encoded_callback;
232
233 DictionaryPtr frame_props(nullptr);
Robert Sesek 2014/08/08 17:24:17 This will automatically initialize to NULL.
jfroy 2014/08/08 22:25:18 Acknowledged.
234 if (encode_next_frame_as_keyframe_) {
235 frame_props = DictionaryWithKeyValue(kVTEncodeFrameOptionKey_ForceKeyFrame,
236 kCFBooleanTrue);
237 encode_next_frame_as_keyframe_ = false;
238 }
239
240 VTEncodeInfoFlags info;
241 OSStatus status = VTCompressionSessionEncodeFrame(compression_session_,
242 pixel_buffer,
243 timestamp_cm,
244 kCMTimeInvalid,
245 frame_props,
246 reinterpret_cast<void*>(fc),
247 &info);
248 if (status != noErr) {
249 DLOG(ERROR) << __func__
250 << " VTCompressionSessionEncodeFrame failed: " << status;
251 return false;
252 }
253 if ((info & kVTEncodeInfo_FrameDropped)) {
254 DLOG(ERROR) << __func__ << " frame dropped";
255 return false;
256 }
257
258 return true;
259 }
260
261 void H264VideoToolboxEncoder::SetBitRate(int new_bit_rate) {
262 DCHECK(thread_checker_.CalledOnValidThread());
263 // NOTE: VideoToolbox does not seem to support bitrate reconfiguration.
264 }
265
266 void H264VideoToolboxEncoder::GenerateKeyFrame() {
267 DCHECK(thread_checker_.CalledOnValidThread());
268 DCHECK(compression_session_);
269
270 encode_next_frame_as_keyframe_ = true;
271 }
272
273 void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
274 }
275
276 static void VideoFramePixelBufferReleaseCallback(void* frame_ref,
277 const void* data,
278 size_t size,
279 size_t num_planes,
280 const void* planes[]) {
281 free(const_cast<void*>(data));
282 reinterpret_cast<media::VideoFrame*>(frame_ref)->Release();
283 }
284
285 H264VideoToolboxEncoder::PixelBufferPtr H264VideoToolboxEncoder::WrapVideoFrame(
286 const scoped_refptr<media::VideoFrame>& frame) {
287 static const size_t MAX_PLANES = 3;
288
289 media::VideoFrame::Format format = frame->format();
290 size_t num_planes = media::VideoFrame::NumPlanes(format);
291 gfx::Size coded_size = frame->coded_size();
292
293 // media::VideoFrame only supports YUV formats, so there is no way to
294 // leverage VideoToolbox's ability to convert RGBA formats automatically. In
295 // addition, most of the media::VideoFrame formats are YVU, which VT does not
296 // support. Finally, media::VideoFrame formats do not carry any information
297 // about the color space, transform or any other colorimetric information
298 // that is generally needed to fully specify the input data. So we essentially
299 // require that the input be YCbCr 4:2:0 (either planar or biplanar) and
300 // assume the standard video dynamic range for samples (although most modern
301 // HDTVs support full-range video these days).
302 OSType pixel_format;
303 if (format == media::VideoFrame::Format::I420) {
304 pixel_format = kCVPixelFormatType_420YpCbCr8Planar;
305 } else if (format == media::VideoFrame::Format::NV12) {
306 pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
307 } else {
308 DLOG(ERROR) << __func__ << " unsupported frame format: " << format;
309 return PixelBufferPtr(nullptr);
310 }
311
312 // TODO(jfroy): Support extended pixels (i.e. padding).
313 if (frame->coded_size() != frame->visible_rect().size()) {
314 DLOG(ERROR) << __func__ << " frame with extended pixels not supported: "
315 << " coded_size: " << coded_size.ToString()
316 << ", visible_rect: " << frame->visible_rect().ToString();
317 return PixelBufferPtr(nullptr);
318 }
319
320 DCHECK(media::VideoFrame::NumPlanes(format) <= MAX_PLANES);
Robert Sesek 2014/08/08 17:24:17 DCHECK_LE
jfroy 2014/08/08 22:25:18 Acknowledged.
321 void* plane_ptrs[MAX_PLANES];
322 size_t plane_widths[MAX_PLANES];
323 size_t plane_heights[MAX_PLANES];
324 size_t plane_bytes_per_row[MAX_PLANES];
325 for (size_t plane_i = 0; plane_i < num_planes; ++plane_i) {
326 plane_ptrs[plane_i] = frame->data(plane_i);
327 gfx::Size plane_size =
328 media::VideoFrame::PlaneSize(format, plane_i, coded_size);
329 plane_widths[plane_i] = plane_size.width();
330 plane_heights[plane_i] = plane_size.height();
331 plane_bytes_per_row[plane_i] = frame->stride(plane_i);
332 }
333
334 // CVPixelBufferCreateWithPlanarBytes needs a dummy plane descriptor or the
335 // release callback will not execute. The descriptor is freed in the callback.
336 void* descriptor =
337 calloc(1,
338 std::max(sizeof(CVPlanarPixelBufferInfo_YCbCrPlanar),
339 sizeof(CVPlanarPixelBufferInfo_YCbCrBiPlanar)));
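  // (Illustrative note: the larger of the two descriptor structs is allocated
  // so the same dummy descriptor works for both the planar I420 and biplanar
  // NV12 layouts.)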
340
341 // Wrap the frame's data in a CVPixelBuffer. Because this is a C API, we can't
342 // give it a smart pointer to the frame, so instead pass a raw pointer and
343 // increment the frame's reference count manually.
344 CVPixelBufferRef pixel_buffer;
345 CVReturn result =
346 CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault,
347 coded_size.width(),
348 coded_size.height(),
349                                        pixel_format,
350                                        descriptor,
351 0,
352 num_planes,
353 plane_ptrs,
354 plane_widths,
355 plane_heights,
356 plane_bytes_per_row,
357 VideoFramePixelBufferReleaseCallback,
358 frame.get(),
359 nullptr,
360 &pixel_buffer);
361 if (result != kCVReturnSuccess) {
362 DLOG(ERROR) << __func__
363 << " CVPixelBufferCreateWithPlanarBytes failed: " << result;
364 return PixelBufferPtr(nullptr);
365 }
366
367 // The CVPixelBuffer now references the data of the frame, so increment its
368 // reference count manually. The release callback set on the pixel buffer will
369 // release the frame.
370   frame->AddRef();
371
372 return PixelBufferPtr(pixel_buffer);
373 }
374
375 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
376 void* frame_opaque,
377 OSStatus status,
378 VTEncodeInfoFlags info,
379 CMSampleBufferRef sbuf) {
380 H264VideoToolboxEncoder* encoder =
381 reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
382 scoped_ptr<FrameContext> fc(reinterpret_cast<FrameContext*>(frame_opaque));
383
384 if (status != noErr) {
385 DLOG(ERROR) << __func__ << " encoding failed: " << status;
386 return;
387 }
388 if ((info & kVTEncodeInfo_FrameDropped)) {
389 DVLOG(2) << __func__ << " frame dropped";
390 return;
391 }
392 CMItemCount sample_count = CMSampleBufferGetNumSamples(sbuf);
393 if (sample_count > 1) {
394 DLOG(ERROR) << __func__
395 << " more than one sample in sample buffer: " << sample_count;
396 return;
397 }
398
399 CFDictionaryRef sample_attachments =
400 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
401 CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0));
402
403 // If the NotSync key is not present, it implies Sync, which indicates a
404 // keyframe (at least I think, VT documentation is, erm, sparse). Could
405 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
406 bool keyframe =
407 CFDictionaryContainsKey(sample_attachments,
408 kCMSampleAttachmentKey_NotSync) == false;
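  // (Sketch of the DependsOnOthers alternative mentioned above; untested and
  // not part of this patch:
  //   CFBooleanRef depends = static_cast<CFBooleanRef>(CFDictionaryGetValue(
  //       sample_attachments, kCMSampleAttachmentKey_DependsOnOthers));
  //   bool keyframe = (depends == nullptr) || !CFBooleanGetValue(depends);
  // i.e. a sample that does not depend on other samples is treated as a
  // keyframe.)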
409
410 // Generate a frame id and update the last keyframe id if needed.
411 // NOTE: VideoToolbox calls the output callback serially, so this is safe.
412 uint32 frame_id = ++encoder->frame_id_;
413 if (keyframe) {
414 encoder->last_keyframe_id_ = frame_id;
415 }
416
417 CMSampleTimingInfo timing_info;
418 CMSampleBufferGetSampleTimingInfo(sbuf, 0, &timing_info);
419
420 scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
421 encoded_frame->frame_id = frame_id;
422 encoded_frame->reference_time = fc->capture_time;
423 encoded_frame->rtp_timestamp = GetVideoRtpTimestamp(fc->capture_time);
424 if (keyframe) {
425 encoded_frame->dependency = EncodedFrame::KEY;
426 encoded_frame->referenced_frame_id = frame_id;
427 } else {
428 encoded_frame->dependency = EncodedFrame::DEPENDENT;
429 // NOTE: Technically wrong, but without parsing the NALs our best guess is
430 // the last keyframe.
431 encoded_frame->referenced_frame_id = encoder->last_keyframe_id_;
432 }
433
434 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
435
436 encoder->cast_environment_->PostTask(
437 CastEnvironment::MAIN,
438 FROM_HERE,
439 base::Bind(fc->frame_encoded_callback, base::Passed(&encoded_frame)));
440 }
441
442 template <typename NalSizeType>
443 static void CopyNalsToAnnexB(char* avcc_buffer,
444 const size_t avcc_size,
445 std::string* annexb_buffer) {
446 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
Robert Sesek 2014/08/08 17:24:17 I believe this requires C++11 library support, which we can't rely on yet.
jfroy 2014/08/08 22:25:17 Acknowledged.
447 sizeof(NalSizeType) == 4,
448 "NAL size type has unsupported size");
449 static const char startcode_3[3] = {0, 0, 1};
450 DCHECK(avcc_buffer);
451 DCHECK(annexb_buffer);
452 size_t bytes_left = avcc_size;
453 while (bytes_left > 0) {
454 DCHECK(bytes_left > sizeof(NalSizeType));
Robert Sesek 2014/08/08 17:24:17 DCHECK_GT
jfroy 2014/08/08 22:25:18 Acknowledged.
455 NalSizeType nal_size;
456 base::ReadBigEndian(avcc_buffer, &nal_size);
457 bytes_left -= sizeof(NalSizeType);
458 avcc_buffer += sizeof(NalSizeType);
459
460 DCHECK(bytes_left >= nal_size);
461 annexb_buffer->append(startcode_3, sizeof(startcode_3));
462 annexb_buffer->append(avcc_buffer, nal_size);
463 bytes_left -= nal_size;
464 avcc_buffer += nal_size;
465 }
466 }
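For illustration only (a hypothetical sketch, not part of the patch), the conversion above turns a length-prefixed AVCC buffer into a start-code-prefixed Annex B stream:

  // One 3-byte NAL unit preceded by a 4-byte big-endian length header.
  char avcc[] = {0, 0, 0, 3, 0x65, 0x0A, 0x0B};
  std::string annexb;
  CopyNalsToAnnexB<uint32_t>(avcc, sizeof(avcc), &annexb);
  // annexb now holds {0, 0, 1, 0x65, 0x0A, 0x0B}: a 3-byte start code
  // followed by the NAL payload.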
467
468 void H264VideoToolboxEncoder::CopySampleBufferToAnnexBBuffer(
469 CMSampleBufferRef sbuf,
470 std::string* annexb_buffer,
471 bool keyframe) {
472   // Perform two passes: one to figure out the total output size, and another
473   // to copy the data after a single output allocation. Note that we will
474   // allocate a bit more than needed, because the estimate counts each video
475   // NAL's 4-byte length header while only a 3-byte start code is written.
476
477 // TODO(jfroy): There is a bug in
478 // CMVideoFormatDescriptionGetH264ParameterSetAtIndex, iterate until fail.
479
480 OSStatus status;
481
482 // Get the sample buffer's block buffer and format description.
483 CMBlockBufferRef bb = CMSampleBufferGetDataBuffer(sbuf);
484 DCHECK(bb);
485 CMFormatDescriptionRef fdesc = CMSampleBufferGetFormatDescription(sbuf);
486 DCHECK(fdesc);
487
488 size_t bb_size = CMBlockBufferGetDataLength(bb);
489 size_t total_bytes = bb_size;
490
491 size_t pset_count;
492 int nal_size_field_bytes;
493 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
494 fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes);
495 if (status == kCMFormatDescriptionBridgeError_InvalidParameter) {
496     DLOG(WARNING) << __func__ << " assuming 2 parameter sets and 4 bytes NAL length header";
497     pset_count = 2;
498     nal_size_field_bytes = 4;
499 } else if (status != noErr) {
500 DLOG(ERROR)
501 << __func__
502 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
503 << status;
504 return;
505 }
506
507 if (keyframe) {
508 const uint8_t* pset;
509 size_t pset_size;
510 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
511 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
512 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
513 if (status != noErr) {
514 DLOG(ERROR)
515 << __func__
516 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
517 << status;
518 return;
519 }
520 total_bytes += pset_size + nal_size_field_bytes;
521 }
522 }
523
524 annexb_buffer->reserve(total_bytes);
525
526 // Copy all parameter sets before keyframes.
527 if (keyframe) {
528 const uint8_t* pset;
529 size_t pset_size;
530 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
531 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
532 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr);
533 if (status != noErr) {
534 DLOG(ERROR)
535 << __func__
536 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
537 << status;
538 return;
539 }
540 static const char startcode_4[4] = {0, 0, 0, 1};
541 annexb_buffer->append(startcode_4, sizeof(startcode_4));
542 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
543 }
544 }
545
546 // Block buffers can be composed of non-contiguous chunks. For the sake of
547 // keeping this code simple, flatten non-contiguous block buffers.
548 base::ScopedCFTypeRef<CMBlockBufferRef> contiguous_bb(
549 bb, base::scoped_policy::RETAIN);
550 if (!CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
551 contiguous_bb.reset();
552 status = CMBlockBufferCreateContiguous(kCFAllocatorDefault,
553 bb,
554 kCFAllocatorDefault,
555 nullptr,
556 0,
557 0,
558 0,
559 contiguous_bb.InitializeInto());
560 if (status != noErr) {
561 DLOG(ERROR) << __func__
562 << " CMBlockBufferCreateContiguous failed: " << status;
563 return;
564 }
565 }
566
567 // Copy all the NAL units. In the process convert them from AVCC format
568 // (length header) to AnnexB format (start code).
569 char* bb_data;
570 status =
571 CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr, nullptr, &bb_data);
572 if (status != noErr) {
573 DLOG(ERROR) << __func__
574 << " CMBlockBufferGetDataPointer failed: " << status;
575 return;
576 }
577
578 if (nal_size_field_bytes == 1) {
579 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
580 } else if (nal_size_field_bytes == 2) {
581 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
582 } else if (nal_size_field_bytes == 4) {
583 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
584 }
585 }
586
587 } // namespace cast
588 } // namespace media