OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "media/cast/sender/h264_vt_encoder.h" | |
6 | |
7 #include <algorithm> | |
8 #include <vector> | |
9 | |
10 #include "base/big_endian.h" | |
11 #include "base/bind.h" | |
12 #include "base/bind_helpers.h" | |
13 #include "base/location.h" | |
14 #include "base/logging.h" | |
15 | |
16 namespace media { | |
17 namespace cast { | |
18 | |
19 namespace { | |
20 | |
21 bool SetSessionProperty(VTSessionRef session, CFStringRef key, uint32_t value) { | |
22 base::ScopedCFTypeRef<CFNumberRef> cfvalue( | |
23 CFNumberCreate(NULL, kCFNumberSInt32Type, &value)); | |
Robert Sesek
2014/08/12 00:06:12
You're converting a uint32 to an int32. That seems
jfroy
2014/08/12 01:04:15
There are no constants for unsigned types in CFNum
Robert Sesek
2014/08/12 18:47:09
Right, so either use kCFNumberSInt64Type or base/n
| |
24 return VTSessionSetProperty(session, key, cfvalue) == noErr; | |
25 } | |
26 | |
27 bool SetSessionProperty(VTSessionRef session, CFStringRef key, bool value) { | |
28 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse; | |
29 return VTSessionSetProperty(session, key, cfvalue) == noErr; | |
30 } | |
31 | |
32 bool SetSessionProperty(VTSessionRef session, | |
33 CFStringRef key, | |
34 CFStringRef value) { | |
35 return VTSessionSetProperty(session, key, value) == noErr; | |
36 } | |
37 | |
38 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, | |
39 CFTypeRef value) { | |
40 CFTypeRef keys[1] = {key}; | |
41 CFTypeRef values[1] = {value}; | |
42 return base::ScopedCFTypeRef<CFDictionaryRef>( | |
43 CFDictionaryCreate(kCFAllocatorDefault, | |
44 keys, | |
45 values, | |
46 1, | |
47 &kCFTypeDictionaryKeyCallBacks, | |
48 &kCFTypeDictionaryValueCallBacks)); | |
49 } | |
50 | |
// Per-frame state handed to the compression session as the frame's opaque
// context pointer (see EncodeVideoFrame) and recovered, then freed, in
// CompressionCallback.
struct FrameContext {
  // Capture time of the source frame; becomes the encoded frame's
  // reference_time and RTP timestamp.
  base::TimeTicks capture_time;
  // Invoked (on the MAIN cast thread) with the encoded frame.
  media::cast::VideoEncoder::FrameEncodedCallback frame_encoded_callback;
};
55 | |
56 } // namespace | |
57 | |
// Constructs the encoder and immediately creates and configures the
// VideoToolbox compression session for |video_config|'s dimensions.
H264VideoToolboxEncoder::H264VideoToolboxEncoder(
    scoped_refptr<CastEnvironment> cast_environment,
    const VideoSenderConfig& video_config)
    : cast_environment_(cast_environment),
      cast_config_(video_config),
      frame_id_(kStartFrameId),
      last_keyframe_id_(kStartFrameId),
      encode_next_frame_as_keyframe_(false) {
  Initialize();
}
68 | |
// Invalidates the compression session, blocking until no output callback can
// still reference this encoder (see Teardown).
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
  Teardown();
}
72 | |
// Exposes the compression session's pixel buffer pool so callers can allocate
// input buffers the encoder can consume without an extra copy. Requires a
// live session (i.e. Initialize succeeded).
CVPixelBufferPoolRef H264VideoToolboxEncoder::cv_pixel_buffer_pool() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(compression_session_);
  return VTCompressionSessionGetPixelBufferPool(compression_session_);
}
78 | |
79 void H264VideoToolboxEncoder::Initialize() { | |
80 DCHECK(thread_checker_.CalledOnValidThread()); | |
81 DCHECK(!compression_session_); | |
82 | |
83 // Note that the encoder object is given to the compression session as the | |
84 // callback context using a raw pointer. The C API does not allow us to use | |
85 // a smart pointer, nor is this encoder ref counted. However, this is still | |
86 // safe, because we 1) we own the compression session and 2) we tear it down | |
87 // safely. When destructing the encoder, the compression session is flushed | |
88 // and invalidated. Internally, VideoToolbox will join all of its threads | |
89 // before returning to the client. Therefore, when control returns to us, we | |
90 // are guaranteed that the output callback will not execute again. | |
91 | |
92 // On OS X, allow the hardware encoder. Don't require it, it does not support | |
93 // all configurations (some of which are used for testing). | |
94 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec; | |
95 #if !defined(OS_IOS) | |
96 encoder_spec = DictionaryWithKeyValue( | |
97 kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, | |
98 kCFBooleanTrue); | |
99 #endif | |
100 | |
101 VTCompressionSessionRef session; | |
102 OSStatus status = | |
103 VTCompressionSessionCreate(kCFAllocatorDefault, | |
104 cast_config_.width, | |
105 cast_config_.height, | |
106 kCMVideoCodecType_H264, | |
107 encoder_spec, | |
108 NULL /* sourceImageBufferAttributes */, | |
109 NULL /* compressedDataAllocator */, | |
110 CompressionCallback, | |
111 reinterpret_cast<void*>(this), | |
112 &session); | |
113 if (status != noErr) { | |
114 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; | |
115 return; | |
116 } | |
117 compression_session_.reset(session); | |
118 | |
119 ConfigureSession(); | |
120 } | |
121 | |
122 void H264VideoToolboxEncoder::ConfigureSession() { | |
123 SetSessionProperty(compression_session_, | |
124 kVTCompressionPropertyKey_ProfileLevel, | |
125 kVTProfileLevel_H264_Main_AutoLevel); | |
126 SetSessionProperty( | |
127 compression_session_, kVTCompressionPropertyKey_RealTime, true); | |
128 SetSessionProperty(compression_session_, | |
129 kVTCompressionPropertyKey_AllowFrameReordering, | |
130 false); | |
131 SetSessionProperty(compression_session_, | |
132 kVTCompressionPropertyKey_MaxKeyFrameInterval, | |
133 240u); | |
134 SetSessionProperty(compression_session_, | |
135 kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, | |
136 240u); | |
137 SetSessionProperty(compression_session_, | |
138 kVTCompressionPropertyKey_AverageBitRate, | |
139 static_cast<uint32_t>(cast_config_.start_bitrate)); | |
140 SetSessionProperty(compression_session_, | |
141 kVTCompressionPropertyKey_ExpectedFrameRate, | |
142 static_cast<uint32_t>(cast_config_.max_frame_rate)); | |
143 SetSessionProperty(compression_session_, | |
144 kVTCompressionPropertyKey_ColorPrimaries, | |
145 kCVImageBufferColorPrimaries_ITU_R_709_2); | |
146 SetSessionProperty(compression_session_, | |
147 kVTCompressionPropertyKey_TransferFunction, | |
148 kCVImageBufferTransferFunction_ITU_R_709_2); | |
149 SetSessionProperty(compression_session_, | |
150 kVTCompressionPropertyKey_YCbCrMatrix, | |
151 kCVImageBufferYCbCrMatrix_ITU_R_709_2); | |
152 } | |
153 | |
154 void H264VideoToolboxEncoder::Teardown() { | |
155 DCHECK(thread_checker_.CalledOnValidThread()); | |
156 | |
157 // If the compression session exists, invalidate it. This blocks until all | |
158 // pending output callbacks have returned and any internal threads have | |
159 // joined, ensuring no output callback ever sees a dangling encoder pointer. | |
160 if (compression_session_) { | |
161 VTCompressionSessionInvalidate(compression_session_); | |
162 compression_session_.reset(); | |
163 } | |
164 } | |
165 | |
166 bool H264VideoToolboxEncoder::EncodeVideoFrame( | |
167 const scoped_refptr<media::VideoFrame>& video_frame, | |
168 const base::TimeTicks& capture_time, | |
169 const FrameEncodedCallback& frame_encoded_callback) { | |
170 DCHECK(thread_checker_.CalledOnValidThread()); | |
171 DCHECK(!capture_time.is_null()); | |
172 | |
173 if (!compression_session_) { | |
174 DLOG(ERROR) << " compression session is null"; | |
175 return false; | |
176 } | |
177 | |
178 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer( | |
179 video_frame->cv_pixel_buffer(), base::scoped_policy::RETAIN); | |
180 if (!pixel_buffer) { | |
181 pixel_buffer = WrapVideoFrame(video_frame); | |
182 if (!pixel_buffer) { | |
183 return false; | |
184 } | |
185 } | |
186 | |
187 CMTime timestamp_cm = | |
188 CMTimeMake(capture_time.ToInternalValue(), USEC_PER_SEC); | |
189 | |
190 scoped_ptr<FrameContext> frame_context(new FrameContext()); | |
191 frame_context->capture_time = capture_time; | |
192 frame_context->frame_encoded_callback = frame_encoded_callback; | |
193 | |
194 base::ScopedCFTypeRef<CFDictionaryRef> frame_props; | |
195 if (encode_next_frame_as_keyframe_) { | |
196 frame_props = DictionaryWithKeyValue(kVTEncodeFrameOptionKey_ForceKeyFrame, | |
197 kCFBooleanTrue); | |
198 encode_next_frame_as_keyframe_ = false; | |
199 } | |
200 | |
201 VTEncodeInfoFlags info; | |
202 OSStatus status = VTCompressionSessionEncodeFrame( | |
203 compression_session_, | |
204 pixel_buffer, | |
205 timestamp_cm, | |
206 kCMTimeInvalid, | |
207 frame_props, | |
208 reinterpret_cast<void*>(frame_context.release()), | |
209 &info); | |
210 if (status != noErr) { | |
211 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status; | |
212 return false; | |
213 } | |
214 if ((info & kVTEncodeInfo_FrameDropped)) { | |
215 DLOG(ERROR) << " frame dropped"; | |
216 return false; | |
217 } | |
218 | |
219 return true; | |
220 } | |
221 | |
// Intentionally a no-op.
void H264VideoToolboxEncoder::SetBitRate(int new_bit_rate) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // VideoToolbox does not seem to support bitrate reconfiguration.
}
226 | |
// Requests that the next frame submitted to EncodeVideoFrame be forced to a
// keyframe (via kVTEncodeFrameOptionKey_ForceKeyFrame).
void H264VideoToolboxEncoder::GenerateKeyFrame() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(compression_session_);

  encode_next_frame_as_keyframe_ = true;
}
233 | |
// Intentionally a no-op.
void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
  // Not supported by VideoToolbox in any meaningful manner.
}
237 | |
// Release callback installed on pixel buffers created by WrapVideoFrame().
// |data| is the heap-allocated dummy plane descriptor (calloc'd in
// WrapVideoFrame) and |frame_ref| is the manually-retained media::VideoFrame;
// both are released here when CoreVideo is done with the buffer.
static void VideoFramePixelBufferReleaseCallback(void* frame_ref,
                                                 const void* data,
                                                 size_t size,
                                                 size_t num_planes,
                                                 const void* planes[]) {
  free(const_cast<void*>(data));
  reinterpret_cast<media::VideoFrame*>(frame_ref)->Release();
}
246 | |
247 base::ScopedCFTypeRef<CVPixelBufferRef> H264VideoToolboxEncoder::WrapVideoFrame( | |
248 const scoped_refptr<media::VideoFrame>& frame) { | |
249 static const size_t MAX_PLANES = 3; | |
250 | |
251 media::VideoFrame::Format format = frame->format(); | |
252 size_t num_planes = media::VideoFrame::NumPlanes(format); | |
253 DCHECK_LE(num_planes, MAX_PLANES); | |
254 gfx::Size coded_size = frame->coded_size(); | |
255 | |
256 // media::VideoFrame only supports YUV formats, so there is no way to | |
257 // leverage VideoToolbox's ability to convert RGBA formats automatically. In | |
258 // addition, most of the media::VideoFrame formats are YVU, which VT does not | |
259 // support. Finally, media::VideoFrame formats do not carry any information | |
260 // about the color space, transform or any other colorimetric information | |
261 // that is generally needed to fully specify the input data. So essentially | |
262 // require that the input be YCbCr 4:2:0 (either planar or biplanar) and | |
263 // assume the standard video dynamic range for samples (although most modern | |
264 // HDTVs support full-range video these days). | |
265 OSType pixel_format; | |
266 if (format == media::VideoFrame::Format::I420) { | |
267 pixel_format = kCVPixelFormatType_420YpCbCr8Planar; | |
268 } else if (format == media::VideoFrame::Format::NV12) { | |
269 pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; | |
270 } else { | |
271 DLOG(ERROR) << " unsupported frame format: " << format; | |
272 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL); | |
273 } | |
274 | |
275 // TODO(jfroy): Support extended pixels (i.e. padding). | |
276 if (frame->coded_size() != frame->visible_rect().size()) { | |
277 DLOG(ERROR) << " frame with extended pixels not supported: " | |
278 << " coded_size: " << coded_size.ToString() | |
279 << ", visible_rect: " << frame->visible_rect().ToString(); | |
280 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL); | |
281 } | |
282 | |
283 void* plane_ptrs[MAX_PLANES]; | |
284 size_t plane_widths[MAX_PLANES]; | |
285 size_t plane_heights[MAX_PLANES]; | |
286 size_t plane_bytes_per_row[MAX_PLANES]; | |
287 for (size_t plane_i = 0; plane_i < num_planes; ++plane_i) { | |
288 plane_ptrs[plane_i] = frame->data(plane_i); | |
289 gfx::Size plane_size = | |
290 media::VideoFrame::PlaneSize(format, plane_i, coded_size); | |
291 plane_widths[plane_i] = plane_size.width(); | |
292 plane_heights[plane_i] = plane_size.height(); | |
293 plane_bytes_per_row[plane_i] = frame->stride(plane_i); | |
294 } | |
295 | |
296 // CVPixelBufferCreateWithPlanarBytes needs a dummy plane descriptor or the | |
297 // release callback will not execute. The descriptor is freed in the callback. | |
298 void* descriptor = | |
299 calloc(1, | |
300 std::max(sizeof(CVPlanarPixelBufferInfo_YCbCrPlanar), | |
301 sizeof(CVPlanarPixelBufferInfo_YCbCrBiPlanar))); | |
302 | |
303 // Wrap the frame's data in a CVPixelBuffer. Because this is a C API, we can't | |
304 // give it a smart pointer to the frame, so instead pass a raw pointer and | |
305 // increment the frame's reference count manually. | |
306 CVPixelBufferRef pixel_buffer; | |
307 CVReturn result = | |
308 CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault, | |
309 coded_size.width(), | |
310 coded_size.height(), | |
311 format, | |
312 descriptor, | |
313 0, | |
314 num_planes, | |
315 plane_ptrs, | |
316 plane_widths, | |
317 plane_heights, | |
318 plane_bytes_per_row, | |
319 VideoFramePixelBufferReleaseCallback, | |
320 frame.get(), | |
321 NULL, | |
322 &pixel_buffer); | |
323 if (result != kCVReturnSuccess) { | |
324 DLOG(ERROR) << " CVPixelBufferCreateWithPlanarBytes failed: " << result; | |
325 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL); | |
326 } | |
327 | |
328 // The CVPixelBuffer now references the data of the frame, so increment its | |
329 // reference count manually. The release callback set on the pixel buffer will | |
330 // release the frame. | |
331 frame->AddRef(); | |
332 | |
333 return base::ScopedCFTypeRef<CVPixelBufferRef>(pixel_buffer); | |
334 } | |
335 | |
336 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, | |
337 void* frame_opaque, | |
338 OSStatus status, | |
339 VTEncodeInfoFlags info, | |
340 CMSampleBufferRef sbuf) { | |
341 H264VideoToolboxEncoder* encoder = | |
342 reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque); | |
343 scoped_ptr<FrameContext> frame_context( | |
344 reinterpret_cast<FrameContext*>(frame_opaque)); | |
345 | |
346 if (status != noErr) { | |
347 DLOG(ERROR) << " encoding failed: " << status; | |
348 return; | |
349 } | |
350 if ((info & kVTEncodeInfo_FrameDropped)) { | |
351 DVLOG(2) << " frame dropped"; | |
352 return; | |
353 } | |
354 DCHECK_EQ(CMSampleBufferGetNumSamples(sbuf), 1); | |
355 | |
356 CFDictionaryRef sample_attachments = | |
357 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex( | |
358 CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0)); | |
359 | |
360 // If the NotSync key is not present, it implies Sync, which indicates a | |
361 // keyframe (at least I think, VT documentation is, erm, sparse). Could | |
362 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false. | |
363 bool keyframe = | |
364 CFDictionaryContainsKey(sample_attachments, | |
365 kCMSampleAttachmentKey_NotSync) == false; | |
366 | |
367 // Generate a frame id and update the last keyframe id if needed. | |
368 // NOTE: VideoToolbox calls the output callback serially, so this is safe. | |
369 uint32 frame_id = ++encoder->frame_id_; | |
370 if (keyframe) { | |
371 encoder->last_keyframe_id_ = frame_id; | |
372 } | |
373 | |
374 CMSampleTimingInfo timing_info; | |
375 CMSampleBufferGetSampleTimingInfo(sbuf, 0, &timing_info); | |
376 | |
377 scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame()); | |
378 encoded_frame->frame_id = frame_id; | |
379 encoded_frame->reference_time = frame_context->capture_time; | |
380 encoded_frame->rtp_timestamp = | |
381 GetVideoRtpTimestamp(frame_context->capture_time); | |
382 if (keyframe) { | |
383 encoded_frame->dependency = EncodedFrame::KEY; | |
384 encoded_frame->referenced_frame_id = frame_id; | |
385 } else { | |
386 encoded_frame->dependency = EncodedFrame::DEPENDENT; | |
387 // NOTE: Technically wrong, but without parsing the NALs our best guess is | |
388 // the last keyframe. | |
389 encoded_frame->referenced_frame_id = encoder->last_keyframe_id_; | |
390 } | |
391 | |
392 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe); | |
393 | |
394 encoder->cast_environment_->PostTask( | |
395 CastEnvironment::MAIN, | |
396 FROM_HERE, | |
397 base::Bind(frame_context->frame_encoded_callback, | |
398 base::Passed(&encoded_frame))); | |
399 } | |
400 | |
401 template <typename NalSizeType> | |
402 static void CopyNalsToAnnexB(char* avcc_buffer, | |
403 const size_t avcc_size, | |
404 std::string* annexb_buffer) { | |
405 COMPILE_ASSERT(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || | |
406 sizeof(NalSizeType) == 4, | |
407 "NAL size type has unsupported size"); | |
408 static const char startcode_3[3] = {0, 0, 1}; | |
409 DCHECK(avcc_buffer); | |
410 DCHECK(annexb_buffer); | |
411 size_t bytes_left = avcc_size; | |
412 while (bytes_left > 0) { | |
413 DCHECK_GT(bytes_left, sizeof(NalSizeType)); | |
414 NalSizeType nal_size; | |
415 base::ReadBigEndian(avcc_buffer, &nal_size); | |
416 bytes_left -= sizeof(NalSizeType); | |
417 avcc_buffer += sizeof(NalSizeType); | |
418 | |
419 DCHECK_GE(bytes_left, nal_size); | |
420 annexb_buffer->append(startcode_3, sizeof(startcode_3)); | |
421 annexb_buffer->append(avcc_buffer, nal_size); | |
422 bytes_left -= nal_size; | |
423 avcc_buffer += nal_size; | |
424 } | |
425 } | |
426 | |
427 void H264VideoToolboxEncoder::CopySampleBufferToAnnexBBuffer( | |
428 CMSampleBufferRef sbuf, | |
429 std::string* annexb_buffer, | |
430 bool keyframe) { | |
431 // Perform two pass, one to figure out the total output size, and another to | |
432 // copy the data after having performed a single output allocation. Note that | |
433 // we'll allocate a bit more because we'll count 4 bytes instead of 3 for | |
434 // video NALs. | |
435 | |
436 // TODO(jfroy): There is a bug in | |
437 // CMVideoFormatDescriptionGetH264ParameterSetAtIndex, iterate until fail. | |
438 | |
439 OSStatus status; | |
440 | |
441 // Get the sample buffer's block buffer and format description. | |
442 CMBlockBufferRef bb = CMSampleBufferGetDataBuffer(sbuf); | |
443 DCHECK(bb); | |
444 CMFormatDescriptionRef fdesc = CMSampleBufferGetFormatDescription(sbuf); | |
445 DCHECK(fdesc); | |
446 | |
447 size_t bb_size = CMBlockBufferGetDataLength(bb); | |
448 size_t total_bytes = bb_size; | |
449 | |
450 size_t pset_count; | |
451 int nal_size_field_bytes; | |
452 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex( | |
453 fdesc, 0, NULL, NULL, &pset_count, &nal_size_field_bytes); | |
454 if (status == kCMFormatDescriptionBridgeError_InvalidParameter) { | |
455 DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header"; | |
456 pset_count = 2; | |
457 nal_size_field_bytes = 4; | |
458 } else if (status != noErr) { | |
459 DLOG(ERROR) | |
460 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " | |
461 << status; | |
462 return; | |
463 } | |
464 | |
465 if (keyframe) { | |
466 const uint8_t* pset; | |
467 size_t pset_size; | |
468 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { | |
469 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex( | |
470 fdesc, pset_i, &pset, &pset_size, NULL, NULL); | |
471 if (status != noErr) { | |
472 DLOG(ERROR) | |
473 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " | |
474 << status; | |
475 return; | |
476 } | |
477 total_bytes += pset_size + nal_size_field_bytes; | |
478 } | |
479 } | |
480 | |
481 annexb_buffer->reserve(total_bytes); | |
482 | |
483 // Copy all parameter sets before keyframes. | |
484 if (keyframe) { | |
485 const uint8_t* pset; | |
486 size_t pset_size; | |
487 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { | |
488 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex( | |
489 fdesc, pset_i, &pset, &pset_size, NULL, NULL); | |
490 if (status != noErr) { | |
491 DLOG(ERROR) | |
492 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " | |
493 << status; | |
494 return; | |
495 } | |
496 static const char startcode_4[4] = {0, 0, 0, 1}; | |
497 annexb_buffer->append(startcode_4, sizeof(startcode_4)); | |
498 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size); | |
499 } | |
500 } | |
501 | |
502 // Block buffers can be composed of non-contiguous chunks. For the sake of | |
503 // keeping this code simple, flatten non-contiguous block buffers. | |
504 base::ScopedCFTypeRef<CMBlockBufferRef> contiguous_bb( | |
505 bb, base::scoped_policy::RETAIN); | |
506 if (!CMBlockBufferIsRangeContiguous(bb, 0, 0)) { | |
507 contiguous_bb.reset(); | |
508 status = CMBlockBufferCreateContiguous(kCFAllocatorDefault, | |
509 bb, | |
510 kCFAllocatorDefault, | |
511 NULL, | |
512 0, | |
513 0, | |
514 0, | |
515 contiguous_bb.InitializeInto()); | |
516 if (status != noErr) { | |
517 DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status; | |
518 return; | |
519 } | |
520 } | |
521 | |
522 // Copy all the NAL units. In the process convert them from AVCC format | |
523 // (length header) to AnnexB format (start code). | |
524 char* bb_data; | |
525 status = CMBlockBufferGetDataPointer(contiguous_bb, 0, NULL, NULL, &bb_data); | |
526 if (status != noErr) { | |
527 DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status; | |
528 return; | |
529 } | |
530 | |
531 if (nal_size_field_bytes == 1) { | |
532 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer); | |
533 } else if (nal_size_field_bytes == 2) { | |
534 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer); | |
535 } else if (nal_size_field_bytes == 4) { | |
536 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer); | |
537 } | |
538 } | |
539 | |
540 } // namespace cast | |
541 } // namespace media | |
OLD | NEW |