Chromium Code Reviews

Side by Side Diff: media/cast/sender/h264_vt_encoder.cc

Issue 450693006: VideoToolbox encoder for cast senders. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Edit comments per review feedback and for clarity. Created 6 years, 4 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/cast/sender/h264_vt_encoder.h"
6
7 #include <algorithm>
8 #include <vector>
miu 2014/08/25 19:21:17 Looks like std::vector isn't being used anywhere.
jfroy 2014/08/25 20:59:13 Done.
9
10 #include "base/big_endian.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/location.h"
14 #include "base/logging.h"
15
16 namespace media {
17 namespace cast {
18
19 namespace {
20
21 bool SetSessionProperty(VTSessionRef session, CFStringRef key, uint32_t value) {
22 base::ScopedCFTypeRef<CFNumberRef> cfvalue(
23 CFNumberCreate(NULL, kCFNumberSInt32Type, &value));
24 return VTSessionSetProperty(session, key, cfvalue) == noErr;
25 }
26
27 bool SetSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
28 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
29 return VTSessionSetProperty(session, key, cfvalue) == noErr;
30 }
31
32 bool SetSessionProperty(VTSessionRef session,
33 CFStringRef key,
34 CFStringRef value) {
35 return VTSessionSetProperty(session, key, value) == noErr;
36 }
37
38 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key,
39 CFTypeRef value) {
40 CFTypeRef keys[1] = {key};
41 CFTypeRef values[1] = {value};
42 return base::ScopedCFTypeRef<CFDictionaryRef>(
43 CFDictionaryCreate(kCFAllocatorDefault,
44 keys,
45 values,
46 1,
47 &kCFTypeDictionaryKeyCallBacks,
48 &kCFTypeDictionaryValueCallBacks));
49 }
50
51 struct FrameContext {
52 base::TimeTicks capture_time;
53 media::cast::VideoEncoder::FrameEncodedCallback frame_encoded_callback;
54 };
55
56 } // namespace
57
58 H264VideoToolboxEncoder::H264VideoToolboxEncoder(
59 scoped_refptr<CastEnvironment> cast_environment,
60 const VideoSenderConfig& video_config)
61 : cast_environment_(cast_environment),
62 cast_config_(video_config),
63 frame_id_(kStartFrameId),
64 last_keyframe_id_(kStartFrameId),
65 encode_next_frame_as_keyframe_(false) {
66 Initialize();
67 }
68
69 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
70 Teardown();
71 }
72
73 CVPixelBufferPoolRef H264VideoToolboxEncoder::cv_pixel_buffer_pool() const {
74 DCHECK(thread_checker_.CalledOnValidThread());
75 DCHECK(compression_session_);
76 return VTCompressionSessionGetPixelBufferPool(compression_session_);
77 }
78
79 void H264VideoToolboxEncoder::Initialize() {
80 DCHECK(thread_checker_.CalledOnValidThread());
81 DCHECK(!compression_session_);
82
83 // Note that the encoder object is given to the compression session as the
84 // callback context using a raw pointer. The C API does not allow us to use
85 // a smart pointer, nor is this encoder ref counted. However, this is still
86 // safe, because 1) we own the compression session and 2) we tear it down
87 // safely. When destructing the encoder, the compression session is flushed
88 // and invalidated. Internally, VideoToolbox will join all of its threads
89 // before returning to the client. Therefore, when control returns to us, we
90 // are guaranteed that the output callback will not execute again.
91
92 // On OS X, allow the hardware encoder but do not require it, since it does
93 // not support all configurations (some of which are used for testing).
94 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
95 #if !defined(OS_IOS)
96 encoder_spec = DictionaryWithKeyValue(
97 kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
98 kCFBooleanTrue);
99 #endif
100
101 VTCompressionSessionRef session;
102 OSStatus status =
103 VTCompressionSessionCreate(kCFAllocatorDefault,
104 cast_config_.width,
105 cast_config_.height,
106 kCMVideoCodecType_H264,
107 encoder_spec,
108 NULL /* sourceImageBufferAttributes */,
109 NULL /* compressedDataAllocator */,
110 CompressionCallback,
111 reinterpret_cast<void*>(this),
112 &session);
113 if (status != noErr) {
114 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
115 return;
116 }
117 compression_session_.reset(session);
118
119 ConfigureSession();
120 }
121
122 void H264VideoToolboxEncoder::ConfigureSession() {
123 SetSessionProperty(compression_session_,
124 kVTCompressionPropertyKey_ProfileLevel,
125 kVTProfileLevel_H264_Main_AutoLevel);
126 SetSessionProperty(
127 compression_session_, kVTCompressionPropertyKey_RealTime, true);
128 SetSessionProperty(compression_session_,
129 kVTCompressionPropertyKey_AllowFrameReordering,
130 false);
131 SetSessionProperty(compression_session_,
132 kVTCompressionPropertyKey_MaxKeyFrameInterval,
133 240u);
134 SetSessionProperty(compression_session_,
135 kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration,
136 240u);
137 SetSessionProperty(compression_session_,
138 kVTCompressionPropertyKey_AverageBitRate,
139 static_cast<uint32_t>(cast_config_.start_bitrate));
140 SetSessionProperty(compression_session_,
141 kVTCompressionPropertyKey_ExpectedFrameRate,
142 static_cast<uint32_t>(cast_config_.max_frame_rate));
143 SetSessionProperty(compression_session_,
144 kVTCompressionPropertyKey_ColorPrimaries,
145 kCVImageBufferColorPrimaries_ITU_R_709_2);
146 SetSessionProperty(compression_session_,
147 kVTCompressionPropertyKey_TransferFunction,
148 kCVImageBufferTransferFunction_ITU_R_709_2);
149 SetSessionProperty(compression_session_,
150 kVTCompressionPropertyKey_YCbCrMatrix,
151 kCVImageBufferYCbCrMatrix_ITU_R_709_2);
152 }
153
154 void H264VideoToolboxEncoder::Teardown() {
155 DCHECK(thread_checker_.CalledOnValidThread());
156
157 // If the compression session exists, invalidate it. This blocks until all
158 // pending output callbacks have returned and any internal threads have
159 // joined, ensuring no output callback ever sees a dangling encoder pointer.
160 if (compression_session_) {
161 VTCompressionSessionInvalidate(compression_session_);
162 compression_session_.reset();
163 }
164 }
165
166 bool H264VideoToolboxEncoder::EncodeVideoFrame(
167 const scoped_refptr<media::VideoFrame>& video_frame,
168 const base::TimeTicks& capture_time,
169 const FrameEncodedCallback& frame_encoded_callback) {
170 DCHECK(thread_checker_.CalledOnValidThread());
171 DCHECK(!capture_time.is_null());
172
173 if (!compression_session_) {
174 DLOG(ERROR) << " compression session is null";
175 return false;
176 }
177
178 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer(
179 video_frame->cv_pixel_buffer(), base::scoped_policy::RETAIN);
180 if (!pixel_buffer) {
181 pixel_buffer = WrapVideoFrame(video_frame);
miu 2014/08/25 22:22:09 Just to retain the findings of our discussion (wha
jfroy 2014/08/25 22:30:38 Done.
182 if (!pixel_buffer) {
183 return false;
184 }
185 }
186
187 CMTime timestamp_cm =
188 CMTimeMake(capture_time.ToInternalValue(), USEC_PER_SEC);
189
190 scoped_ptr<FrameContext> frame_context(new FrameContext());
191 frame_context->capture_time = capture_time;
192 frame_context->frame_encoded_callback = frame_encoded_callback;
193
194 base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
195 if (encode_next_frame_as_keyframe_) {
196 frame_props = DictionaryWithKeyValue(kVTEncodeFrameOptionKey_ForceKeyFrame,
197 kCFBooleanTrue);
198 encode_next_frame_as_keyframe_ = false;
199 }
200
201 VTEncodeInfoFlags info;
202 OSStatus status = VTCompressionSessionEncodeFrame(
203 compression_session_,
204 pixel_buffer,
205 timestamp_cm,
206 kCMTimeInvalid,
207 frame_props,
208 reinterpret_cast<void*>(frame_context.release()),
209 &info);
210 if (status != noErr) {
211 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
212 return false;
213 }
214 if ((info & kVTEncodeInfo_FrameDropped)) {
215 DLOG(ERROR) << " frame dropped";
216 return false;
217 }
218
219 return true;
220 }
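
The ownership handoff above (releasing the FrameContext into the callback's void* argument, to be re-wrapped on the other side) is the usual way to thread a heap object through a C callback. Below is a minimal standalone sketch of the pattern in plain C++; Context, Callback and the payload value are illustrative names only, not part of the patch.

#include <cstdio>
#include <memory>

struct Context {  // stand-in for FrameContext
  int payload;
};

// Stand-in for a C-style callback that receives a void* context.
static void Callback(void* opaque) {
  // Reclaim ownership immediately so the context is freed on every exit path.
  std::unique_ptr<Context> ctx(static_cast<Context*>(opaque));
  std::printf("payload = %d\n", ctx->payload);
}

int main() {
  std::unique_ptr<Context> ctx(new Context{42});
  Callback(ctx.release());  // ownership passes through the void*
  return 0;
}

CompressionCallback below follows the same discipline with scoped_ptr: it takes ownership of frame_opaque as its first action, so the context is deleted even when the callback returns early on error.
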
221
222 void H264VideoToolboxEncoder::SetBitRate(int new_bit_rate) {
223 DCHECK(thread_checker_.CalledOnValidThread());
224 // VideoToolbox does not seem to support bitrate reconfiguration.
225 }
226
227 void H264VideoToolboxEncoder::GenerateKeyFrame() {
228 DCHECK(thread_checker_.CalledOnValidThread());
229 DCHECK(compression_session_);
230
231 encode_next_frame_as_keyframe_ = true;
232 }
233
234 void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) {
235 // Not supported by VideoToolbox in any meaningful manner.
236 }
237
238 static void VideoFramePixelBufferReleaseCallback(void* frame_ref,
239 const void* data,
240 size_t size,
241 size_t num_planes,
242 const void* planes[]) {
243 free(const_cast<void*>(data));
244 reinterpret_cast<media::VideoFrame*>(frame_ref)->Release();
245 }
246
247 base::ScopedCFTypeRef<CVPixelBufferRef> H264VideoToolboxEncoder::WrapVideoFrame(
miu 2014/08/25 19:21:17 IMHO, it would be cleaner for the CVPixelBuffer to
jfroy 2014/08/25 20:59:13 I really don't want to burden VideoFrame with more
miu 2014/08/25 21:47:40 Okay. I'm fine with this scheme. But, I don't th
jfroy 2014/08/25 21:57:56 This is essentially a fallback function. It is exp
miu 2014/08/25 22:22:09 I understand now. Thanks for being patient.
jfroy 2014/08/25 22:30:38 No problem :)
248 const scoped_refptr<media::VideoFrame>& frame) {
249 static const size_t MAX_PLANES = 3;
250
251 media::VideoFrame::Format format = frame->format();
252 size_t num_planes = media::VideoFrame::NumPlanes(format);
253 DCHECK_LE(num_planes, MAX_PLANES);
254 gfx::Size coded_size = frame->coded_size();
255
256 // media::VideoFrame only supports YUV formats, so there is no way to
257 // leverage VideoToolbox's ability to convert RGBA formats automatically. In
258 // addition, most of the media::VideoFrame formats are YVU, which VT does not
259 // support. Finally, media::VideoFrame formats do not carry any information
260 // about the color space, transform or any other colorimetric information
261 // that is generally needed to fully specify the input data. So essentially
262 // require that the input be YCbCr 4:2:0 (either planar or biplanar) and
263 // assume the standard video dynamic range for samples (although most modern
264 // HDTVs support full-range video these days).
265 OSType pixel_format;
266 if (format == media::VideoFrame::Format::I420) {
267 pixel_format = kCVPixelFormatType_420YpCbCr8Planar;
268 } else if (format == media::VideoFrame::Format::NV12) {
269 pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
270 } else {
271 DLOG(ERROR) << " unsupported frame format: " << format;
272 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL);
273 }
274
275 // TODO(jfroy): Support extended pixels (i.e. padding).
276 if (frame->coded_size() != frame->visible_rect().size()) {
277 DLOG(ERROR) << " frame with extended pixels not supported: "
278 << " coded_size: " << coded_size.ToString()
279 << ", visible_rect: " << frame->visible_rect().ToString();
280 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL);
281 }
282
283 void* plane_ptrs[MAX_PLANES];
284 size_t plane_widths[MAX_PLANES];
285 size_t plane_heights[MAX_PLANES];
286 size_t plane_bytes_per_row[MAX_PLANES];
287 for (size_t plane_i = 0; plane_i < num_planes; ++plane_i) {
288 plane_ptrs[plane_i] = frame->data(plane_i);
289 gfx::Size plane_size =
290 media::VideoFrame::PlaneSize(format, plane_i, coded_size);
291 plane_widths[plane_i] = plane_size.width();
292 plane_heights[plane_i] = plane_size.height();
293 plane_bytes_per_row[plane_i] = frame->stride(plane_i);
294 }
295
296 // CVPixelBufferCreateWithPlanarBytes needs a dummy plane descriptor or the
297 // release callback will not execute. The descriptor is freed in the callback.
298 void* descriptor =
299 calloc(1,
300 std::max(sizeof(CVPlanarPixelBufferInfo_YCbCrPlanar),
301 sizeof(CVPlanarPixelBufferInfo_YCbCrBiPlanar)));
302
303 // Wrap the frame's data in a CVPixelBuffer. Because this is a C API, we can't
304 // give it a smart pointer to the frame, so instead pass a raw pointer and
305 // increment the frame's reference count manually.
306 CVPixelBufferRef pixel_buffer;
307 CVReturn result =
308 CVPixelBufferCreateWithPlanarBytes(kCFAllocatorDefault,
309 coded_size.width(),
310 coded_size.height(),
311 pixel_format,
312 descriptor,
313 0,
314 num_planes,
315 plane_ptrs,
316 plane_widths,
317 plane_heights,
318 plane_bytes_per_row,
319 VideoFramePixelBufferReleaseCallback,
320 frame.get(),
321 NULL,
322 &pixel_buffer);
323 if (result != kCVReturnSuccess) {
324 DLOG(ERROR) << " CVPixelBufferCreateWithPlanarBytes failed: " << result;
325 return base::ScopedCFTypeRef<CVPixelBufferRef>(NULL);
326 }
327
328 // The CVPixelBuffer now references the data of the frame, so increment its
329 // reference count manually. The release callback set on the pixel buffer will
330 // release the frame.
331 frame->AddRef();
332
333 return base::ScopedCFTypeRef<CVPixelBufferRef>(pixel_buffer);
334 }
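
For reference, the plane geometry that the loop above hands to CVPixelBufferCreateWithPlanarBytes follows the usual 4:2:0 layout. Below is a minimal plain-C++ sketch of the I420 case with an assumed 1280x720 coded size and unpadded strides (the NV12 case differs only in that the two chroma planes are interleaved into a single plane); all names and numbers are illustrative.

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed coded size; any even dimensions work the same way.
  const size_t width = 1280, height = 720;

  // I420: plane 0 is full-resolution Y, planes 1 and 2 are Cb and Cr at half
  // the width and half the height.
  const size_t plane_widths[3] = {width, width / 2, width / 2};
  const size_t plane_heights[3] = {height, height / 2, height / 2};

  size_t total = 0;
  for (size_t i = 0; i < 3; ++i) {
    std::printf("plane %zu: %zux%zu\n", i, plane_widths[i], plane_heights[i]);
    total += plane_widths[i] * plane_heights[i];  // assumes stride == width
  }
  std::printf("total bytes (unpadded): %zu\n", total);  // 1.5 bytes per pixel
  return 0;
}

The actual code uses media::VideoFrame::PlaneSize and frame->stride(), so padded strides are honored rather than assumed away as in this sketch.
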
335
336 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
337 void* frame_opaque,
338 OSStatus status,
339 VTEncodeInfoFlags info,
340 CMSampleBufferRef sbuf) {
341 H264VideoToolboxEncoder* encoder =
342 reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
343 scoped_ptr<FrameContext> frame_context(
344 reinterpret_cast<FrameContext*>(frame_opaque));
345
346 if (status != noErr) {
347 DLOG(ERROR) << " encoding failed: " << status;
348 return;
349 }
350 if ((info & kVTEncodeInfo_FrameDropped)) {
351 DVLOG(2) << " frame dropped";
352 return;
353 }
354 DCHECK_EQ(CMSampleBufferGetNumSamples(sbuf), 1);
355
356 CFDictionaryRef sample_attachments =
357 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
358 CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0));
359
360 // If the NotSync key is not present, the sample is a sync sample, which
361 // indicates a keyframe (the VideoToolbox documentation is sparse here). We
362 // could alternatively check kCMSampleAttachmentKey_DependsOnOthers == false.
363 bool keyframe =
364 CFDictionaryContainsKey(sample_attachments,
365 kCMSampleAttachmentKey_NotSync) == false;
366
367 // Generate a frame id and update the last keyframe id if needed. VideoToolbox
368 // calls the output callback serially, so this is safe.
369 uint32 frame_id = ++encoder->frame_id_;
370 if (keyframe) {
371 encoder->last_keyframe_id_ = frame_id;
372 }
373
374 CMSampleTimingInfo timing_info;
375 CMSampleBufferGetSampleTimingInfo(sbuf, 0, &timing_info);
376
377 scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
378 encoded_frame->frame_id = frame_id;
379 encoded_frame->reference_time = frame_context->capture_time;
380 encoded_frame->rtp_timestamp =
381 GetVideoRtpTimestamp(frame_context->capture_time);
382 if (keyframe) {
383 encoded_frame->dependency = EncodedFrame::KEY;
384 encoded_frame->referenced_frame_id = frame_id;
385 } else {
386 encoded_frame->dependency = EncodedFrame::DEPENDENT;
387 // H.264 supports complex frame reference schemes (multiple reference frames,
388 // slice references, backward and forward references, etc). This encoder
389 // compromises by using the last keyframe as the reference frame. This will
390 // force retransmission of keyframes, which are necessary for decoding of
391 // any following frames since parameter sets are attached to them, while
392 // allowing other frames to be dropped (which may force the receiver to drop
393 // frames at decode time). Keyframes are emitted at a regular interval, so
394 // this should only cause temporary frame drops.
395 encoded_frame->referenced_frame_id = encoder->last_keyframe_id_;
miu 2014/08/25 19:21:17 If this is true, the encoder might not be very spa
jfroy 2014/08/25 20:59:13 In my experimentation (and there is no documentati
miu 2014/08/25 21:47:40 Can it be determined from the encoded data? (read
jfroy 2014/08/25 21:57:56 I'd rather not have to decode the video layer data
396 }
397
398 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
399
400 encoder->cast_environment_->PostTask(
401 CastEnvironment::MAIN,
402 FROM_HERE,
403 base::Bind(frame_context->frame_encoded_callback,
404 base::Passed(&encoded_frame)));
405 }
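
The frame id and reference bookkeeping in the callback is easiest to see with a concrete sequence. The plain-C++ sketch below walks a made-up keyframe pattern through the same logic; the variable names are illustrative, not the encoder's members.

#include <cstdio>

int main() {
  unsigned frame_id = 0;         // stand-in for kStartFrameId
  unsigned last_keyframe_id = 0;
  const bool keyframes[] = {true, false, false, true, false};

  for (bool key : keyframes) {
    unsigned id = ++frame_id;
    if (key)
      last_keyframe_id = id;
    const unsigned referenced = key ? id : last_keyframe_id;
    std::printf("frame %u%s references %u\n", id, key ? " (key)" : "", referenced);
  }
  return 0;
}

This prints 1(key)->1, 2->1, 3->1, 4(key)->4, 5->4, which is exactly the compromise described in the comment above: every dependent frame claims the most recent keyframe as its reference.
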
406
407 template <typename NalSizeType>
408 static void CopyNalsToAnnexB(char* avcc_buffer,
409 const size_t avcc_size,
410 std::string* annexb_buffer) {
411 COMPILE_ASSERT(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 ||
412 sizeof(NalSizeType) == 4,
413 "NAL size type has unsupported size");
414 static const char startcode_3[3] = {0, 0, 1};
415 DCHECK(avcc_buffer);
416 DCHECK(annexb_buffer);
417 size_t bytes_left = avcc_size;
418 while (bytes_left > 0) {
419 DCHECK_GT(bytes_left, sizeof(NalSizeType));
420 NalSizeType nal_size;
421 base::ReadBigEndian(avcc_buffer, &nal_size);
422 bytes_left -= sizeof(NalSizeType);
423 avcc_buffer += sizeof(NalSizeType);
424
425 DCHECK_GE(bytes_left, nal_size);
426 annexb_buffer->append(startcode_3, sizeof(startcode_3));
427 annexb_buffer->append(avcc_buffer, nal_size);
428 bytes_left -= nal_size;
429 avcc_buffer += nal_size;
430 }
431 }
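
The AVCC-to-Annex B rewrite that CopyNalsToAnnexB performs is worth a worked example. The self-contained plain-C++ sketch below reproduces the same transformation for fixed 4-byte length fields and made-up NAL payloads; AvccToAnnexB and the byte values are assumptions for illustration only.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>

// Rewrites a buffer of 4-byte-length-prefixed NAL units (AVCC framing) into
// Annex B framing, where each NAL unit is preceded by a 00 00 01 start code.
static std::string AvccToAnnexB(const uint8_t* avcc, size_t size) {
  static const char kStartCode[3] = {0, 0, 1};
  std::string annexb;
  size_t pos = 0;
  while (pos + 4 <= size) {
    // Read the big-endian 4-byte NAL length.
    const uint32_t nal_size = (uint32_t(avcc[pos]) << 24) |
                              (uint32_t(avcc[pos + 1]) << 16) |
                              (uint32_t(avcc[pos + 2]) << 8) |
                              uint32_t(avcc[pos + 3]);
    pos += 4;
    assert(pos + nal_size <= size);
    annexb.append(kStartCode, sizeof(kStartCode));
    annexb.append(reinterpret_cast<const char*>(avcc + pos), nal_size);
    pos += nal_size;
  }
  return annexb;
}

int main() {
  // Two fake "NAL units", {0xAA, 0xBB} and {0xCC}, each behind a length field.
  const uint8_t avcc[] = {0, 0, 0, 2, 0xAA, 0xBB, 0, 0, 0, 1, 0xCC};
  const uint8_t expected[] = {0, 0, 1, 0xAA, 0xBB, 0, 0, 1, 0xCC};
  const std::string out = AvccToAnnexB(avcc, sizeof(avcc));
  assert(out.size() == sizeof(expected));
  assert(std::memcmp(out.data(), expected, sizeof(expected)) == 0);
  return 0;
}

The template above does the same thing generically for 1-, 2- and 4-byte length fields, using base::ReadBigEndian to decode the length.
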
432
433 void H264VideoToolboxEncoder::CopySampleBufferToAnnexBBuffer(
434 CMSampleBufferRef sbuf,
435 std::string* annexb_buffer,
436 bool keyframe) {
437 // Perform two passes: one to figure out the total output size, and another
438 // to copy the data after having performed a single output allocation. Note
439 // that we'll reserve a bit more than needed because we count 4 bytes (the
440 // NAL length field) instead of 3 (the start code) per video NAL.
441
442 // TODO(jfroy): There is a bug in
443 // CMVideoFormatDescriptionGetH264ParameterSetAtIndex; iterate until it fails.
444
445 OSStatus status;
446
447 // Get the sample buffer's block buffer and format description.
448 CMBlockBufferRef bb = CMSampleBufferGetDataBuffer(sbuf);
449 DCHECK(bb);
450 CMFormatDescriptionRef fdesc = CMSampleBufferGetFormatDescription(sbuf);
451 DCHECK(fdesc);
452
453 size_t bb_size = CMBlockBufferGetDataLength(bb);
454 size_t total_bytes = bb_size;
455
456 size_t pset_count;
457 int nal_size_field_bytes;
458 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
459 fdesc, 0, NULL, NULL, &pset_count, &nal_size_field_bytes);
460 if (status == kCMFormatDescriptionBridgeError_InvalidParameter) {
461 DLOG(WARNING) << " assuming 2 parameter sets and a 4-byte NAL length header";
462 pset_count = 2;
463 nal_size_field_bytes = 4;
464 } else if (status != noErr) {
465 DLOG(ERROR)
466 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
467 << status;
468 return;
469 }
470
471 if (keyframe) {
472 const uint8_t* pset;
473 size_t pset_size;
474 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
475 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
476 fdesc, pset_i, &pset, &pset_size, NULL, NULL);
477 if (status != noErr) {
478 DLOG(ERROR)
479 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
480 << status;
481 return;
482 }
483 total_bytes += pset_size + nal_size_field_bytes;
484 }
485 }
486
487 annexb_buffer->reserve(total_bytes);
488
489 // Copy all parameter sets before keyframes.
490 if (keyframe) {
491 const uint8_t* pset;
492 size_t pset_size;
493 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) {
494 status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
495 fdesc, pset_i, &pset, &pset_size, NULL, NULL);
496 if (status != noErr) {
497 DLOG(ERROR)
498 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: "
499 << status;
500 return;
501 }
502 static const char startcode_4[4] = {0, 0, 0, 1};
503 annexb_buffer->append(startcode_4, sizeof(startcode_4));
504 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size);
505 }
506 }
507
508 // Block buffers can be composed of non-contiguous chunks. For the sake of
509 // keeping this code simple, flatten non-contiguous block buffers.
510 base::ScopedCFTypeRef<CMBlockBufferRef> contiguous_bb(
511 bb, base::scoped_policy::RETAIN);
512 if (!CMBlockBufferIsRangeContiguous(bb, 0, 0)) {
513 contiguous_bb.reset();
514 status = CMBlockBufferCreateContiguous(kCFAllocatorDefault,
515 bb,
516 kCFAllocatorDefault,
517 NULL,
518 0,
519 0,
520 0,
521 contiguous_bb.InitializeInto());
522 if (status != noErr) {
523 DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status;
524 return;
525 }
526 }
527
528 // Copy all the NAL units. In the process, convert them from AVCC format
529 // (length header) to Annex B format (start code).
530 char* bb_data;
531 status = CMBlockBufferGetDataPointer(contiguous_bb, 0, NULL, NULL, &bb_data);
532 if (status != noErr) {
533 DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status;
534 return;
535 }
536
537 if (nal_size_field_bytes == 1) {
538 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer);
539 } else if (nal_size_field_bytes == 2) {
540 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer);
541 } else if (nal_size_field_bytes == 4) {
542 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer);
543 }
544 }
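
The first pass above only has to produce a reservation hint, so its arithmetic is simple. The sketch below runs the same computation with made-up sizes (an SPS, a PPS and a 4-byte length field, as in the common case handled above); all numbers are assumptions for illustration.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t bb_size = 4096;          // compressed sample data, AVCC framed
  const size_t pset_sizes[] = {24, 6};  // e.g. an SPS and a PPS
  const size_t nal_size_field_bytes = 4;
  const bool keyframe = true;

  // Reuse the block buffer size for the NAL payload (a slight over-estimate,
  // since 4-byte length fields become 3-byte start codes), and add each
  // parameter set plus one start code's worth of bytes on keyframes.
  size_t total_bytes = bb_size;
  if (keyframe) {
    for (size_t pset_size : pset_sizes)
      total_bytes += pset_size + nal_size_field_bytes;
  }
  std::printf("reserve %zu bytes\n", total_bytes);  // 4096 + 28 + 10 = 4134
  return 0;
}

Since reserve() is only a hint, appending slightly more or less than the estimate remains correct; the estimate just avoids repeated reallocation during the copy pass.
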
545
546 } // namespace cast
547 } // namespace media