OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/cast/sender/h264_vt_encoder.h" | 5 #include "media/cast/sender/h264_vt_encoder.h" |
6 | 6 |
7 #include <string> | 7 #include <string> |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/big_endian.h" | 10 #include "base/big_endian.h" |
11 #include "base/bind.h" | 11 #include "base/bind.h" |
12 #include "base/bind_helpers.h" | 12 #include "base/bind_helpers.h" |
13 #include "base/location.h" | 13 #include "base/location.h" |
14 #include "base/logging.h" | 14 #include "base/logging.h" |
15 #include "base/macros.h" | 15 #include "base/macros.h" |
16 #include "base/synchronization/lock.h" | |
16 #include "media/base/mac/corevideo_glue.h" | 17 #include "media/base/mac/corevideo_glue.h" |
17 #include "media/base/mac/video_frame_mac.h" | 18 #include "media/base/mac/video_frame_mac.h" |
18 #include "media/cast/sender/video_frame_factory.h" | 19 #include "media/cast/sender/video_frame_factory.h" |
19 | 20 |
20 namespace media { | 21 namespace media { |
21 namespace cast { | 22 namespace cast { |
22 | 23 |
23 namespace { | 24 namespace { |
24 | 25 |
25 // Container for the associated data of a video frame being processed. | 26 // Container for the associated data of a video frame being processed. |
26 struct InProgressFrameEncode { | 27 struct InProgressFrameEncode { |
27 const RtpTimestamp rtp_timestamp; | 28 const RtpTimestamp rtp_timestamp; |
28 const base::TimeTicks reference_time; | 29 const base::TimeTicks reference_time; |
29 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; | 30 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; |
30 | 31 |
31 InProgressFrameEncode(RtpTimestamp rtp, | 32 InProgressFrameEncode(RtpTimestamp rtp, |
32 base::TimeTicks r_time, | 33 base::TimeTicks r_time, |
33 VideoEncoder::FrameEncodedCallback callback) | 34 VideoEncoder::FrameEncodedCallback callback) |
34 : rtp_timestamp(rtp), | 35 : rtp_timestamp(rtp), |
35 reference_time(r_time), | 36 reference_time(r_time), |
36 frame_encoded_callback(callback) {} | 37 frame_encoded_callback(callback) {} |
37 }; | 38 }; |
38 | 39 |
40 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeysAndValues( | |
41 CFTypeRef* keys, | |
jfroy 2015/02/11 01:00:34: nit: const CFTypeRef
miu 2015/02/11 02:14:24: I tried, and the compiler didn't like it. Either
42 CFTypeRef* values, | |
jfroy 2015/02/11 01:00:34: nit: const CFTypeRef
43 size_t size) { | |
44 return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate( | |
45 kCFAllocatorDefault, | |
46 keys, | |
47 values, | |
48 size, | |
49 &kCFTypeDictionaryKeyCallBacks, | |
50 &kCFTypeDictionaryValueCallBacks)); | |
51 } | |
52 | |
39 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, | 53 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, |
40 CFTypeRef value) { | 54 CFTypeRef value) { |
41 CFTypeRef keys[1] = {key}; | 55 CFTypeRef keys[1] = {key}; |
42 CFTypeRef values[1] = {value}; | 56 CFTypeRef values[1] = {value}; |
43 return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate( | 57 return DictionaryWithKeysAndValues(keys, values, 1); |
44 kCFAllocatorDefault, keys, values, 1, &kCFTypeDictionaryKeyCallBacks, | |
45 &kCFTypeDictionaryValueCallBacks)); | |
46 } | 58 } |
47 | 59 |
48 base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const std::vector<int>& v) { | 60 base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) { |
49 std::vector<CFNumberRef> numbers; | 61 std::vector<CFNumberRef> numbers; |
50 numbers.reserve(v.size()); | 62 numbers.reserve(size); |
51 for (const int i : v) { | 63 for (const int* end = v + size; v < end; ++v) |
52 numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, &i)); | 64 numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v)); |
53 } | |
54 base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( | 65 base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( |
55 kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), | 66 kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), |
56 numbers.size(), &kCFTypeArrayCallBacks)); | 67 numbers.size(), &kCFTypeArrayCallBacks)); |
57 for (CFNumberRef number : numbers) { | 68 for (auto& number : numbers) { |
58 CFRelease(number); | 69 CFRelease(number); |
59 } | 70 } |
60 return array; | 71 return array; |
61 } | 72 } |
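
Aside: the helpers above (`DictionaryWithKeysAndValues`, `DictionaryWithKeyValue`, `ArrayWithIntegers`) exist to package CoreFoundation's C "Create rule" ownership behind `base::ScopedCFTypeRef`. A minimal standalone sketch of the same pattern with the raw CoreFoundation/CoreVideo APIs (no `base::` wrappers; the function name is hypothetical, macOS SDK assumed):

```cpp
// Builds {kCVPixelBufferPixelFormatTypeKey: [420 planar, 420 bi-planar]} by
// hand, releasing each temporary once its container has retained it.
#include <CoreFoundation/CoreFoundation.h>
#include <CoreVideo/CoreVideo.h>

static CFDictionaryRef CreatePixelFormatAttributes() {
  const int formats[] = {kCVPixelFormatType_420YpCbCr8Planar,
                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
  CFNumberRef numbers[2];
  for (size_t i = 0; i < 2; ++i) {
    numbers[i] =
        CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &formats[i]);
  }
  CFArrayRef format_array =
      CFArrayCreate(kCFAllocatorDefault,
                    reinterpret_cast<const void**>(numbers), 2,
                    &kCFTypeArrayCallBacks);
  for (size_t i = 0; i < 2; ++i)
    CFRelease(numbers[i]);  // The array has retained its elements.
  const void* keys[] = {kCVPixelBufferPixelFormatTypeKey};
  const void* values[] = {format_array};
  CFDictionaryRef attributes =
      CFDictionaryCreate(kCFAllocatorDefault, keys, values, 1,
                         &kCFTypeDictionaryKeyCallBacks,
                         &kCFTypeDictionaryValueCallBacks);
  CFRelease(format_array);  // The dictionary has retained the array.
  return attributes;        // Caller owns per the CF Create rule.
}
```

In the CL itself, `ScopedCFTypeRef` plus the explicit `release()`/`CFRelease` pairs in `Initialize()` carry the same ownership responsibilities.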
62 | 73 |
63 template <typename NalSizeType> | 74 template <typename NalSizeType> |
64 void CopyNalsToAnnexB(char* avcc_buffer, | 75 void CopyNalsToAnnexB(char* avcc_buffer, |
65 const size_t avcc_size, | 76 const size_t avcc_size, |
66 std::string* annexb_buffer) { | 77 std::string* annexb_buffer) { |
67 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || | 78 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || |
(...skipping 127 matching lines...) | |
195 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer); | 206 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer); |
196 } else { | 207 } else { |
197 NOTREACHED(); | 208 NOTREACHED(); |
198 } | 209 } |
199 } | 210 } |
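
For orientation on the elided `CopySampleBufferToAnnexBBuffer()` body: VideoToolbox produces AVCC-framed H.264, where each NAL unit carries a big-endian length prefix of `NalSizeType` bytes, and the Cast sender needs Annex B start codes instead. A hedged standalone sketch of that rewrite (not the CL's code, which uses `base::BigEndianReader` and reports errors):

```cpp
// Rewrites length-prefixed (AVCC) NAL units into Annex B form by replacing
// each NalSizeType-byte big-endian length with a 00 00 00 01 start code.
#include <cstdint>
#include <string>

template <typename NalSizeType>
void NalsToAnnexB(const char* avcc, size_t avcc_size, std::string* annexb) {
  static const char kStartCode[4] = {0, 0, 0, 1};
  size_t pos = 0;
  while (pos + sizeof(NalSizeType) <= avcc_size) {
    uint64_t nal_size = 0;  // Read the big-endian NAL length.
    for (size_t i = 0; i < sizeof(NalSizeType); ++i)
      nal_size = (nal_size << 8) | static_cast<uint8_t>(avcc[pos + i]);
    pos += sizeof(NalSizeType);
    if (nal_size > avcc_size - pos)
      break;  // Truncated NAL; the real code would log and bail out.
    annexb->append(kStartCode, sizeof(kStartCode));
    annexb->append(avcc + pos, static_cast<size_t>(nal_size));
    pos += static_cast<size_t>(nal_size);
  }
}
```

`NalSizeType` is chosen from the stream's configured NAL length size (1, 2, or 4 bytes), which is exactly what the dispatch in the function above does.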
200 | 211 |
201 // Implementation of the VideoFrameFactory interface using |CVPixelBufferPool|. | 212 // Implementation of the VideoFrameFactory interface using |CVPixelBufferPool|. |
202 class VideoFrameFactoryCVPixelBufferPoolImpl : public VideoFrameFactory { | 213 class VideoFrameFactoryCVPixelBufferPoolImpl : public VideoFrameFactory { |
203 public: | 214 public: |
204 VideoFrameFactoryCVPixelBufferPoolImpl( | 215 VideoFrameFactoryCVPixelBufferPoolImpl( |
205 const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool) | 216 const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool, |
206 : pool_(pool) {} | 217 const gfx::Size& frame_size) |
218 : pool_(pool), | |
219 frame_size_(frame_size) {} | |
207 | 220 |
208 ~VideoFrameFactoryCVPixelBufferPoolImpl() override {} | 221 ~VideoFrameFactoryCVPixelBufferPoolImpl() override {} |
209 | 222 |
210 scoped_refptr<VideoFrame> CreateFrame(base::TimeDelta timestamp) override { | 223 scoped_refptr<VideoFrame> MaybeCreateFrame( |
224 const gfx::Size& frame_size, base::TimeDelta timestamp) override { | |
225 if (frame_size != frame_size_) | |
226 return nullptr; // Buffer pool is not a match for requested frame size. | |
227 | |
211 base::ScopedCFTypeRef<CVPixelBufferRef> buffer; | 228 base::ScopedCFTypeRef<CVPixelBufferRef> buffer; |
212 CHECK_EQ(kCVReturnSuccess, | 229 if (CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_, |
213 CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_, | 230 buffer.InitializeInto()) != |
214 buffer.InitializeInto())); | 231 kCVReturnSuccess) |
232 return nullptr; // Buffer pool has run out of pixel buffers. | |
233 DCHECK(buffer); | |
234 | |
215 return VideoFrame::WrapCVPixelBuffer(buffer, timestamp); | 235 return VideoFrame::WrapCVPixelBuffer(buffer, timestamp); |
216 } | 236 } |
217 | 237 |
218 private: | 238 private: |
219 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_; | 239 const base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_; |
240 const gfx::Size frame_size_; | |
220 | 241 |
221 DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryCVPixelBufferPoolImpl); | 242 DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryCVPixelBufferPoolImpl); |
222 }; | 243 }; |
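
The switch from `CHECK_EQ` to returning null reflects that a `CVPixelBufferPool` can legitimately run dry once its allocation threshold is hit, and callers should treat that as back-pressure rather than a crash. A minimal sketch of that contract against the raw CoreVideo API (hypothetical helper name, macOS SDK assumed):

```cpp
#include <CoreVideo/CoreVideo.h>

// Returns a recycled pixel buffer from the pool, or null when the pool is
// exhausted so the caller can fall back to another allocation path.
static CVPixelBufferRef TryCreatePooledBuffer(CVPixelBufferPoolRef pool) {
  CVPixelBufferRef buffer = nullptr;
  if (CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &buffer) !=
      kCVReturnSuccess) {
    return nullptr;
  }
  return buffer;  // Caller owns; release with CVBufferRelease().
}
```

`MaybeCreateFrame()` layers the frame-size check and the `VideoFrame` wrapping on top of this behavior.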
223 | 244 |
224 } // namespace | 245 } // namespace |
225 | 246 |
247 // static | |
248 bool H264VideoToolboxEncoder::IsSupported( | |
249 const VideoSenderConfig& video_config) { | |
250 return video_config.codec == CODEC_VIDEO_H264 && VideoToolboxGlue::Get(); | |
251 } | |
252 | |
226 H264VideoToolboxEncoder::H264VideoToolboxEncoder( | 253 H264VideoToolboxEncoder::H264VideoToolboxEncoder( |
227 const scoped_refptr<CastEnvironment>& cast_environment, | 254 const scoped_refptr<CastEnvironment>& cast_environment, |
228 const VideoSenderConfig& video_config, | 255 const VideoSenderConfig& video_config, |
229 const gfx::Size& frame_size, | 256 const gfx::Size& frame_size, |
257 uint32 first_frame_id, | |
230 const StatusChangeCallback& status_change_cb) | 258 const StatusChangeCallback& status_change_cb) |
231 : cast_environment_(cast_environment), | 259 : cast_environment_(cast_environment), |
232 videotoolbox_glue_(VideoToolboxGlue::Get()), | 260 videotoolbox_glue_(VideoToolboxGlue::Get()), |
233 frame_id_(kStartFrameId), | 261 frame_size_(frame_size), |
262 status_change_cb_(status_change_cb), | |
263 next_frame_id_(first_frame_id), | |
234 encode_next_frame_as_keyframe_(false) { | 264 encode_next_frame_as_keyframe_(false) { |
235 DCHECK(!frame_size.IsEmpty()); | 265 DCHECK(!frame_size_.IsEmpty()); |
236 DCHECK(!status_change_cb.is_null()); | 266 DCHECK(!status_change_cb_.is_null()); |
237 | 267 |
238 OperationalStatus operational_status; | 268 OperationalStatus operational_status; |
239 if (video_config.codec == CODEC_VIDEO_H264 && videotoolbox_glue_) { | 269 if (video_config.codec == CODEC_VIDEO_H264 && videotoolbox_glue_) { |
240 operational_status = Initialize(video_config, frame_size) ? | 270 operational_status = Initialize(video_config) ? |
241 STATUS_INITIALIZED : STATUS_INVALID_CONFIGURATION; | 271 STATUS_INITIALIZED : STATUS_INVALID_CONFIGURATION; |
242 } else { | 272 } else { |
243 operational_status = STATUS_UNSUPPORTED_CODEC; | 273 operational_status = STATUS_UNSUPPORTED_CODEC; |
244 } | 274 } |
245 cast_environment_->PostTask( | 275 cast_environment_->PostTask( |
246 CastEnvironment::MAIN, | 276 CastEnvironment::MAIN, |
247 FROM_HERE, | 277 FROM_HERE, |
248 base::Bind(status_change_cb, operational_status)); | 278 base::Bind(status_change_cb_, operational_status)); |
249 } | 279 } |
250 | 280 |
251 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() { | 281 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() { |
252 Teardown(); | 282 Teardown(); |
253 } | 283 } |
254 | 284 |
255 bool H264VideoToolboxEncoder::Initialize( | 285 bool H264VideoToolboxEncoder::Initialize( |
256 const VideoSenderConfig& video_config, | 286 const VideoSenderConfig& video_config) { |
257 const gfx::Size& frame_size) { | |
258 DCHECK(thread_checker_.CalledOnValidThread()); | 287 DCHECK(thread_checker_.CalledOnValidThread()); |
259 DCHECK(!compression_session_); | 288 DCHECK(!compression_session_); |
260 | 289 |
261 // Note that the encoder object is given to the compression session as the | 290 // Note that the encoder object is given to the compression session as the |
262 // callback context using a raw pointer. The C API does not allow us to use | 291 // callback context using a raw pointer. The C API does not allow us to use |
263 // a smart pointer, nor is this encoder ref counted. However, this is still | 292 // a smart pointer, nor is this encoder ref counted. However, this is still |
264 // safe, because 1) we own the compression session and 2) we tear it down | 293 // safe, because 1) we own the compression session and 2) we tear it down |
265 // safely. When destructing the encoder, the compression session is flushed | 294 // safely. When destructing the encoder, the compression session is flushed |
266 // and invalidated. Internally, VideoToolbox will join all of its threads | 295 // and invalidated. Internally, VideoToolbox will join all of its threads |
267 // before returning to the client. Therefore, when control returns to us, we | 296 // before returning to the client. Therefore, when control returns to us, we |
268 // are guaranteed that the output callback will not execute again. | 297 // are guaranteed that the output callback will not execute again. |
269 | 298 |
270 // On OS X, allow the hardware encoder. Don't require it, it does not support | 299 // On OS X, allow the hardware encoder. Don't require it, it does not support |
271 // all configurations (some of which are used for testing). | 300 // all configurations (some of which are used for testing). |
272 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec; | 301 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec; |
273 #if !defined(OS_IOS) | 302 #if !defined(OS_IOS) |
274 encoder_spec = DictionaryWithKeyValue( | 303 encoder_spec = DictionaryWithKeyValue( |
275 videotoolbox_glue_ | 304 videotoolbox_glue_ |
276 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(), | 305 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(), |
277 kCFBooleanTrue); | 306 kCFBooleanTrue); |
278 #endif | 307 #endif |
279 | 308 |
280 // Certain encoders prefer kCVPixelFormatType_422YpCbCr8, which is not | 309 // Certain encoders prefer kCVPixelFormatType_422YpCbCr8, which is not |
281 // supported through VideoFrame. We can force 420 formats to be used instead. | 310 // supported through VideoFrame. We can force 420 formats to be used instead. |
282 const int formats[] = { | 311 const int formats[] = { |
283 kCVPixelFormatType_420YpCbCr8Planar, | 312 kCVPixelFormatType_420YpCbCr8Planar, |
284 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; | 313 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange |
285 base::ScopedCFTypeRef<CFArrayRef> formats_array = ArrayWithIntegers( | 314 }; |
286 std::vector<int>(formats, formats + arraysize(formats))); | 315 // Keep these attachment settings in-sync with those in ConfigureSession(). |
287 base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes = | 316 CFTypeRef attachments_keys[] = { |
288 DictionaryWithKeyValue(kCVPixelBufferPixelFormatTypeKey, formats_array); | 317 kCVImageBufferColorPrimariesKey, |
318 kCVImageBufferTransferFunctionKey, | |
319 kCVImageBufferYCbCrMatrixKey | |
320 }; | |
321 CFTypeRef attachments_values[] = { | |
322 kCVImageBufferColorPrimaries_ITU_R_709_2, | |
323 kCVImageBufferTransferFunction_ITU_R_709_2, | |
324 kCVImageBufferYCbCrMatrix_ITU_R_709_2 | |
325 }; | |
326 CFTypeRef buffer_attributes_keys[] = { | |
327 kCVPixelBufferPixelFormatTypeKey, | |
328 kCVBufferPropagatedAttachmentsKey | |
329 }; | |
330 CFTypeRef buffer_attributes_values[] = { | |
331 ArrayWithIntegers(formats, arraysize(formats)).release(), | |
332 DictionaryWithKeysAndValues(attachments_keys, | |
333 attachments_values, | |
334 arraysize(attachments_keys)).release() | |
335 }; | |
336 const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes = | |
337 DictionaryWithKeysAndValues(buffer_attributes_keys, | |
338 buffer_attributes_values, | |
339 arraysize(buffer_attributes_keys)); | |
340 for (auto& v : buffer_attributes_values) | |
341 CFRelease(v); | |
289 | 342 |
290 VTCompressionSessionRef session; | 343 VTCompressionSessionRef session; |
291 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate( | 344 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate( |
292 kCFAllocatorDefault, frame_size.width(), frame_size.height(), | 345 kCFAllocatorDefault, frame_size_.width(), frame_size_.height(), |
293 CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes, | 346 CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes, |
294 nullptr /* compressedDataAllocator */, | 347 nullptr /* compressedDataAllocator */, |
295 &H264VideoToolboxEncoder::CompressionCallback, | 348 &H264VideoToolboxEncoder::CompressionCallback, |
296 reinterpret_cast<void*>(this), &session); | 349 reinterpret_cast<void*>(this), &session); |
297 if (status != noErr) { | 350 if (status != noErr) { |
298 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; | 351 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; |
299 return false; | 352 return false; |
300 } | 353 } |
301 compression_session_.reset(session); | 354 compression_session_.reset(session); |
302 | 355 |
(...skipping 19 matching lines...) | |
322 ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(), | 375 ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(), |
323 240); | 376 240); |
324 // TODO(jfroy): implement better bitrate control | 377 // TODO(jfroy): implement better bitrate control |
325 // https://crbug.com/425352 | 378 // https://crbug.com/425352 |
326 SetSessionProperty( | 379 SetSessionProperty( |
327 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(), | 380 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(), |
328 (video_config.min_bitrate + video_config.max_bitrate) / 2); | 381 (video_config.min_bitrate + video_config.max_bitrate) / 2); |
329 SetSessionProperty( | 382 SetSessionProperty( |
330 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(), | 383 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(), |
331 video_config.max_frame_rate); | 384 video_config.max_frame_rate); |
385 // Keep these attachment settings in-sync with those in Initialize(). | |
332 SetSessionProperty( | 386 SetSessionProperty( |
333 videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(), | 387 videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(), |
334 kCVImageBufferColorPrimaries_ITU_R_709_2); | 388 kCVImageBufferColorPrimaries_ITU_R_709_2); |
335 SetSessionProperty( | 389 SetSessionProperty( |
336 videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(), | 390 videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(), |
337 kCVImageBufferTransferFunction_ITU_R_709_2); | 391 kCVImageBufferTransferFunction_ITU_R_709_2); |
338 SetSessionProperty( | 392 SetSessionProperty( |
339 videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(), | 393 videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(), |
340 kCVImageBufferYCbCrMatrix_ITU_R_709_2); | 394 kCVImageBufferYCbCrMatrix_ITU_R_709_2); |
341 if (video_config.max_number_of_video_buffers_used > 0) { | 395 if (video_config.max_number_of_video_buffers_used > 0) { |
342 SetSessionProperty( | 396 SetSessionProperty( |
343 videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(), | 397 videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(), |
344 video_config.max_number_of_video_buffers_used); | 398 video_config.max_number_of_video_buffers_used); |
345 } | 399 } |
346 } | 400 } |
347 | 401 |
348 void H264VideoToolboxEncoder::Teardown() { | 402 void H264VideoToolboxEncoder::Teardown() { |
349 DCHECK(thread_checker_.CalledOnValidThread()); | 403 DCHECK(thread_checker_.CalledOnValidThread()); |
350 | 404 |
351 // If the compression session exists, invalidate it. This blocks until all | 405 // If the compression session exists, invalidate it. This blocks until all |
352 // pending output callbacks have returned and any internal threads have | 406 // pending output callbacks have returned and any internal threads have |
353 // joined, ensuring no output callback ever sees a dangling encoder pointer. | 407 // joined, ensuring no output callback ever sees a dangling encoder pointer. |
354 if (compression_session_) { | 408 if (compression_session_) { |
355 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_); | 409 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_); |
356 compression_session_.reset(); | 410 compression_session_.reset(); |
357 } | 411 } |
358 } | 412 } |
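
To illustrate the invalidation contract the comment above depends on, here is a hedged sketch with the raw VideoToolbox API (the CL goes through `VideoToolboxGlue` and `ScopedCFTypeRef` instead); flushing first is optional and mirrors what `EmitFrames()` does:

```cpp
#include <VideoToolbox/VideoToolbox.h>

// Flush, invalidate (which joins VideoToolbox's internal threads, so the
// output callback can never fire again afterwards), then drop the reference.
static void DestroyCompressionSession(VTCompressionSessionRef session) {
  if (!session)
    return;
  VTCompressionSessionCompleteFrames(session, kCMTimeInvalid);
  VTCompressionSessionInvalidate(session);
  CFRelease(session);
}
```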
359 | 413 |
360 bool H264VideoToolboxEncoder::CanEncodeVariedFrameSizes() const { | |
361 return false; | |
362 } | |
363 | |
364 bool H264VideoToolboxEncoder::EncodeVideoFrame( | 414 bool H264VideoToolboxEncoder::EncodeVideoFrame( |
365 const scoped_refptr<media::VideoFrame>& video_frame, | 415 const scoped_refptr<media::VideoFrame>& video_frame, |
366 const base::TimeTicks& reference_time, | 416 const base::TimeTicks& reference_time, |
367 const FrameEncodedCallback& frame_encoded_callback) { | 417 const FrameEncodedCallback& frame_encoded_callback) { |
368 DCHECK(thread_checker_.CalledOnValidThread()); | 418 DCHECK(thread_checker_.CalledOnValidThread()); |
369 DCHECK(!video_frame->visible_rect().IsEmpty()); | |
370 DCHECK(!frame_encoded_callback.is_null()); | 419 DCHECK(!frame_encoded_callback.is_null()); |
371 | 420 |
372 if (!compression_session_) { | 421 if (!compression_session_) { |
373 DLOG(ERROR) << " compression session is null"; | 422 DLOG(ERROR) << " compression session is null"; |
374 return false; | 423 return false; |
375 } | 424 } |
376 | 425 |
426 if (video_frame->visible_rect().size() != frame_size_) | |
427 return false; | |
428 | |
377 // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be | 429 // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be |
378 // copied. If the VideoFrame was created by this encoder's video frame | 430 // copied. If the VideoFrame was created by this encoder's video frame |
379 // factory, then the returned CVPixelBuffer will have been obtained from the | 431 // factory, then the returned CVPixelBuffer will have been obtained from the |
380 // compression session's pixel buffer pool. This will eliminate a copy of the | 432 // compression session's pixel buffer pool. This will eliminate a copy of the |
381 // frame into memory visible by the hardware encoder. The VideoFrame's | 433 // frame into memory visible by the hardware encoder. The VideoFrame's |
382 // lifetime is extended for the lifetime of the returned CVPixelBuffer. | 434 // lifetime is extended for the lifetime of the returned CVPixelBuffer. |
383 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame); | 435 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame); |
384 if (!pixel_buffer) { | 436 if (!pixel_buffer) { |
385 return false; | 437 return false; |
386 } | 438 } |
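
The actual submission to VideoToolbox sits in the elided lines that follow; purely as a sketch of that step (not the CL's code), a wrapped pixel buffer is handed to the session roughly like this, with a per-frame options dictionary when a keyframe has been requested:

```cpp
#include <VideoToolbox/VideoToolbox.h>

// Submits one frame; frame_context is returned verbatim to the output
// callback (the CL passes an InProgressFrameEncode* here).
static bool SubmitFrame(VTCompressionSessionRef session,
                        CVPixelBufferRef pixel_buffer,
                        CMTime timestamp,
                        bool force_keyframe,
                        void* frame_context) {
  CFDictionaryRef frame_props = nullptr;
  if (force_keyframe) {
    const void* keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
    const void* values[] = {kCFBooleanTrue};
    frame_props = CFDictionaryCreate(kCFAllocatorDefault, keys, values, 1,
                                     &kCFTypeDictionaryKeyCallBacks,
                                     &kCFTypeDictionaryValueCallBacks);
  }
  const OSStatus status = VTCompressionSessionEncodeFrame(
      session, pixel_buffer, timestamp, kCMTimeInvalid /* duration */,
      frame_props, frame_context, nullptr /* infoFlagsOut */);
  if (frame_props)
    CFRelease(frame_props);
  return status == noErr;
}
```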
(...skipping 41 matching lines...) | |
428 | 480 |
429 encode_next_frame_as_keyframe_ = true; | 481 encode_next_frame_as_keyframe_ = true; |
430 } | 482 } |
431 | 483 |
432 void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) { | 484 void H264VideoToolboxEncoder::LatestFrameIdToReference(uint32 /*frame_id*/) { |
433 // Not supported by VideoToolbox in any meaningful manner. | 485 // Not supported by VideoToolbox in any meaningful manner. |
434 } | 486 } |
435 | 487 |
436 scoped_ptr<VideoFrameFactory> | 488 scoped_ptr<VideoFrameFactory> |
437 H264VideoToolboxEncoder::CreateVideoFrameFactory() { | 489 H264VideoToolboxEncoder::CreateVideoFrameFactory() { |
490 if (!videotoolbox_glue_ || !compression_session_) | |
491 return nullptr; | |
438 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool( | 492 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool( |
439 videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool( | 493 videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool( |
440 compression_session_), | 494 compression_session_), |
441 base::scoped_policy::RETAIN); | 495 base::scoped_policy::RETAIN); |
442 return scoped_ptr<VideoFrameFactory>( | 496 return scoped_ptr<VideoFrameFactory>( |
443 new VideoFrameFactoryCVPixelBufferPoolImpl(pool)); | 497 new VideoFrameFactoryCVPixelBufferPoolImpl(pool, frame_size_)); |
444 } | 498 } |
445 | 499 |
446 void H264VideoToolboxEncoder::EmitFrames() { | 500 void H264VideoToolboxEncoder::EmitFrames() { |
447 DCHECK(thread_checker_.CalledOnValidThread()); | 501 DCHECK(thread_checker_.CalledOnValidThread()); |
448 | 502 |
449 if (!compression_session_) { | 503 if (!compression_session_) { |
450 DLOG(ERROR) << " compression session is null"; | 504 DLOG(ERROR) << " compression session is null"; |
451 return; | 505 return; |
452 } | 506 } |
453 | 507 |
(...skipping 22 matching lines...) | |
476 CFStringRef value) { | 530 CFStringRef value) { |
477 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key, | 531 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key, |
478 value) == noErr; | 532 value) == noErr; |
479 } | 533 } |
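
The overload shown here handles `CFStringRef` values; the integer overload used by `ConfigureSession()` falls in the elided lines. A hedged sketch of what such an overload has to do, using the raw VideoToolbox API (`VTSessionSetProperty` only accepts CF types, so the int is boxed in a CFNumber first):

```cpp
#include <VideoToolbox/VideoToolbox.h>

static bool SetSessionPropertyInt32(VTCompressionSessionRef session,
                                    CFStringRef key,
                                    int32_t value) {
  CFNumberRef number =
      CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
  const OSStatus status = VTSessionSetProperty(session, key, number);
  CFRelease(number);  // The session retains the value if it keeps it.
  return status == noErr;
}
```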
480 | 534 |
481 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, | 535 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, |
482 void* request_opaque, | 536 void* request_opaque, |
483 OSStatus status, | 537 OSStatus status, |
484 VTEncodeInfoFlags info, | 538 VTEncodeInfoFlags info, |
485 CMSampleBufferRef sbuf) { | 539 CMSampleBufferRef sbuf) { |
486 if (status != noErr) { | |
487 DLOG(ERROR) << " encoding failed: " << status; | |
488 return; | |
489 } | |
490 if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) { | |
491 DVLOG(2) << " frame dropped"; | |
492 return; | |
493 } | |
494 | |
495 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque); | 540 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque); |
496 const scoped_ptr<InProgressFrameEncode> request( | 541 const scoped_ptr<InProgressFrameEncode> request( |
497 reinterpret_cast<InProgressFrameEncode*>(request_opaque)); | 542 reinterpret_cast<InProgressFrameEncode*>(request_opaque)); |
498 auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex( | 543 bool keyframe = false; |
499 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0)); | 544 bool has_frame_data = false; |
500 | 545 |
501 // If the NotSync key is not present, it implies Sync, which indicates a | 546 if (status != noErr) { |
502 // keyframe (at least I think, VT documentation is, erm, sparse). Could | 547 DLOG(ERROR) << " encoding failed: " << status; |
503 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false. | 548 encoder->cast_environment_->PostTask( |
504 bool keyframe = | 549 CastEnvironment::MAIN, |
505 !CFDictionaryContainsKey(sample_attachments, | 550 FROM_HERE, |
506 CoreMediaGlue::kCMSampleAttachmentKey_NotSync()); | 551 base::Bind(encoder->status_change_cb_, STATUS_CODEC_RUNTIME_ERROR)); |
552 } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) { | |
553 DVLOG(2) << " frame dropped"; | |
554 } else { | |
555 auto sample_attachments = static_cast<CFDictionaryRef>( | |
556 CFArrayGetValueAtIndex( | |
557 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), | |
558 0)); | |
559 | |
560 // If the NotSync key is not present, it implies Sync, which indicates a | |
561 // keyframe (at least I think, VT documentation is, erm, sparse). Could | |
562 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false. | |
563 keyframe = !CFDictionaryContainsKey( | |
564 sample_attachments, | |
565 CoreMediaGlue::kCMSampleAttachmentKey_NotSync()); | |
566 has_frame_data = true; | |
567 } | |
507 | 568 |
508 // Increment the encoder-scoped frame id and assign the new value to this | 569 // Increment the encoder-scoped frame id and assign the new value to this |
509 // frame. VideoToolbox calls the output callback serially, so this is safe. | 570 // frame. VideoToolbox calls the output callback serially, so this is safe. |
510 uint32 frame_id = ++encoder->frame_id_; | 571 const uint32 frame_id = encoder->next_frame_id_++; |
511 | 572 |
512 scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame()); | 573 scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame()); |
513 encoded_frame->frame_id = frame_id; | 574 encoded_frame->frame_id = frame_id; |
514 encoded_frame->reference_time = request->reference_time; | 575 encoded_frame->reference_time = request->reference_time; |
515 encoded_frame->rtp_timestamp = request->rtp_timestamp; | 576 encoded_frame->rtp_timestamp = request->rtp_timestamp; |
516 if (keyframe) { | 577 if (keyframe) { |
517 encoded_frame->dependency = EncodedFrame::KEY; | 578 encoded_frame->dependency = EncodedFrame::KEY; |
518 encoded_frame->referenced_frame_id = frame_id; | 579 encoded_frame->referenced_frame_id = frame_id; |
519 } else { | 580 } else { |
520 encoded_frame->dependency = EncodedFrame::DEPENDENT; | 581 encoded_frame->dependency = EncodedFrame::DEPENDENT; |
521 // H.264 supports complex frame reference schemes (multiple reference | 582 // H.264 supports complex frame reference schemes (multiple reference |
522 // frames, slice references, backward and forward references, etc). Cast | 583 // frames, slice references, backward and forward references, etc). Cast |
523 // doesn't support the concept of forward-referencing frame dependencies or | 584 // doesn't support the concept of forward-referencing frame dependencies or |
524 // multiple frame dependencies; so pretend that all frames are only | 585 // multiple frame dependencies; so pretend that all frames are only |
525 // decodable after their immediately preceding frame is decoded. This will | 586 // decodable after their immediately preceding frame is decoded. This will |
526 // ensure a Cast receiver only attempts to decode the frames sequentially | 587 // ensure a Cast receiver only attempts to decode the frames sequentially |
527 // and in order. Furthermore, the encoder is configured to never use forward | 588 // and in order. Furthermore, the encoder is configured to never use forward |
528 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There | 589 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There |
529 // is no way to prevent multiple reference frames. | 590 // is no way to prevent multiple reference frames. |
530 encoded_frame->referenced_frame_id = frame_id - 1; | 591 encoded_frame->referenced_frame_id = frame_id - 1; |
531 } | 592 } |
532 | 593 |
533 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe); | 594 if (has_frame_data) |
595 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe); | |
534 | 596 |
535 encoder->cast_environment_->PostTask( | 597 encoder->cast_environment_->PostTask( |
536 CastEnvironment::MAIN, FROM_HERE, | 598 CastEnvironment::MAIN, FROM_HERE, |
537 base::Bind(request->frame_encoded_callback, | 599 base::Bind(request->frame_encoded_callback, |
538 base::Passed(&encoded_frame))); | 600 base::Passed(&encoded_frame))); |
539 } | 601 } |
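
The sample-attachment keyframe check above can be exercised on its own; a standalone sketch with the raw CoreMedia API (hypothetical helper name, macOS SDK assumed):

```cpp
#include <CoreMedia/CoreMedia.h>

// A sample is a sync sample (keyframe) unless kCMSampleAttachmentKey_NotSync
// is present in its attachment dictionary; DependsOnOthers == false would be
// an alternative signal.
static bool IsKeyframe(CMSampleBufferRef sbuf) {
  CFArrayRef attachments =
      CMSampleBufferGetSampleAttachmentsArray(sbuf, /*createIfNecessary=*/true);
  if (!attachments || CFArrayGetCount(attachments) == 0)
    return true;  // Degenerate case: treat like the absence of NotSync.
  CFDictionaryRef sample_attachments =
      static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
  return !CFDictionaryContainsKey(sample_attachments,
                                  kCMSampleAttachmentKey_NotSync);
}
```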
540 | 602 |
603 // A ref-counted structure that is shared to provide concurrent access to the | |
604 // VideoFrameFactory instance for the current encoder. OnEncoderReplaced() can | |
605 // change |factory| whenever an encoder instance has been replaced, while users | |
606 // of CreateVideoFrameFactory() may attempt to read/use |factory| by any thread | |
607 // at any time. | |
608 struct SizeAdaptableH264VideoToolboxVideoEncoder::FactoryHolder | |
609 : public base::RefCountedThreadSafe<FactoryHolder> { | |
610 base::Lock lock; | |
611 scoped_ptr<VideoFrameFactory> factory; | |
612 | |
613 private: | |
614 friend class base::RefCountedThreadSafe<FactoryHolder>; | |
615 ~FactoryHolder() {} | |
616 }; | |
617 | |
618 SizeAdaptableH264VideoToolboxVideoEncoder:: | |
619 SizeAdaptableH264VideoToolboxVideoEncoder( | |
620 const scoped_refptr<CastEnvironment>& cast_environment, | |
621 const VideoSenderConfig& video_config, | |
622 const StatusChangeCallback& status_change_cb) | |
623 : SizeAdaptableVideoEncoderBase(cast_environment, | |
624 video_config, | |
625 status_change_cb), | |
626 holder_(new FactoryHolder()) {} | |
627 | |
628 SizeAdaptableH264VideoToolboxVideoEncoder:: | |
629 ~SizeAdaptableH264VideoToolboxVideoEncoder() {} | |
630 | |
631 scoped_ptr<VideoFrameFactory> | |
632 SizeAdaptableH264VideoToolboxVideoEncoder::CreateVideoFrameFactory() { | |
633 // A proxy allowing SizeAdaptableH264VideoToolboxVideoEncoder to swap out the | |
634 // VideoFrameFactory instance to match one appropriate for the current encoder | |
635 // instance. | |
636 class VideoFrameFactoryProxy : public VideoFrameFactory { | |
637 public: | |
638 explicit VideoFrameFactoryProxy(const scoped_refptr<FactoryHolder>& holder) | |
639 : holder_(holder) {} | |
640 | |
641 ~VideoFrameFactoryProxy() override {} | |
642 | |
643 scoped_refptr<VideoFrame> MaybeCreateFrame( | |
644 const gfx::Size& frame_size, base::TimeDelta timestamp) override { | |
645 base::AutoLock auto_lock(holder_->lock); | |
646 return holder_->factory ? | |
647 holder_->factory->MaybeCreateFrame(frame_size, timestamp) : nullptr; | |
648 } | |
649 | |
650 private: | |
651 const scoped_refptr<FactoryHolder> holder_; | |
652 | |
653 DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryProxy); | |
654 }; | |
655 | |
656 return scoped_ptr<VideoFrameFactory>(new VideoFrameFactoryProxy(holder_)); | |
657 } | |
658 | |
659 scoped_ptr<VideoEncoder> | |
660 SizeAdaptableH264VideoToolboxVideoEncoder::CreateReplacementEncoder() { | |
661 return scoped_ptr<VideoEncoder>(new H264VideoToolboxEncoder( | |
662 cast_environment(), | |
663 video_config(), | |
664 next_encoder_frame_size(), | |
665 last_frame_id() + 1, | |
666 CreateEncoderStatusChangeCallback())); | |
667 } | |
668 | |
669 void SizeAdaptableH264VideoToolboxVideoEncoder::OnEncoderReplaced( | |
670 VideoEncoder* replacement_encoder) { | |
671 scoped_ptr<VideoFrameFactory> current_factory( | |
672 replacement_encoder->CreateVideoFrameFactory()); | |
673 base::AutoLock auto_lock(holder_->lock); | |
674 holder_->factory = current_factory.Pass(); | |
675 } | |
676 | |
677 void SizeAdaptableH264VideoToolboxVideoEncoder::DestroyCurrentEncoder() { | |
678 { | |
679 base::AutoLock auto_lock(holder_->lock); | |
680 holder_->factory.reset(); | |
681 } | |
682 SizeAdaptableVideoEncoderBase::DestroyCurrentEncoder(); | |
683 } | |
684 | |
541 } // namespace cast | 685 } // namespace cast |
542 } // namespace media | 686 } // namespace media |