Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/vt_video_encode_accelerator_mac.cc

Issue 1636083003: H264 HW encode using VideoToolbox (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: miu@ comments. Created 4 years, 9 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/cast/sender/h264_vt_encoder.h" 5 #include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
6 6
7 #include <stddef.h> 7 #include "base/thread_task_runner_handle.h"
8 8 #include "media/base/mac/coremedia_glue.h"
9 #include <string>
10 #include <vector>
11
12 #include "base/big_endian.h"
13 #include "base/bind.h"
14 #include "base/bind_helpers.h"
15 #include "base/location.h"
16 #include "base/logging.h"
17 #include "base/macros.h"
18 #include "base/power_monitor/power_monitor.h"
19 #include "base/synchronization/lock.h"
20 #include "build/build_config.h"
21 #include "media/base/mac/corevideo_glue.h" 9 #include "media/base/mac/corevideo_glue.h"
22 #include "media/base/mac/video_frame_mac.h" 10 #include "media/base/mac/video_frame_mac.h"
23 #include "media/cast/common/rtp_time.h" 11
24 #include "media/cast/constants.h" 12 namespace content {
25 #include "media/cast/sender/video_frame_factory.h"
26
27 namespace media {
28 namespace cast {
29 13
30 namespace { 14 namespace {
31 15
32 // Container for the associated data of a video frame being processed. 16 // TODO(emircan): Check if we can find the actual system capabilities via
33 struct InProgressFrameEncode { 17 // creating VTCompressionSessions with varying requirements.
34 const RtpTimeTicks rtp_timestamp; 18 // See crbug.com/584784.
19 const size_t kBitsPerByte = 8;
20 const size_t kDefaultResolutionWidth = 640;
21 const size_t kDefaultResolutionHeight = 480;
22 const size_t kMaxFrameRateNumerator = 30;
23 const size_t kMaxFrameRateDenominator = 1;
24 const size_t kMaxResolutionWidth = 4096;
25 const size_t kMaxResolutionHeight = 2160;
26 const size_t kNumInputBuffers = 3;
27
28 } // namespace
29
30 struct VTVideoEncodeAccelerator::InProgressFrameEncode {
31 InProgressFrameEncode(base::TimeDelta rtp_timestamp, base::TimeTicks ref_time)
32 : timestamp(rtp_timestamp), reference_time(ref_time) {}
33 const base::TimeDelta timestamp;
35 const base::TimeTicks reference_time; 34 const base::TimeTicks reference_time;
36 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; 35
37 36 private:
38 InProgressFrameEncode(RtpTimeTicks rtp, 37 DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
39 base::TimeTicks r_time,
40 VideoEncoder::FrameEncodedCallback callback)
41 : rtp_timestamp(rtp),
42 reference_time(r_time),
43 frame_encoded_callback(callback) {}
44 }; 38 };
45 39
46 base::ScopedCFTypeRef<CFDictionaryRef> 40 struct VTVideoEncodeAccelerator::EncodeOutput {
47 DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) { 41 EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
48 return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate( 42 : info(info_flags), sample_buffer(sbuf, base::scoped_policy::RETAIN) {}
49 kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks, 43 const VTEncodeInfoFlags info;
50 &kCFTypeDictionaryValueCallBacks)); 44 const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;
51 } 45
52 46 private:
53 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, 47 DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
54 CFTypeRef value) { 48 };
55 CFTypeRef keys[1] = {key}; 49
56 CFTypeRef values[1] = {value}; 50 struct VTVideoEncodeAccelerator::BitstreamBufferRef {
57 return DictionaryWithKeysAndValues(keys, values, 1); 51 BitstreamBufferRef(int32_t id,
58 } 52 scoped_ptr<base::SharedMemory> shm,
59 53 size_t size)
60 base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) { 54 : id(id), shm(std::move(shm)), size(size) {}
61 std::vector<CFNumberRef> numbers; 55 const int32_t id;
62 numbers.reserve(size); 56 const scoped_ptr<base::SharedMemory> shm;
63 for (const int* end = v + size; v < end; ++v) 57 const size_t size;
64 numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v)); 58
65 base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( 59 private:
66 kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), 60 DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
67 numbers.size(), &kCFTypeArrayCallBacks)); 61 };
68 for (auto& number : numbers) { 62
69 CFRelease(number); 63 VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
70 } 64 : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
71 return array; 65 encoder_thread_("VTEncoderThread"),
72 } 66 encoder_task_weak_factory_(this) {
73 67 }
74 template <typename NalSizeType> 68
75 void CopyNalsToAnnexB(char* avcc_buffer, 69 VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
76 const size_t avcc_size, 70 DVLOG(3) << __FUNCTION__;
77 std::string* annexb_buffer) { 71 DCHECK(thread_checker_.CalledOnValidThread());
78 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || 72
79 sizeof(NalSizeType) == 4, 73 Destroy();
80 "NAL size type has unsupported size"); 74 DCHECK(!encoder_thread_.IsRunning());
81 static const char startcode_3[3] = {0, 0, 1}; 75 DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
82 DCHECK(avcc_buffer); 76 }
83 DCHECK(annexb_buffer); 77
84 size_t bytes_left = avcc_size; 78 media::VideoEncodeAccelerator::SupportedProfiles
85 while (bytes_left > 0) { 79 VTVideoEncodeAccelerator::GetSupportedProfiles() {
86 DCHECK_GT(bytes_left, sizeof(NalSizeType)); 80 DVLOG(3) << __FUNCTION__;
87 NalSizeType nal_size; 81 DCHECK(thread_checker_.CalledOnValidThread());
88 base::ReadBigEndian(avcc_buffer, &nal_size); 82
89 bytes_left -= sizeof(NalSizeType); 83 SupportedProfiles profiles;
90 avcc_buffer += sizeof(NalSizeType); 84 // We want to check if HW encoder is supported initially.
91 85 videotoolbox_glue_ = VideoToolboxGlue::Get();
92 DCHECK_GE(bytes_left, nal_size); 86 if (!videotoolbox_glue_) {
93 annexb_buffer->append(startcode_3, sizeof(startcode_3)); 87 DLOG(ERROR) << "Failed creating VideoToolbox glue.";
94 annexb_buffer->append(avcc_buffer, nal_size); 88 return profiles;
95 bytes_left -= nal_size; 89 }
96 avcc_buffer += nal_size; 90 const bool rv = CreateCompressionSession(
97 } 91 media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
98 } 92 gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
99 93 DestroyCompressionSession();
100 // Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies 94 if (!rv) {
101 // parameter sets for keyframes before the frame data as well. 95 DLOG(ERROR) << "Failed creating compression session with hardware support.";
miu 2016/03/09 01:48:50 This would be a good thing to have as VLOG(1) instead.
emircan 2016/03/09 02:26:03 Done.
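(Presumed follow-up, not part of this patch set: per the thread above, the DLOG(ERROR) line just before it would be downgraded to something like
    VLOG(1) << "Failed creating compression session with hardware support.";
so the expected no-hardware case is logged without being reported as an error.)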
102 void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf, 96 return profiles;
103 std::string* annexb_buffer, 97 }
104 bool keyframe) { 98
105 // Perform two pass, one to figure out the total output size, and another to 99 SupportedProfile profile;
106 // copy the data after having performed a single output allocation. Note that 100 profile.profile = media::H264PROFILE_BASELINE;
107 // we'll allocate a bit more because we'll count 4 bytes instead of 3 for 101 profile.max_framerate_numerator = kMaxFrameRateNumerator;
108 // video NALs. 102 profile.max_framerate_denominator = kMaxFrameRateDenominator;
109 103 profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
110 OSStatus status; 104 profiles.push_back(profile);
111 105 return profiles;
112 // Get the sample buffer's block buffer and format description. 106 }
113 auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf); 107
114 DCHECK(bb); 108 bool VTVideoEncodeAccelerator::Initialize(
115 auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf); 109 media::VideoPixelFormat format,
116 DCHECK(fdesc); 110 const gfx::Size& input_visible_size,
117 111 media::VideoCodecProfile output_profile,
118 size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb); 112 uint32_t initial_bitrate,
119 size_t total_bytes = bb_size; 113 Client* client) {
120 114 DVLOG(3) << __FUNCTION__
121 size_t pset_count; 115 << ": input_format=" << media::VideoPixelFormatToString(format)
122 int nal_size_field_bytes; 116 << ", input_visible_size=" << input_visible_size.ToString()
123 status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 117 << ", output_profile=" << output_profile
124 fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes); 118 << ", initial_bitrate=" << initial_bitrate;
125 if (status == 119 DCHECK(thread_checker_.CalledOnValidThread());
126 CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) { 120 DCHECK(client);
127 DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header"; 121
128 pset_count = 2; 122 if (media::PIXEL_FORMAT_I420 != format) {
129 nal_size_field_bytes = 4; 123 DLOG(ERROR) << "Input format not supported= "
130 } else if (status != noErr) { 124 << media::VideoPixelFormatToString(format);
131 DLOG(ERROR) 125 return false;
132 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 126 }
133 << status; 127 if (media::H264PROFILE_BASELINE != output_profile) {
134 return; 128 DLOG(ERROR) << "Output profile not supported= "
135 } 129 << output_profile;
136 130 return false;
137 if (keyframe) { 131 }
138 const uint8_t* pset; 132
139 size_t pset_size; 133 videotoolbox_glue_ = VideoToolboxGlue::Get();
140 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { 134 if (!videotoolbox_glue_) {
141 status = 135 DLOG(ERROR) << "Failed creating VideoToolbox glue.";
142 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 136 return false;
143 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr); 137 }
144 if (status != noErr) { 138
145 DLOG(ERROR) 139 client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
146 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 140 client_ = client_ptr_factory_->GetWeakPtr();
147 << status; 141 input_visible_size_ = input_visible_size;
148 return; 142 frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
149 } 143 target_bitrate_ = initial_bitrate;
150 total_bytes += pset_size + nal_size_field_bytes; 144 bitstream_buffer_size_ = input_visible_size.GetArea();
151 } 145
152 } 146 if (!ResetCompressionSession()) {
153 147 DLOG(ERROR) << "Failed creating compression session.";
154 annexb_buffer->reserve(total_bytes); 148 return false;
155 149 }
156 // Copy all parameter sets before keyframes. 150
157 if (keyframe) { 151 if (!encoder_thread_.Start()) {
158 const uint8_t* pset; 152 DLOG(ERROR) << "Failed spawning encoder thread.";
159 size_t pset_size; 153 return false;
160 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { 154 }
161 status = 155
162 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 156 client_task_runner_->PostTask(
163 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr); 157 FROM_HERE,
164 if (status != noErr) { 158 base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
165 DLOG(ERROR) 159 input_visible_size_, bitstream_buffer_size_));
166 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 160 return true;
167 << status; 161 }
168 return; 162
169 } 163 void VTVideoEncodeAccelerator::Encode(
170 static const char startcode_4[4] = {0, 0, 0, 1}; 164 const scoped_refptr<media::VideoFrame>& frame,
171 annexb_buffer->append(startcode_4, sizeof(startcode_4)); 165 bool force_keyframe) {
172 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size); 166 DVLOG(3) << __FUNCTION__;
173 } 167 DCHECK(thread_checker_.CalledOnValidThread());
174 } 168
175 169 encoder_thread_.message_loop()->PostTask(
176 // Block buffers can be composed of non-contiguous chunks. For the sake of 170 FROM_HERE,
177 // keeping this code simple, flatten non-contiguous block buffers. 171 base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
178 base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb( 172 base::Unretained(this),
179 bb, base::scoped_policy::RETAIN); 173 frame,
180 if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) { 174 force_keyframe));
181 contiguous_bb.reset(); 175 }
182 status = CoreMediaGlue::CMBlockBufferCreateContiguous( 176
183 kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0, 177 void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
184 contiguous_bb.InitializeInto()); 178 const media::BitstreamBuffer& buffer) {
185 if (status != noErr) { 179 DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
186 DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status; 180 DCHECK(thread_checker_.CalledOnValidThread());
187 return; 181
188 } 182 if (buffer.size() < bitstream_buffer_size_) {
189 } 183 DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
190 184 << " vs. " << bitstream_buffer_size_;
191 // Copy all the NAL units. In the process convert them from AVCC format 185 client_->NotifyError(kInvalidArgumentError);
192 // (length header) to AnnexB format (start code). 186 return;
193 char* bb_data; 187 }
194 status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr, 188
195 nullptr, &bb_data); 189 scoped_ptr<base::SharedMemory> shm(
190 new base::SharedMemory(buffer.handle(), false));
191 if (!shm->Map(buffer.size())) {
192 DLOG(ERROR) << "Failed mapping shared memory.";
193 client_->NotifyError(kPlatformFailureError);
194 return;
195 }
196
197 scoped_ptr<BitstreamBufferRef> buffer_ref(
198 new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
199
200 encoder_thread_.message_loop()->PostTask(
201 FROM_HERE,
202 base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
203 base::Unretained(this),
204 base::Passed(&buffer_ref)));
205 }
206
207 void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
208 uint32_t bitrate,
209 uint32_t framerate) {
210 DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
211 << ": framerate=" << framerate;
212 DCHECK(thread_checker_.CalledOnValidThread());
213
214 frame_rate_ = framerate > 1 ? framerate : 1;
215 target_bitrate_ = bitrate > 1 ? bitrate : 1;
216
217 if (!compression_session_) {
218 client_->NotifyError(kPlatformFailureError);
219 return;
220 }
221
222 media::video_toolbox::SessionPropertySetter session_property_setter(
223 compression_session_, videotoolbox_glue_);
224 // TODO(emircan): See crbug.com/425352.
225 bool rv = session_property_setter.Set(
226 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
227 target_bitrate_);
228 rv &= session_property_setter.Set(
229 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
230 frame_rate_);
231 rv &= session_property_setter.Set(
232 videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
233 media::video_toolbox::ArrayWithIntegerAndFloat(
234 target_bitrate_ / kBitsPerByte, 1.0f));
235 DLOG_IF(ERROR, !rv) << "Couldn't change session encoding parameters.";
236 }
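(Aside on the DataRateLimits value set above: kVTCompressionPropertyKey_DataRateLimits takes pairs of a byte count and a window duration in seconds, so passing target_bitrate_ / kBitsPerByte together with 1.0f caps the stream at the average bitrate over a one-second window; for example, a 2,000,000 bps target becomes a 250,000 bytes-per-second limit.)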
237
238 void VTVideoEncodeAccelerator::Destroy() {
239 DVLOG(3) << __FUNCTION__;
240 DCHECK(thread_checker_.CalledOnValidThread());
241
242 // Cancel all callbacks.
243 client_ptr_factory_.reset();
244
245 if (encoder_thread_.IsRunning()) {
246 encoder_thread_.message_loop()->PostTask(
247 FROM_HERE,
248 base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
249 base::Unretained(this)));
250 encoder_thread_.Stop();
251 } else {
252 DestroyTask();
253 }
254 }
255
256 void VTVideoEncodeAccelerator::EncodeTask(
257 const scoped_refptr<media::VideoFrame>& frame,
258 bool force_keyframe) {
259 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
260 DCHECK(compression_session_);
261 DCHECK(frame);
262
263 // TODO(emircan): See if we can eliminate a copy here by using
264 // CVPixelBufferPool for the allocation of incoming VideoFrames.
265 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
266 media::WrapVideoFrameInCVPixelBuffer(*frame);
267 base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
268 media::video_toolbox::DictionaryWithKeyValue(
269 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
270 force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
271
272 base::TimeTicks ref_time;
273 if (!frame->metadata()->GetTimeTicks(
274 media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
275 ref_time = base::TimeTicks::Now();
276 }
277 auto timestamp_cm = CoreMediaGlue::CMTimeMake(
278 frame->timestamp().InMicroseconds(), USEC_PER_SEC);
279 // Wrap information we'll need after the frame is encoded in a heap object.
280 // We'll get the pointer back from the VideoToolbox completion callback.
281 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
282 frame->timestamp(), ref_time));
283
284 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
285 compression_session_, pixel_buffer, timestamp_cm,
286 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
287 reinterpret_cast<void*>(request.release()), nullptr);
196 if (status != noErr) { 288 if (status != noErr) {
197 DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status; 289 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
198 return; 290 NotifyError(kPlatformFailureError);
199 } 291 }
200 292 }
201 if (nal_size_field_bytes == 1) { 293
202 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer); 294 void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
203 } else if (nal_size_field_bytes == 2) { 295 scoped_ptr<BitstreamBufferRef> buffer_ref) {
204 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer); 296 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
205 } else if (nal_size_field_bytes == 4) { 297
206 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer); 298 // If there is already EncodeOutput waiting, copy its output first.
207 } else { 299 if (!encoder_output_queue_.empty()) {
208 NOTREACHED(); 300 scoped_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
209 } 301 std::move(encoder_output_queue_.front());
210 } 302 encoder_output_queue_.pop_front();
211 303 ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
212 } // namespace 304 return;
213 305 }
214 class H264VideoToolboxEncoder::VideoFrameFactoryImpl 306
215 : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>, 307 bitstream_buffer_queue_.push_back(std::move(buffer_ref));
216 public VideoFrameFactory { 308 }
217 public: 309
218 // Type that proxies the VideoFrameFactory interface to this class. 310 void VTVideoEncodeAccelerator::DestroyTask() {
219 class Proxy; 311 // This thread runs on |encoder_thread_| if it is alive, otherwise on GPU
220 312 // child thread.
221 VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder, 313
222 const scoped_refptr<CastEnvironment>& cast_environment) 314 // Cancel all encoder thread callbacks.
223 : encoder_(encoder), cast_environment_(cast_environment) {} 315 encoder_task_weak_factory_.InvalidateWeakPtrs();
224 316
225 scoped_refptr<VideoFrame> MaybeCreateFrame( 317 // This call blocks until all pending frames are flushed out.
226 const gfx::Size& frame_size, 318 DestroyCompressionSession();
227 base::TimeDelta timestamp) final { 319 }
228 if (frame_size.IsEmpty()) { 320
229 DVLOG(1) << "Rejecting empty video frame."; 321 void VTVideoEncodeAccelerator::NotifyError(
230 return nullptr; 322 media::VideoEncodeAccelerator::Error error) {
231 } 323 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
232 324 client_task_runner_->PostTask(
233 base::AutoLock auto_lock(lock_); 325 FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
234 326 }
235 // If the pool size does not match, speculatively reset the encoder to use
236 // the new size and return null. Cache the new frame size right away and
237 // toss away the pixel buffer pool to avoid spurious tasks until the encoder
238 // is done resetting.
239 if (frame_size != pool_frame_size_) {
240 DVLOG(1) << "MaybeCreateFrame: Detected frame size change.";
241 cast_environment_->PostTask(
242 CastEnvironment::MAIN, FROM_HERE,
243 base::Bind(&H264VideoToolboxEncoder::UpdateFrameSize, encoder_,
244 frame_size));
245 pool_frame_size_ = frame_size;
246 pool_.reset();
247 return nullptr;
248 }
249
250 if (!pool_) {
251 DVLOG(1) << "MaybeCreateFrame: No pixel buffer pool.";
252 return nullptr;
253 }
254
255 // Allocate a pixel buffer from the pool and return a wrapper VideoFrame.
256 base::ScopedCFTypeRef<CVPixelBufferRef> buffer;
257 auto status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_,
258 buffer.InitializeInto());
259 if (status != kCVReturnSuccess) {
260 DLOG(ERROR) << "CVPixelBufferPoolCreatePixelBuffer failed: " << status;
261 return nullptr;
262 }
263
264 DCHECK(buffer);
265 return VideoFrame::WrapCVPixelBuffer(buffer, timestamp);
266 }
267
268 void Update(const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool,
269 const gfx::Size& frame_size) {
270 base::AutoLock auto_lock(lock_);
271 pool_ = pool;
272 pool_frame_size_ = frame_size;
273 }
274
275 private:
276 friend class base::RefCountedThreadSafe<VideoFrameFactoryImpl>;
277 ~VideoFrameFactoryImpl() final {}
278
279 base::Lock lock_;
280 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_;
281 gfx::Size pool_frame_size_;
282
283 // Weak back reference to the encoder and the cast environment so we can
284 // message the encoder when the frame size changes.
285 const base::WeakPtr<H264VideoToolboxEncoder> encoder_;
286 const scoped_refptr<CastEnvironment> cast_environment_;
287
288 DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
289 };
290
291 class H264VideoToolboxEncoder::VideoFrameFactoryImpl::Proxy
292 : public VideoFrameFactory {
293 public:
294 explicit Proxy(
295 const scoped_refptr<VideoFrameFactoryImpl>& video_frame_factory)
296 : video_frame_factory_(video_frame_factory) {
297 DCHECK(video_frame_factory_);
298 }
299
300 scoped_refptr<VideoFrame> MaybeCreateFrame(
301 const gfx::Size& frame_size,
302 base::TimeDelta timestamp) final {
303 return video_frame_factory_->MaybeCreateFrame(frame_size, timestamp);
304 }
305
306 private:
307 ~Proxy() final {}
308
309 const scoped_refptr<VideoFrameFactoryImpl> video_frame_factory_;
310
311 DISALLOW_COPY_AND_ASSIGN(Proxy);
312 };
313 327
314 // static 328 // static
315 bool H264VideoToolboxEncoder::IsSupported( 329 void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
316 const VideoSenderConfig& video_config) { 330 void* request_opaque,
317 return video_config.codec == CODEC_VIDEO_H264 && VideoToolboxGlue::Get(); 331 OSStatus status,
318 } 332 VTEncodeInfoFlags info,
319 333 CMSampleBufferRef sbuf) {
320 H264VideoToolboxEncoder::H264VideoToolboxEncoder( 334 // This function may be called asynchronously, on a different thread from the
321 const scoped_refptr<CastEnvironment>& cast_environment, 335 // one that calls VTCompressionSessionEncodeFrame.
322 const VideoSenderConfig& video_config, 336 DVLOG(3) << __FUNCTION__;
323 const StatusChangeCallback& status_change_cb) 337
324 : cast_environment_(cast_environment), 338 auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
325 videotoolbox_glue_(VideoToolboxGlue::Get()), 339 DCHECK(encoder);
326 video_config_(video_config), 340
327 status_change_cb_(status_change_cb), 341 // Release InProgressFrameEncode, since we don't have support to return
328 last_frame_id_(kFirstFrameId - 1), 342 // timestamps at this point.
329 encode_next_frame_as_keyframe_(false), 343 scoped_ptr<InProgressFrameEncode> request(
330 power_suspended_(false), 344 reinterpret_cast<InProgressFrameEncode*>(request_opaque));
331 weak_factory_(this) { 345 request.reset();
332 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); 346
333 DCHECK(!status_change_cb_.is_null()); 347 // EncodeOutput holds onto CMSampleBufferRef when posting task between
334 348 // threads.
335 OperationalStatus operational_status = 349 scoped_ptr<EncodeOutput> encode_output(new EncodeOutput(info, sbuf));
336 H264VideoToolboxEncoder::IsSupported(video_config) 350
337 ? STATUS_INITIALIZED 351 // This method is NOT called on |encoder_thread_|, so we still need to
338 : STATUS_UNSUPPORTED_CODEC; 352 // post a task back to it to do work.
339 cast_environment_->PostTask( 353 encoder->encoder_thread_.task_runner()->PostTask(
340 CastEnvironment::MAIN, FROM_HERE, 354 FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
341 base::Bind(status_change_cb_, operational_status)); 355 encoder->encoder_task_weak_factory_.GetWeakPtr(),
342 356 status, base::Passed(&encode_output)));
343 if (operational_status == STATUS_INITIALIZED) { 357 }
344 // Create the shared video frame factory. It persists for the combined 358
345 // lifetime of the encoder and all video frame factory proxies created by 359 void VTVideoEncodeAccelerator::CompressionCallbackTask(
346 // |CreateVideoFrameFactory| that reference it. 360 OSStatus status,
347 video_frame_factory_ = 361 scoped_ptr<EncodeOutput> encode_output) {
348 scoped_refptr<VideoFrameFactoryImpl>(new VideoFrameFactoryImpl( 362 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
349 weak_factory_.GetWeakPtr(), cast_environment_)); 363
350 364 if (status != noErr) {
351 // Register for power state changes. 365 DLOG(ERROR) << " encode failed: " << status;
352 auto power_monitor = base::PowerMonitor::Get(); 366 NotifyError(kPlatformFailureError);
353 if (power_monitor) { 367 return;
354 power_monitor->AddObserver(this); 368 }
355 VLOG(1) << "Registered for power state changes."; 369
356 } else { 370 // If there isn't any BitstreamBuffer to copy into, add it to a queue for
357 DLOG(WARNING) << "No power monitor. Process suspension will invalidate " 371 // later use.
358 "the encoder."; 372 if (bitstream_buffer_queue_.empty()) {
359 } 373 encoder_output_queue_.push_back(std::move(encode_output));
360 } 374 return;
361 } 375 }
362 376
363 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() { 377 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
378 std::move(bitstream_buffer_queue_.front());
379 bitstream_buffer_queue_.pop_front();
380 ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
381 }
382
383 void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
384 scoped_ptr<EncodeOutput> encode_output,
385 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
386 DVLOG(3) << __FUNCTION__;
387 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
388
389 if (encode_output->info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) {
390 DVLOG(2) << " frame dropped";
391 client_task_runner_->PostTask(
392 FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
393 buffer_ref->id, 0, false));
394 return;
395 }
396
397 auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
398 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(
399 encode_output->sample_buffer.get(), true),
400 0));
401 const bool keyframe =
402 !CFDictionaryContainsKey(sample_attachments,
403 CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
404
405 size_t used_buffer_size = 0;
406 const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
407 encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
408 reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
409 if (!copy_rv) {
410 DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
411 used_buffer_size = 0;
412 }
413
414 client_task_runner_->PostTask(
415 FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
416 buffer_ref->id, used_buffer_size, keyframe));
417 }
418
419 bool VTVideoEncodeAccelerator::ResetCompressionSession() {
420 DCHECK(thread_checker_.CalledOnValidThread());
421
364 DestroyCompressionSession(); 422 DestroyCompressionSession();
365 423
366 // If video_frame_factory_ is not null, the encoder registered for power state 424 CFTypeRef attributes_keys[] = {
367 // changes in the ctor and it must now unregister. 425 kCVPixelBufferOpenGLCompatibilityKey,
368 if (video_frame_factory_) { 426 kCVPixelBufferIOSurfacePropertiesKey,
369 auto power_monitor = base::PowerMonitor::Get(); 427 kCVPixelBufferPixelFormatTypeKey
370 if (power_monitor) 428 };
371 power_monitor->RemoveObserver(this);
372 }
373 }
374
375 void H264VideoToolboxEncoder::ResetCompressionSession() {
376 DCHECK(thread_checker_.CalledOnValidThread());
377
378 // Ignore reset requests while power suspended.
379 if (power_suspended_)
380 return;
381
382 // Notify that we're resetting the encoder.
383 cast_environment_->PostTask(
384 CastEnvironment::MAIN, FROM_HERE,
385 base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING));
386
387 // Destroy the current session, if any.
388 DestroyCompressionSession();
389
390 // On OS X, allow the hardware encoder. Don't require it, it does not support
391 // all configurations (some of which are used for testing).
392 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
393 #if !defined(OS_IOS)
394 encoder_spec = DictionaryWithKeyValue(
395 videotoolbox_glue_
396 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
397 kCFBooleanTrue);
398 #endif
399
400 // Force 420v so that clients can easily use these buffers as GPU textures.
401 const int format[] = { 429 const int format[] = {
402 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; 430 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
403 431 CFTypeRef attributes_values[] = {
404 // Keep these attachment settings in-sync with those in ConfigureSession(). 432 kCFBooleanTrue,
405 CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey, 433 media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
406 kCVImageBufferTransferFunctionKey, 434 .release(),
407 kCVImageBufferYCbCrMatrixKey}; 435 media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
408 CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2, 436 .release()};
409 kCVImageBufferTransferFunction_ITU_R_709_2, 437 const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
410 kCVImageBufferYCbCrMatrix_ITU_R_709_2}; 438 media::video_toolbox::DictionaryWithKeysAndValues(
411 CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey, 439 attributes_keys, attributes_values, arraysize(attributes_keys));
412 kCVBufferPropagatedAttachmentsKey}; 440 for (auto& v : attributes_values)
413 CFTypeRef buffer_attributes_values[] = {
414 ArrayWithIntegers(format, arraysize(format)).release(),
415 DictionaryWithKeysAndValues(attachments_keys, attachments_values,
416 arraysize(attachments_keys)).release()};
417 const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
418 DictionaryWithKeysAndValues(buffer_attributes_keys,
419 buffer_attributes_values,
420 arraysize(buffer_attributes_keys));
421 for (auto& v : buffer_attributes_values)
422 CFRelease(v); 441 CFRelease(v);
423 442
443 bool session_rv =
444 CreateCompressionSession(attributes, input_visible_size_, true);
445 if (!session_rv) {
446 // Try creating session again without forcing HW encode.
miu 2016/03/09 01:48:50 nit: Maybe add to this comment that this is seems
emircan 2016/03/09 02:26:04 Done.
447 DestroyCompressionSession();
448 session_rv =
449 CreateCompressionSession(attributes, input_visible_size_, false);
450 if (!session_rv) {
451 DestroyCompressionSession();
452 return false;
453 }
454 }
455
456 const bool configure_rv = ConfigureCompressionSession();
457 RequestEncodingParametersChange(target_bitrate_, frame_rate_);
458 return configure_rv;
459 }
460
461 bool VTVideoEncodeAccelerator::CreateCompressionSession(
462 base::ScopedCFTypeRef<CFDictionaryRef> attributes,
463 const gfx::Size& input_size,
464 bool require_hw_encoding) {
465 DCHECK(thread_checker_.CalledOnValidThread());
466
467 std::vector<CFTypeRef> encoder_keys;
468 std::vector<CFTypeRef> encoder_values;
469 encoder_keys.push_back(videotoolbox_glue_
470 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
471 encoder_values.push_back(kCFBooleanTrue);
472
473 if (require_hw_encoding) {
474 encoder_keys.push_back(
475 videotoolbox_glue_
476 ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
477 encoder_values.push_back(kCFBooleanTrue);
478 }
479 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
480 media::video_toolbox::DictionaryWithKeysAndValues(
481 encoder_keys.data(), encoder_values.data(), encoder_keys.size());
482
424 // Create the compression session. 483 // Create the compression session.
425
426 // Note that the encoder object is given to the compression session as the 484 // Note that the encoder object is given to the compression session as the
427 // callback context using a raw pointer. The C API does not allow us to use a 485 // callback context using a raw pointer. The C API does not allow us to use a
428 // smart pointer, nor is this encoder ref counted. However, this is still 486 // smart pointer, nor is this encoder ref counted. However, this is still
429 // safe, because we 1) we own the compression session and 2) we tear it down 487 // safe, because we 1) we own the compression session and 2) we tear it down
430 // safely. When destructing the encoder, the compression session is flushed 488 // safely. When destructing the encoder, the compression session is flushed
431 // and invalidated. Internally, VideoToolbox will join all of its threads 489 // and invalidated. Internally, VideoToolbox will join all of its threads
432 // before returning to the client. Therefore, when control returns to us, we 490 // before returning to the client. Therefore, when control returns to us, we
433 // are guaranteed that the output callback will not execute again. 491 // are guaranteed that the output callback will not execute again.
434 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate( 492 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
435 kCFAllocatorDefault, frame_size_.width(), frame_size_.height(), 493 kCFAllocatorDefault,
436 CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes, 494 input_size.width(),
495 input_size.height(),
496 CoreMediaGlue::kCMVideoCodecType_H264,
497 encoder_spec,
498 attributes,
437 nullptr /* compressedDataAllocator */, 499 nullptr /* compressedDataAllocator */,
438 &H264VideoToolboxEncoder::CompressionCallback, 500 &VTVideoEncodeAccelerator::CompressionCallback,
439 reinterpret_cast<void*>(this), compression_session_.InitializeInto()); 501 reinterpret_cast<void*>(this),
502 compression_session_.InitializeInto());
440 if (status != noErr) { 503 if (status != noErr) {
441 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; 504 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
442 // Notify that reinitialization has failed. 505 return false;
443 cast_environment_->PostTask( 506 }
444 CastEnvironment::MAIN, FROM_HERE, 507 DVLOG(3) << " VTCompressionSession created with HW encode: "
445 base::Bind(status_change_cb_, STATUS_CODEC_INIT_FAILED)); 508 << require_hw_encoding << ", input size=" << input_size.ToString();
446 return; 509 return true;
447 } 510 }
448 511
449 // Configure the session (apply session properties based on the current state 512 bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
450 // of the encoder, experimental tuning and requirements). 513 DCHECK(thread_checker_.CalledOnValidThread());
451 ConfigureCompressionSession(); 514 DCHECK(compression_session_);
452 515
453 // Update the video frame factory. 516 media::video_toolbox::SessionPropertySetter session_property_setter(
454 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool( 517 compression_session_, videotoolbox_glue_);
455 videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool( 518 bool rv = true;
456 compression_session_), 519 rv &= session_property_setter.Set(
457 base::scoped_policy::RETAIN);
458 video_frame_factory_->Update(pool, frame_size_);
459
460 // Notify that reinitialization is done.
461 cast_environment_->PostTask(
462 CastEnvironment::MAIN, FROM_HERE,
463 base::Bind(status_change_cb_, STATUS_INITIALIZED));
464 }
465
466 void H264VideoToolboxEncoder::ConfigureCompressionSession() {
467 SetSessionProperty(
468 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(), 520 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
469 videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel()); 521 videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel());
470 SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), 522 rv &= session_property_setter.Set(
471 true); 523 videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
472 SetSessionProperty( 524 rv &= session_property_setter.Set(
473 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(), 525 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
474 false); 526 false);
475 SetSessionProperty( 527 DLOG_IF(ERROR, !rv) << " Setting session property failed.";
476 videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240); 528 return rv;
477 SetSessionProperty( 529 }
478 videotoolbox_glue_ 530
479 ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(), 531 void VTVideoEncodeAccelerator::DestroyCompressionSession() {
480 240); 532 // This method may be called on |encoder thread| or GPU child thread.
481 // TODO(jfroy): implement better bitrate control 533
482 // https://crbug.com/425352
483 SetSessionProperty(
484 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
485 (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
486 SetSessionProperty(
487 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
488 video_config_.max_frame_rate);
489 // Keep these attachment settings in-sync with those in Initialize().
490 SetSessionProperty(
491 videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
492 kCVImageBufferColorPrimaries_ITU_R_709_2);
493 SetSessionProperty(
494 videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
495 kCVImageBufferTransferFunction_ITU_R_709_2);
496 SetSessionProperty(
497 videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
498 kCVImageBufferYCbCrMatrix_ITU_R_709_2);
499 if (video_config_.max_number_of_video_buffers_used > 0) {
500 SetSessionProperty(
501 videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
502 video_config_.max_number_of_video_buffers_used);
503 }
504 }
505
506 void H264VideoToolboxEncoder::DestroyCompressionSession() {
507 DCHECK(thread_checker_.CalledOnValidThread());
508
509 // If the compression session exists, invalidate it. This blocks until all
510 // pending output callbacks have returned and any internal threads have
511 // joined, ensuring no output callback ever sees a dangling encoder pointer.
512 //
513 // Before destroying the compression session, the video frame factory's pool
514 // is updated to null so that no thread will produce new video frames via the
515 // factory until a new compression session is created. The current frame size
516 // is passed to prevent the video frame factory from posting |UpdateFrameSize|
517 // tasks. Indeed, |DestroyCompressionSession| is either called from
518 // |ResetCompressionSession|, in which case a new pool and frame size will be
519 // set, or from callsites that require that there be no compression session
520 // (ex: the dtor).
521 if (compression_session_) { 534 if (compression_session_) {
522 video_frame_factory_->Update(
523 base::ScopedCFTypeRef<CVPixelBufferPoolRef>(nullptr), frame_size_);
524 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_); 535 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
525 compression_session_.reset(); 536 compression_session_.reset();
526 } 537 }
527 } 538 }
528 539
529 bool H264VideoToolboxEncoder::EncodeVideoFrame( 540 } // namespace content
530 const scoped_refptr<media::VideoFrame>& video_frame,
531 const base::TimeTicks& reference_time,
532 const FrameEncodedCallback& frame_encoded_callback) {
533 DCHECK(thread_checker_.CalledOnValidThread());
534 DCHECK(!frame_encoded_callback.is_null());
535
536 // Reject empty video frames.
537 const gfx::Size frame_size = video_frame->visible_rect().size();
538 if (frame_size.IsEmpty()) {
539 DVLOG(1) << "Rejecting empty video frame.";
540 return false;
541 }
542
543 // Handle frame size changes. This will reset the compression session.
544 if (frame_size != frame_size_) {
545 DVLOG(1) << "EncodeVideoFrame: Detected frame size change.";
546 UpdateFrameSize(frame_size);
547 }
548
549 // Need a compression session to continue.
550 if (!compression_session_) {
551 DLOG(ERROR) << "No compression session.";
552 return false;
553 }
554
555 // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be
556 // copied. If the VideoFrame was created by this encoder's video frame
557 // factory, then the returned CVPixelBuffer will have been obtained from the
558 // compression session's pixel buffer pool. This will eliminate a copy of the
559 // frame into memory visible by the hardware encoder. The VideoFrame's
560 // lifetime is extended for the lifetime of the returned CVPixelBuffer.
561 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame);
562 if (!pixel_buffer) {
563 DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed.";
564 return false;
565 }
566
567 // Convert the frame timestamp to CMTime.
568 auto timestamp_cm = CoreMediaGlue::CMTimeMake(
569 (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
570
571 // Wrap information we'll need after the frame is encoded in a heap object.
572 // We'll get the pointer back from the VideoToolbox completion callback.
573 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
574 RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency),
575 reference_time, frame_encoded_callback));
576
577 // Build a suitable frame properties dictionary for keyframes.
578 base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
579 if (encode_next_frame_as_keyframe_) {
580 frame_props = DictionaryWithKeyValue(
581 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
582 kCFBooleanTrue);
583 encode_next_frame_as_keyframe_ = false;
584 }
585
586 // Submit the frame to the compression session. The function returns as soon
587 // as the frame has been enqueued.
588 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
589 compression_session_, pixel_buffer, timestamp_cm,
590 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
591 reinterpret_cast<void*>(request.release()), nullptr);
592 if (status != noErr) {
593 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
594 return false;
595 }
596
597 return true;
598 }
599
600 void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
601 DCHECK(thread_checker_.CalledOnValidThread());
602
603 // Our video frame factory posts a task to update the frame size when its
604 // cache of the frame size differs from what the client requested. To avoid
605 // spurious encoder resets, check again here.
606 if (size_needed == frame_size_) {
607 DCHECK(compression_session_);
608 return;
609 }
610
611 VLOG(1) << "Resetting compression session (for frame size change from "
612 << frame_size_.ToString() << " to " << size_needed.ToString() << ").";
613
614 // If there is an existing session, finish every pending frame.
615 if (compression_session_) {
616 EmitFrames();
617 }
618
619 // Store the new frame size.
620 frame_size_ = size_needed;
621
622 // Reset the compression session.
623 ResetCompressionSession();
624 }
625
626 void H264VideoToolboxEncoder::SetBitRate(int /*new_bit_rate*/) {
627 DCHECK(thread_checker_.CalledOnValidThread());
628 // VideoToolbox does not seem to support bitrate reconfiguration.
629 }
630
631 void H264VideoToolboxEncoder::GenerateKeyFrame() {
632 DCHECK(thread_checker_.CalledOnValidThread());
633 encode_next_frame_as_keyframe_ = true;
634 }
635
636 scoped_ptr<VideoFrameFactory>
637 H264VideoToolboxEncoder::CreateVideoFrameFactory() {
638 DCHECK(thread_checker_.CalledOnValidThread());
639 return scoped_ptr<VideoFrameFactory>(
640 new VideoFrameFactoryImpl::Proxy(video_frame_factory_));
641 }
642
643 void H264VideoToolboxEncoder::EmitFrames() {
644 DCHECK(thread_checker_.CalledOnValidThread());
645 if (!compression_session_)
646 return;
647
648 OSStatus status = videotoolbox_glue_->VTCompressionSessionCompleteFrames(
649 compression_session_, CoreMediaGlue::CMTime{0, 0, 0, 0});
650 if (status != noErr) {
651 DLOG(ERROR) << " VTCompressionSessionCompleteFrames failed: " << status;
652 }
653 }
654
655 void H264VideoToolboxEncoder::OnSuspend() {
656 VLOG(1)
657 << "OnSuspend: Emitting all frames and destroying compression session.";
658 EmitFrames();
659 DestroyCompressionSession();
660 power_suspended_ = true;
661 }
662
663 void H264VideoToolboxEncoder::OnResume() {
664 power_suspended_ = false;
665
666 // Reset the compression session only if the frame size is not zero (which
667 // will obviously fail). It is possible for the frame size to be zero if no
668 // frame was submitted for encoding or requested from the video frame factory
669 // before suspension.
670 if (!frame_size_.IsEmpty()) {
671 VLOG(1) << "OnResume: Resetting compression session.";
672 ResetCompressionSession();
673 }
674 }
675
676 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
677 int32_t value) {
678 base::ScopedCFTypeRef<CFNumberRef> cfvalue(
679 CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
680 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
681 cfvalue) == noErr;
682 }
683
684 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
685 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
686 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
687 cfvalue) == noErr;
688 }
689
690 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
691 CFStringRef value) {
692 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
693 value) == noErr;
694 }
695
696 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
697 void* request_opaque,
698 OSStatus status,
699 VTEncodeInfoFlags info,
700 CMSampleBufferRef sbuf) {
701 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
702 const scoped_ptr<InProgressFrameEncode> request(
703 reinterpret_cast<InProgressFrameEncode*>(request_opaque));
704 bool keyframe = false;
705 bool has_frame_data = false;
706
707 if (status != noErr) {
708 DLOG(ERROR) << " encoding failed: " << status;
709 encoder->cast_environment_->PostTask(
710 CastEnvironment::MAIN, FROM_HERE,
711 base::Bind(encoder->status_change_cb_, STATUS_CODEC_RUNTIME_ERROR));
712 } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
713 DVLOG(2) << " frame dropped";
714 } else {
715 auto sample_attachments =
716 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
717 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
718 0));
719
720 // If the NotSync key is not present, it implies Sync, which indicates a
721 // keyframe (at least I think, VT documentation is, erm, sparse). Could
722 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
723 keyframe = !CFDictionaryContainsKey(
724 sample_attachments,
725 CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
726 has_frame_data = true;
727 }
728
729 // Increment the encoder-scoped frame id and assign the new value to this
730 // frame. VideoToolbox calls the output callback serially, so this is safe.
731 const uint32_t frame_id = ++encoder->last_frame_id_;
732
733 scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
734 encoded_frame->frame_id = frame_id;
735 encoded_frame->reference_time = request->reference_time;
736 encoded_frame->rtp_timestamp = request->rtp_timestamp;
737 if (keyframe) {
738 encoded_frame->dependency = EncodedFrame::KEY;
739 encoded_frame->referenced_frame_id = frame_id;
740 } else {
741 encoded_frame->dependency = EncodedFrame::DEPENDENT;
742 // H.264 supports complex frame reference schemes (multiple reference
743 // frames, slice references, backward and forward references, etc). Cast
744 // doesn't support the concept of forward-referencing frame dependencies or
745 // multiple frame dependencies; so pretend that all frames are only
746 // decodable after their immediately preceding frame is decoded. This will
747 // ensure a Cast receiver only attempts to decode the frames sequentially
748 // and in order. Furthermore, the encoder is configured to never use forward
749 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There
750 // is no way to prevent multiple reference frames.
751 encoded_frame->referenced_frame_id = frame_id - 1;
752 }
753
754 if (has_frame_data)
755 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
756
757 // TODO(miu): Compute and populate the |deadline_utilization| and
758 // |lossy_utilization| performance metrics in |encoded_frame|.
759
760 encoded_frame->encode_completion_time =
761 encoder->cast_environment_->Clock()->NowTicks();
762 encoder->cast_environment_->PostTask(
763 CastEnvironment::MAIN, FROM_HERE,
764 base::Bind(request->frame_encoded_callback,
765 base::Passed(&encoded_frame)));
766 }
767
768 } // namespace cast
769 } // namespace media
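For orientation, here is a minimal sketch of how a media::VideoEncodeAccelerator client might drive the new encoder. The call sequence and the Client callback signatures are taken from this patch; the client class itself, the include paths, the buffer count, and the shared-memory bookkeeping are illustrative assumptions rather than code from the CL.

// Hypothetical client, for illustration only (not part of this CL).
// Assumed includes: "media/video/video_encode_accelerator.h",
// "base/memory/shared_memory.h", "ui/gfx/geometry/size.h".
class SketchClient : public media::VideoEncodeAccelerator::Client {
 public:
  explicit SketchClient(media::VideoEncodeAccelerator* vea) : vea_(vea) {}

  // Invoked after Initialize(): allocate |output_buffer_size|-byte
  // base::SharedMemory segments (kept alive by the client) and hand each to
  // the encoder, e.g.
  //   vea_->UseOutputBitstreamBuffer(
  //       media::BitstreamBuffer(id, shm->handle(), output_buffer_size));
  void RequireBitstreamBuffers(unsigned int input_count,
                               const gfx::Size& input_coded_size,
                               size_t output_buffer_size) override;

  // Invoked on the client task runner once an Annex B frame has been copied
  // into the buffer with the given id; consume |payload_size| bytes, then
  // recycle the buffer through UseOutputBitstreamBuffer().
  void BitstreamBufferReady(int32_t bitstream_buffer_id,
                            size_t payload_size,
                            bool key_frame) override;

  void NotifyError(media::VideoEncodeAccelerator::Error error) override;

 private:
  media::VideoEncodeAccelerator* const vea_;
};

// Typical sequence on the single client thread:
//   vea->Initialize(media::PIXEL_FORMAT_I420, gfx::Size(640, 480),
//                   media::H264PROFILE_BASELINE, 2000000 /* bps */, &client);
//   vea->Encode(frame, false /* force_keyframe */);     // per captured frame
//   vea->RequestEncodingParametersChange(1000000, 30);  // optional, any time
//   vea->Destroy();                                     // tear down last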