Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/vt_video_encode_accelerator_mac.cc

Issue 1636083003: H264 HW encode using VideoToolbox (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Updates (created 4 years, 9 months ago)
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/cast/sender/h264_vt_encoder.h" 5 #include "content/common/gpu/media/vt_video_encode_accelerator_mac.h"
6 6
7 #include <stddef.h> 7 #include "base/thread_task_runner_handle.h"
8 8 #include "media/base/mac/coremedia_glue.h"
9 #include <string>
10 #include <vector>
11
12 #include "base/big_endian.h"
13 #include "base/bind.h"
14 #include "base/bind_helpers.h"
15 #include "base/location.h"
16 #include "base/logging.h"
17 #include "base/macros.h"
18 #include "base/power_monitor/power_monitor.h"
19 #include "base/synchronization/lock.h"
20 #include "build/build_config.h"
21 #include "media/base/mac/corevideo_glue.h" 9 #include "media/base/mac/corevideo_glue.h"
22 #include "media/base/mac/video_frame_mac.h" 10 #include "media/base/mac/video_frame_mac.h"
23 #include "media/cast/common/rtp_time.h" 11
24 #include "media/cast/constants.h" 12 namespace content {
25 #include "media/cast/sender/video_frame_factory.h"
26
27 namespace media {
28 namespace cast {
29 13
30 namespace { 14 namespace {
31 15
32 // Container for the associated data of a video frame being processed. 16 // Subjectively chosen.
33 struct InProgressFrameEncode { 17 // TODO(emircan): Check if we can find the actual system capabilities via
34 const RtpTimeTicks rtp_timestamp; 18 // creating VTCompressionSessions with varying requirements.
19 // See crbug.com/584784.
20 const size_t kNumInputBuffers = 3;
21 const size_t kMaxFrameRateNumerator = 30;
22 const size_t kMaxFrameRateDenominator = 1;
23 const size_t kMaxResolutionWidth = 4096;
24 const size_t kMaxResolutionHeight = 2160;
25 const size_t kBitsPerByte = 8;
26
27 } // namespace
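The TODO above asks whether the real limits could be discovered instead of hard-coded. One hedged way to do that, assuming the CreateCompressionSession() and DestroyCompressionSession() helpers defined later in this file and an already-initialized |videotoolbox_glue_|, is to attempt a throwaway session that requires the hardware encoder at the advertised maximum resolution. The method name below is hypothetical and not part of this patch.

// Sketch only: probe whether a hardware H.264 session can be created at the
// hard-coded limits above. Reuses CreateCompressionSession() /
// DestroyCompressionSession() from later in this file; a real probe would
// also restore |input_visible_size_| afterwards.
bool VTVideoEncodeAccelerator::ProbeHardwareEncodeSupport() {
  DCHECK(thread_checker_.CalledOnValidThread());
  input_visible_size_ = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
  // Null source image buffer attributes are enough here; no frames are ever
  // submitted to the probe session.
  base::ScopedCFTypeRef<CFDictionaryRef> attributes;
  const bool supported =
      CreateCompressionSession(attributes, true /* require_hw_encoding */);
  DestroyCompressionSession();
  return supported;
}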
28
29 struct VTVideoEncodeAccelerator::InProgressFrameEncode {
30 InProgressFrameEncode(base::TimeDelta rtp_timestamp, base::TimeTicks ref_time)
31 : timestamp(rtp_timestamp), reference_time(ref_time) {}
32 const base::TimeDelta timestamp;
35 const base::TimeTicks reference_time; 33 const base::TimeTicks reference_time;
36 const VideoEncoder::FrameEncodedCallback frame_encoded_callback; 34
37 35 private:
38 InProgressFrameEncode(RtpTimeTicks rtp, 36 DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
39 base::TimeTicks r_time,
40 VideoEncoder::FrameEncodedCallback callback)
41 : rtp_timestamp(rtp),
42 reference_time(r_time),
43 frame_encoded_callback(callback) {}
44 }; 37 };
45 38
46 base::ScopedCFTypeRef<CFDictionaryRef> 39 struct VTVideoEncodeAccelerator::EncodeOutput {
47 DictionaryWithKeysAndValues(CFTypeRef* keys, CFTypeRef* values, size_t size) { 40 EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
48 return base::ScopedCFTypeRef<CFDictionaryRef>(CFDictionaryCreate( 41 : info(info_flags), sample_buffer(sbuf) {}
49 kCFAllocatorDefault, keys, values, size, &kCFTypeDictionaryKeyCallBacks, 42 const VTEncodeInfoFlags info;
50 &kCFTypeDictionaryValueCallBacks)); 43 const CMSampleBufferRef sample_buffer;
jfroy 2016/03/07 21:57:07 Would it be safer to use a smart pointer to manage the sample buffer?
emircan 2016/03/08 03:02:30 Done.
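A minimal sketch of the smart-pointer suggestion from the thread above, assuming base::ScopedCFTypeRef with the RETAIN policy (the same pattern this file already uses for CMBlockBufferRef); illustrative only, not necessarily what landed in a later patch set.

// Sketch only: EncodeOutput owns a retained reference, so the sample buffer
// is released automatically when the struct is destroyed, even on paths that
// never consume the output. The manual CFRetain()/CFRelease() pairs further
// down would then no longer be needed.
struct VTVideoEncodeAccelerator::EncodeOutput {
  EncodeOutput(VTEncodeInfoFlags info_flags, CMSampleBufferRef sbuf)
      : info(info_flags), sample_buffer(sbuf, base::scoped_policy::RETAIN) {}
  const VTEncodeInfoFlags info;
  const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
};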
51 } 44
52 45 private:
53 base::ScopedCFTypeRef<CFDictionaryRef> DictionaryWithKeyValue(CFTypeRef key, 46 DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
54 CFTypeRef value) { 47 };
55 CFTypeRef keys[1] = {key}; 48
56 CFTypeRef values[1] = {value}; 49 struct VTVideoEncodeAccelerator::BitstreamBufferRef {
57 return DictionaryWithKeysAndValues(keys, values, 1); 50 BitstreamBufferRef(int32_t id,
58 } 51 scoped_ptr<base::SharedMemory> shm,
59 52 size_t size)
60 base::ScopedCFTypeRef<CFArrayRef> ArrayWithIntegers(const int* v, size_t size) { 53 : id(id), shm(std::move(shm)), size(size) {}
61 std::vector<CFNumberRef> numbers; 54 const int32_t id;
62 numbers.reserve(size); 55 const scoped_ptr<base::SharedMemory> shm;
63 for (const int* end = v + size; v < end; ++v) 56 const size_t size;
64 numbers.push_back(CFNumberCreate(nullptr, kCFNumberSInt32Type, v)); 57
65 base::ScopedCFTypeRef<CFArrayRef> array(CFArrayCreate( 58 private:
66 kCFAllocatorDefault, reinterpret_cast<const void**>(&numbers[0]), 59 DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
67 numbers.size(), &kCFTypeArrayCallBacks)); 60 };
68 for (auto& number : numbers) { 61
69 CFRelease(number); 62 VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
70 } 63 : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
71 return array; 64 encoder_thread_("VTEncoderThread"),
72 } 65 weak_this_factory_(this) {
73 66 }
74 template <typename NalSizeType> 67
75 void CopyNalsToAnnexB(char* avcc_buffer, 68 VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
76 const size_t avcc_size, 69 DVLOG(3) << __FUNCTION__;
77 std::string* annexb_buffer) { 70 DCHECK(thread_checker_.CalledOnValidThread());
78 static_assert(sizeof(NalSizeType) == 1 || sizeof(NalSizeType) == 2 || 71
79 sizeof(NalSizeType) == 4, 72 Destroy();
80 "NAL size type has unsupported size"); 73 }
81 static const char startcode_3[3] = {0, 0, 1}; 74
82 DCHECK(avcc_buffer); 75 media::VideoEncodeAccelerator::SupportedProfiles
83 DCHECK(annexb_buffer); 76 VTVideoEncodeAccelerator::GetSupportedProfiles() {
84 size_t bytes_left = avcc_size; 77 DVLOG(3) << __FUNCTION__;
85 while (bytes_left > 0) { 78 DCHECK(thread_checker_.CalledOnValidThread());
86 DCHECK_GT(bytes_left, sizeof(NalSizeType)); 79
87 NalSizeType nal_size; 80 SupportedProfiles profiles;
88 base::ReadBigEndian(avcc_buffer, &nal_size); 81 SupportedProfile profile;
89 bytes_left -= sizeof(NalSizeType); 82 profile.profile = media::H264PROFILE_BASELINE;
90 avcc_buffer += sizeof(NalSizeType); 83 profile.max_framerate_numerator = kMaxFrameRateNumerator;
91 84 profile.max_framerate_denominator = kMaxFrameRateDenominator;
92 DCHECK_GE(bytes_left, nal_size); 85 profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
93 annexb_buffer->append(startcode_3, sizeof(startcode_3)); 86 profiles.push_back(profile);
94 annexb_buffer->append(avcc_buffer, nal_size); 87 return profiles;
95 bytes_left -= nal_size; 88 }
96 avcc_buffer += nal_size; 89
97 } 90 bool VTVideoEncodeAccelerator::Initialize(
98 } 91 media::VideoPixelFormat format,
99 92 const gfx::Size& input_visible_size,
100 // Copy a H.264 frame stored in a CM sample buffer to an Annex B buffer. Copies 93 media::VideoCodecProfile output_profile,
101 // parameter sets for keyframes before the frame data as well. 94 uint32_t initial_bitrate,
102 void CopySampleBufferToAnnexBBuffer(CoreMediaGlue::CMSampleBufferRef sbuf, 95 Client* client) {
103 std::string* annexb_buffer, 96 DVLOG(3) << __FUNCTION__
104 bool keyframe) { 97 << ": input_format=" << media::VideoPixelFormatToString(format)
105 // Perform two pass, one to figure out the total output size, and another to 98 << ", input_visible_size=" << input_visible_size.ToString()
106 // copy the data after having performed a single output allocation. Note that 99 << ", output_profile=" << output_profile
107 // we'll allocate a bit more because we'll count 4 bytes instead of 3 for 100 << ", initial_bitrate=" << initial_bitrate;
108 // video NALs. 101 DCHECK(thread_checker_.CalledOnValidThread());
109 102 DCHECK(client);
110 OSStatus status; 103
111 104 if (media::PIXEL_FORMAT_I420 != format) {
112 // Get the sample buffer's block buffer and format description. 105 DLOG(ERROR) << "Input format not supported= "
113 auto bb = CoreMediaGlue::CMSampleBufferGetDataBuffer(sbuf); 106 << media::VideoPixelFormatToString(format);
114 DCHECK(bb); 107 return false;
115 auto fdesc = CoreMediaGlue::CMSampleBufferGetFormatDescription(sbuf); 108 }
116 DCHECK(fdesc); 109 if (media::H264PROFILE_BASELINE != output_profile) {
117 110 DLOG(ERROR) << "Output profile not supported= "
118 size_t bb_size = CoreMediaGlue::CMBlockBufferGetDataLength(bb); 111 << output_profile;
119 size_t total_bytes = bb_size; 112 return false;
120 113 }
121 size_t pset_count; 114
122 int nal_size_field_bytes; 115 videotoolbox_glue_ = VideoToolboxGlue::Get();
123 status = CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 116 if (!videotoolbox_glue_) {
124 fdesc, 0, nullptr, nullptr, &pset_count, &nal_size_field_bytes); 117 DLOG(ERROR) << "Failed creating VideoToolbox glue";
125 if (status == 118 return false;
126 CoreMediaGlue::kCMFormatDescriptionBridgeError_InvalidParameter) { 119 }
127 DLOG(WARNING) << " assuming 2 parameter sets and 4 bytes NAL length header"; 120
128 pset_count = 2; 121 client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
129 nal_size_field_bytes = 4; 122 client_ = client_ptr_factory_->GetWeakPtr();
130 } else if (status != noErr) { 123 input_visible_size_ = input_visible_size;
131 DLOG(ERROR) 124 frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
132 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 125 target_bitrate_ = initial_bitrate;
133 << status; 126 bitstream_buffer_size_ = input_visible_size.GetArea();
134 return; 127
135 } 128 if (!ResetCompressionSession()) {
136 129 DLOG(ERROR) << "Failed creating compression session";
137 if (keyframe) { 130 return false;
138 const uint8_t* pset; 131 }
139 size_t pset_size; 132
140 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { 133 if (!encoder_thread_.Start()) {
141 status = 134 DLOG(ERROR) << "Failed spawning encoder thread";
142 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 135 return false;
143 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr); 136 }
144 if (status != noErr) { 137
145 DLOG(ERROR) 138 client_task_runner_->PostTask(
146 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 139 FROM_HERE,
147 << status; 140 base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
148 return; 141 input_visible_size_, bitstream_buffer_size_));
149 } 142 return true;
150 total_bytes += pset_size + nal_size_field_bytes; 143 }
151 } 144
152 } 145 void VTVideoEncodeAccelerator::Encode(
153 146 const scoped_refptr<media::VideoFrame>& frame,
154 annexb_buffer->reserve(total_bytes); 147 bool force_keyframe) {
155 148 DVLOG(3) << __FUNCTION__;
156 // Copy all parameter sets before keyframes. 149 DCHECK(thread_checker_.CalledOnValidThread());
157 if (keyframe) { 150
158 const uint8_t* pset; 151 encoder_thread_.message_loop()->PostTask(
159 size_t pset_size; 152 FROM_HERE,
160 for (size_t pset_i = 0; pset_i < pset_count; ++pset_i) { 153 base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
161 status = 154 base::Unretained(this),
162 CoreMediaGlue::CMVideoFormatDescriptionGetH264ParameterSetAtIndex( 155 frame,
163 fdesc, pset_i, &pset, &pset_size, nullptr, nullptr); 156 force_keyframe));
164 if (status != noErr) { 157 }
165 DLOG(ERROR) 158
166 << " CMVideoFormatDescriptionGetH264ParameterSetAtIndex failed: " 159 void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
167 << status; 160 const media::BitstreamBuffer& buffer) {
168 return; 161 DVLOG(3) << __FUNCTION__ << ": buffer size=" << buffer.size();
169 } 162 DCHECK(thread_checker_.CalledOnValidThread());
170 static const char startcode_4[4] = {0, 0, 0, 1}; 163
171 annexb_buffer->append(startcode_4, sizeof(startcode_4)); 164 if (buffer.size() < bitstream_buffer_size_) {
172 annexb_buffer->append(reinterpret_cast<const char*>(pset), pset_size); 165 DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
173 } 166 << " vs. " << bitstream_buffer_size_;
174 } 167 client_->NotifyError(kInvalidArgumentError);
175 168 return;
176 // Block buffers can be composed of non-contiguous chunks. For the sake of 169 }
177 // keeping this code simple, flatten non-contiguous block buffers. 170
178 base::ScopedCFTypeRef<CoreMediaGlue::CMBlockBufferRef> contiguous_bb( 171 scoped_ptr<base::SharedMemory> shm(
179 bb, base::scoped_policy::RETAIN); 172 new base::SharedMemory(buffer.handle(), false));
180 if (!CoreMediaGlue::CMBlockBufferIsRangeContiguous(bb, 0, 0)) { 173 if (!shm->Map(buffer.size())) {
181 contiguous_bb.reset(); 174 DLOG(ERROR) << "Failed mapping shared memory.";
182 status = CoreMediaGlue::CMBlockBufferCreateContiguous( 175 client_->NotifyError(kPlatformFailureError);
183 kCFAllocatorDefault, bb, kCFAllocatorDefault, nullptr, 0, 0, 0, 176 return;
184 contiguous_bb.InitializeInto()); 177 }
185 if (status != noErr) { 178
186 DLOG(ERROR) << " CMBlockBufferCreateContiguous failed: " << status; 179 scoped_ptr<BitstreamBufferRef> buffer_ref(
187 return; 180 new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
188 } 181
189 } 182 encoder_thread_.message_loop()->PostTask(
190 183 FROM_HERE,
191 // Copy all the NAL units. In the process convert them from AVCC format 184 base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
192 // (length header) to AnnexB format (start code). 185 base::Unretained(this),
193 char* bb_data; 186 base::Passed(&buffer_ref)));
194 status = CoreMediaGlue::CMBlockBufferGetDataPointer(contiguous_bb, 0, nullptr, 187 }
195 nullptr, &bb_data); 188
189 void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
190 uint32_t bitrate,
191 uint32_t framerate) {
192 DVLOG(3) << __FUNCTION__ << ": bitrate=" << bitrate
193 << ": framerate=" << framerate;
194 DCHECK(thread_checker_.CalledOnValidThread());
195
196 frame_rate_ = framerate > 1 ? framerate : 1;
197 target_bitrate_ = bitrate > 1 ? bitrate : 1;
198
199 if (!compression_session_) {
200 client_->NotifyError(kPlatformFailureError);
201 return;
202 }
203
204 media::video_toolbox::SessionPropertySetter session_property_setter(
205 compression_session_, videotoolbox_glue_);
206 // TODO(emircan): See crbug.com/425352.
207 bool rv = session_property_setter.SetSessionProperty(
208 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
209 target_bitrate_);
210 rv &= session_property_setter.SetSessionProperty(
211 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
212 frame_rate_);
213 rv &= session_property_setter.SetSessionProperty(
214 videotoolbox_glue_->kVTCompressionPropertyKey_DataRateLimits(),
215 media::video_toolbox::ArrayWithIntegerAndFloat(
216 target_bitrate_ / kBitsPerByte, 1.0f));
217 DLOG_IF(ERROR, !rv) << "Couldn't change session encoding parameters.";
218 }
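For reference, kVTCompressionPropertyKey_DataRateLimits takes pairs of a byte count and a window duration in seconds, which is why the bitrate is divided by kBitsPerByte and paired with 1.0f above. A worked example with a hypothetical bitrate:

// Illustration only, with a hypothetical 2 Mbit/s target.
const uint32_t example_bitrate = 2000000;                          // bits/s
const uint32_t bytes_per_window = example_bitrate / kBitsPerByte;  // 250000
const float window_seconds = 1.0f;
// ArrayWithIntegerAndFloat(bytes_per_window, window_seconds) then caps the
// encoder at 250000 bytes of output per 1-second window.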
219
220 void VTVideoEncodeAccelerator::Destroy() {
221 DVLOG(3) << __FUNCTION__;
222 DCHECK(thread_checker_.CalledOnValidThread());
223
224 // Cancel all callbacks.
225 client_ptr_factory_.reset();
226
227 if (encoder_thread_.IsRunning()) {
228 encoder_thread_.message_loop()->PostTask(
229 FROM_HERE,
230 base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
231 base::Unretained(this)));
232 encoder_thread_.Stop();
233 } else {
234 DestroyTask();
235 }
236 }
237
238 void VTVideoEncodeAccelerator::EncodeTask(
239 const scoped_refptr<media::VideoFrame>& frame,
240 bool force_keyframe) {
241 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
242 DCHECK(compression_session_);
243 DCHECK(frame);
244
245 // TODO(emircan): See if we can eliminate a copy here by using
246 // CVPixelBufferPool for the allocation of incoming VideoFrames.
247 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
248 media::WrapVideoFrameInCVPixelBuffer(*frame);
249 base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
250 media::video_toolbox::DictionaryWithKeyValue(
251 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
252 force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
253
254 base::TimeTicks ref_time;
255 if (!frame->metadata()->GetTimeTicks(
256 media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) {
257 ref_time = base::TimeTicks::Now();
258 }
259 auto timestamp_cm = CoreMediaGlue::CMTimeMake(
260 frame->timestamp().InMicroseconds(), USEC_PER_SEC);
261 // Wrap information we'll need after the frame is encoded in a heap object.
262 // We'll get the pointer back from the VideoToolbox completion callback.
263 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
264 frame->timestamp(), ref_time));
265
266 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
267 compression_session_, pixel_buffer, timestamp_cm,
268 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
269 reinterpret_cast<void*>(request.release()), nullptr);
196 if (status != noErr) { 270 if (status != noErr) {
197 DLOG(ERROR) << " CMBlockBufferGetDataPointer failed: " << status; 271 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
198 return; 272 NotifyError(kPlatformFailureError);
199 } 273 }
200 274 }
201 if (nal_size_field_bytes == 1) { 275
202 CopyNalsToAnnexB<uint8_t>(bb_data, bb_size, annexb_buffer); 276 void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
203 } else if (nal_size_field_bytes == 2) { 277 scoped_ptr<BitstreamBufferRef> buffer_ref) {
204 CopyNalsToAnnexB<uint16_t>(bb_data, bb_size, annexb_buffer); 278 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
205 } else if (nal_size_field_bytes == 4) { 279
206 CopyNalsToAnnexB<uint32_t>(bb_data, bb_size, annexb_buffer); 280 // If there is already EncodeOutput waiting, copy its output first.
207 } else { 281 if (!encoder_output_queue_.empty()) {
208 NOTREACHED(); 282 scoped_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
209 } 283 std::move(encoder_output_queue_.front());
210 } 284 encoder_output_queue_.pop_front();
211 285 ReturnBitstreamBuffer(encode_output->info, encode_output->sample_buffer,
212 } // namespace 286 std::move(buffer_ref));
213 287 return;
214 class H264VideoToolboxEncoder::VideoFrameFactoryImpl 288 }
215 : public base::RefCountedThreadSafe<VideoFrameFactoryImpl>, 289
216 public VideoFrameFactory { 290 bitstream_buffer_queue_.push_back(std::move(buffer_ref));
217 public: 291 }
218 // Type that proxies the VideoFrameFactory interface to this class. 292
219 class Proxy; 293 void VTVideoEncodeAccelerator::DestroyTask() {
220 294 // This task runs on |encoder_thread_| if it is alive, otherwise on the GPU
221 VideoFrameFactoryImpl(const base::WeakPtr<H264VideoToolboxEncoder>& encoder, 295 // child thread.
222 const scoped_refptr<CastEnvironment>& cast_environment) 296
223 : encoder_(encoder), cast_environment_(cast_environment) {} 297 // Cancel all callbacks.
224 298 weak_this_factory_.InvalidateWeakPtrs();
225 scoped_refptr<VideoFrame> MaybeCreateFrame( 299
226 const gfx::Size& frame_size, 300 // This call blocks until all pending frames are flushed out.
227 base::TimeDelta timestamp) final { 301 DestroyCompressionSession();
228 if (frame_size.IsEmpty()) { 302 }
229 DVLOG(1) << "Rejecting empty video frame."; 303
230 return nullptr; 304 void VTVideoEncodeAccelerator::NotifyError(
231 } 305 media::VideoEncodeAccelerator::Error error) {
232 306 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
233 base::AutoLock auto_lock(lock_); 307 client_task_runner_->PostTask(
234 308 FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
235 // If the pool size does not match, speculatively reset the encoder to use 309 }
236 // the new size and return null. Cache the new frame size right away and
237 // toss away the pixel buffer pool to avoid spurious tasks until the encoder
238 // is done resetting.
239 if (frame_size != pool_frame_size_) {
240 DVLOG(1) << "MaybeCreateFrame: Detected frame size change.";
241 cast_environment_->PostTask(
242 CastEnvironment::MAIN, FROM_HERE,
243 base::Bind(&H264VideoToolboxEncoder::UpdateFrameSize, encoder_,
244 frame_size));
245 pool_frame_size_ = frame_size;
246 pool_.reset();
247 return nullptr;
248 }
249
250 if (!pool_) {
251 DVLOG(1) << "MaybeCreateFrame: No pixel buffer pool.";
252 return nullptr;
253 }
254
255 // Allocate a pixel buffer from the pool and return a wrapper VideoFrame.
256 base::ScopedCFTypeRef<CVPixelBufferRef> buffer;
257 auto status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool_,
258 buffer.InitializeInto());
259 if (status != kCVReturnSuccess) {
260 DLOG(ERROR) << "CVPixelBufferPoolCreatePixelBuffer failed: " << status;
261 return nullptr;
262 }
263
264 DCHECK(buffer);
265 return VideoFrame::WrapCVPixelBuffer(buffer, timestamp);
266 }
267
268 void Update(const base::ScopedCFTypeRef<CVPixelBufferPoolRef>& pool,
269 const gfx::Size& frame_size) {
270 base::AutoLock auto_lock(lock_);
271 pool_ = pool;
272 pool_frame_size_ = frame_size;
273 }
274
275 private:
276 friend class base::RefCountedThreadSafe<VideoFrameFactoryImpl>;
277 ~VideoFrameFactoryImpl() final {}
278
279 base::Lock lock_;
280 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool_;
281 gfx::Size pool_frame_size_;
282
283 // Weak back reference to the encoder and the cast environment so we can
284 // message the encoder when the frame size changes.
285 const base::WeakPtr<H264VideoToolboxEncoder> encoder_;
286 const scoped_refptr<CastEnvironment> cast_environment_;
287
288 DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
289 };
290
291 class H264VideoToolboxEncoder::VideoFrameFactoryImpl::Proxy
292 : public VideoFrameFactory {
293 public:
294 explicit Proxy(
295 const scoped_refptr<VideoFrameFactoryImpl>& video_frame_factory)
296 : video_frame_factory_(video_frame_factory) {
297 DCHECK(video_frame_factory_);
298 }
299
300 scoped_refptr<VideoFrame> MaybeCreateFrame(
301 const gfx::Size& frame_size,
302 base::TimeDelta timestamp) final {
303 return video_frame_factory_->MaybeCreateFrame(frame_size, timestamp);
304 }
305
306 private:
307 ~Proxy() final {}
308
309 const scoped_refptr<VideoFrameFactoryImpl> video_frame_factory_;
310
311 DISALLOW_COPY_AND_ASSIGN(Proxy);
312 };
313 310
314 // static 311 // static
315 bool H264VideoToolboxEncoder::IsSupported( 312 void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
316 const VideoSenderConfig& video_config) { 313 void* request_opaque,
317 return video_config.codec == CODEC_VIDEO_H264 && VideoToolboxGlue::Get(); 314 OSStatus status,
318 } 315 VTEncodeInfoFlags info,
319 316 CMSampleBufferRef sbuf) {
320 H264VideoToolboxEncoder::H264VideoToolboxEncoder( 317 // This function may be called asynchronously, on a different thread from the
321 const scoped_refptr<CastEnvironment>& cast_environment, 318 // one that calls VTCompressionSessionEncodeFrame.
322 const VideoSenderConfig& video_config, 319 DVLOG(3) << __FUNCTION__;
323 const StatusChangeCallback& status_change_cb) 320
324 : cast_environment_(cast_environment), 321 auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
325 videotoolbox_glue_(VideoToolboxGlue::Get()), 322 DCHECK(encoder);
326 video_config_(video_config), 323
327 status_change_cb_(status_change_cb), 324 // Release InProgressFrameEncode, since we don't have support to return
328 last_frame_id_(kFirstFrameId - 1), 325 // timestamps at this point.
329 encode_next_frame_as_keyframe_(false), 326 scoped_ptr<InProgressFrameEncode> request(
330 power_suspended_(false), 327 reinterpret_cast<InProgressFrameEncode*>(request_opaque));
331 weak_factory_(this) { 328 request.reset();
332 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); 329
333 DCHECK(!status_change_cb_.is_null()); 330 // CFRetain is required to hold onto CMSampleBufferRef when posting task
334 331 // between threads. The object should be released later using CFRelease.
335 OperationalStatus operational_status = 332 CFRetain(sbuf);
336 H264VideoToolboxEncoder::IsSupported(video_config) 333
337 ? STATUS_INITIALIZED 334 // This method is NOT called on |encoder_thread_|, so we still need to
338 : STATUS_UNSUPPORTED_CODEC; 335 // post a task back to it to do work.
339 cast_environment_->PostTask( 336 encoder->encoder_thread_.task_runner()->PostTask(
340 CastEnvironment::MAIN, FROM_HERE, 337 FROM_HERE,
341 base::Bind(status_change_cb_, operational_status)); 338 base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
342 339 encoder->weak_this_factory_.GetWeakPtr(), status, info, sbuf));
jfroy 2016/03/07 21:57:07 If this turns out to be weak, sbuf gets leaked. I'
emircan 2016/03/08 03:02:30 Good catch. I will defer the lifetime of this object to struct EncodeOutput.
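A minimal sketch of the direction described in the reply above, assuming an EncodeOutput that retains the buffer (see the sketch near the struct definition) and a CompressionCallbackTask changed to accept it, which is hypothetical here. If the weak pointer is invalidated, the task never runs but its bound arguments are still destroyed, so the sample buffer is released instead of leaked.

// Sketch only: move the retained sample buffer into the posted task.
// Assumes a CompressionCallbackTask overload taking scoped_ptr<EncodeOutput>,
// which is not part of this patch.
scoped_ptr<EncodeOutput> output(new EncodeOutput(info, sbuf));
encoder->encoder_thread_.task_runner()->PostTask(
    FROM_HERE,
    base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
               encoder->weak_this_factory_.GetWeakPtr(), status,
               base::Passed(&output)));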
343 if (operational_status == STATUS_INITIALIZED) { 340 }
344 // Create the shared video frame factory. It persists for the combined 341
345 // lifetime of the encoder and all video frame factory proxies created by 342 void VTVideoEncodeAccelerator::CompressionCallbackTask(OSStatus status,
346 // |CreateVideoFrameFactory| that reference it. 343 VTEncodeInfoFlags info,
347 video_frame_factory_ = 344 CMSampleBufferRef sbuf) {
jfroy 2016/03/07 21:57:07 Document that sbuf is retained and needs to be released.
emircan 2016/03/08 03:02:30 I defer the lifetime of this object to struct EncodeOutput.
348 scoped_refptr<VideoFrameFactoryImpl>(new VideoFrameFactoryImpl( 345 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
349 weak_factory_.GetWeakPtr(), cast_environment_)); 346
350 347 if (status != noErr) {
351 // Register for power state changes. 348 DLOG(ERROR) << " encode failed: " << status;
352 auto power_monitor = base::PowerMonitor::Get(); 349 NotifyError(kPlatformFailureError);
353 if (power_monitor) { 350 return;
354 power_monitor->AddObserver(this); 351 }
355 VLOG(1) << "Registered for power state changes."; 352
356 } else { 353 // If there isn't any BitstreamBuffer to copy into, add it to a queue for
357 DLOG(WARNING) << "No power monitor. Process suspension will invalidate " 354 // later use.
358 "the encoder."; 355 if (bitstream_buffer_queue_.empty()) {
359 } 356 scoped_ptr<EncodeOutput> encode_output(new EncodeOutput(info, sbuf));
360 } 357 encoder_output_queue_.push_back(std::move(encode_output));
361 } 358 return;
362 359 }
363 H264VideoToolboxEncoder::~H264VideoToolboxEncoder() { 360
361 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
362 std::move(bitstream_buffer_queue_.front());
363 bitstream_buffer_queue_.pop_front();
364 ReturnBitstreamBuffer(info, sbuf, std::move(buffer_ref));
365 }
366
367 void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
368 VTEncodeInfoFlags info,
369 CMSampleBufferRef sbuf,
jfroy 2016/03/07 21:57:07 Document that sbuf is retained and needs to be released.
emircan 2016/03/08 03:02:30 sbuf will go out of scope when struct EncodeOutput is destroyed.
370 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
371 DVLOG(3) << __FUNCTION__;
372 DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
373
374 if (info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) {
375 DVLOG(2) << " frame dropped";
376 CFRelease(sbuf);
377 client_task_runner_->PostTask(
378 FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
379 buffer_ref->id, 0, false));
380 return;
381 }
382
383 auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
384 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0));
385 const bool keyframe =
386 !CFDictionaryContainsKey(sample_attachments,
387 CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
388
389 size_t used_buffer_size = 0;
390 const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer(
391 sbuf, keyframe, buffer_ref->size,
392 reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
393 CFRelease(sbuf);
394 if (!copy_rv) {
395 DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
396 used_buffer_size = 0;
397 }
398
399 client_task_runner_->PostTask(
400 FROM_HERE, base::Bind(&Client::BitstreamBufferReady, client_,
401 buffer_ref->id, used_buffer_size, keyframe));
402 }
403
404 bool VTVideoEncodeAccelerator::ResetCompressionSession() {
405 DCHECK(thread_checker_.CalledOnValidThread());
406
364 DestroyCompressionSession(); 407 DestroyCompressionSession();
365 408
366 // If video_frame_factory_ is not null, the encoder registered for power state 409 CFTypeRef attributes_keys[] = {
367 // changes in the ctor and it must now unregister. 410 kCVPixelBufferOpenGLCompatibilityKey,
368 if (video_frame_factory_) { 411 kCVPixelBufferIOSurfacePropertiesKey,
369 auto power_monitor = base::PowerMonitor::Get(); 412 kCVPixelBufferPixelFormatTypeKey
370 if (power_monitor) 413 };
371 power_monitor->RemoveObserver(this);
372 }
373 }
374
375 void H264VideoToolboxEncoder::ResetCompressionSession() {
376 DCHECK(thread_checker_.CalledOnValidThread());
377
378 // Ignore reset requests while power suspended.
379 if (power_suspended_)
380 return;
381
382 // Notify that we're resetting the encoder.
383 cast_environment_->PostTask(
384 CastEnvironment::MAIN, FROM_HERE,
385 base::Bind(status_change_cb_, STATUS_CODEC_REINIT_PENDING));
386
387 // Destroy the current session, if any.
388 DestroyCompressionSession();
389
390 // On OS X, allow the hardware encoder. Don't require it, it does not support
391 // all configurations (some of which are used for testing).
392 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec;
393 #if !defined(OS_IOS)
394 encoder_spec = DictionaryWithKeyValue(
395 videotoolbox_glue_
396 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(),
397 kCFBooleanTrue);
398 #endif
399
400 // Force 420v so that clients can easily use these buffers as GPU textures.
401 const int format[] = { 414 const int format[] = {
402 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; 415 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
403 416 CFTypeRef attributes_values[] = {
404 // Keep these attachment settings in-sync with those in ConfigureSession(). 417 kCFBooleanTrue,
405 CFTypeRef attachments_keys[] = {kCVImageBufferColorPrimariesKey, 418 media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0)
406 kCVImageBufferTransferFunctionKey, 419 .release(),
407 kCVImageBufferYCbCrMatrixKey}; 420 media::video_toolbox::ArrayWithIntegers(format, arraysize(format))
408 CFTypeRef attachments_values[] = {kCVImageBufferColorPrimaries_ITU_R_709_2, 421 .release()};
409 kCVImageBufferTransferFunction_ITU_R_709_2, 422 const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
410 kCVImageBufferYCbCrMatrix_ITU_R_709_2}; 423 media::video_toolbox::DictionaryWithKeysAndValues(
411 CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey, 424 attributes_keys, attributes_values, arraysize(attributes_keys));
412 kCVBufferPropagatedAttachmentsKey}; 425 for (auto& v : attributes_values)
413 CFTypeRef buffer_attributes_values[] = {
414 ArrayWithIntegers(format, arraysize(format)).release(),
415 DictionaryWithKeysAndValues(attachments_keys, attachments_values,
416 arraysize(attachments_keys)).release()};
417 const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
418 DictionaryWithKeysAndValues(buffer_attributes_keys,
419 buffer_attributes_values,
420 arraysize(buffer_attributes_keys));
421 for (auto& v : buffer_attributes_values)
422 CFRelease(v); 426 CFRelease(v);
423 427
428 bool session_rv = CreateCompressionSession(attributes, true);
429 if (!session_rv) {
430 // Try creating session again without forcing HW encode.
431 DestroyCompressionSession();
432 session_rv = CreateCompressionSession(attributes, false);
433 if (!session_rv) {
434 DestroyCompressionSession();
435 return false;
436 }
437 }
438
439 const bool configure_rv = ConfigureCompressionSession();
440 RequestEncodingParametersChange(target_bitrate_, frame_rate_);
441 return configure_rv;
442 }
443
444 bool VTVideoEncodeAccelerator::CreateCompressionSession(
445 base::ScopedCFTypeRef<CFDictionaryRef> attributes,
446 bool require_hw_encoding) {
447 DCHECK(thread_checker_.CalledOnValidThread());
448
449 std::vector<CFTypeRef> encoder_keys;
450 std::vector<CFTypeRef> encoder_values;
451 encoder_keys.push_back(videotoolbox_glue_
452 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder());
453 encoder_values.push_back(kCFBooleanTrue);
454
455 if (require_hw_encoding) {
456 encoder_keys.push_back(
457 videotoolbox_glue_
458 ->kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder());
459 encoder_values.push_back(kCFBooleanTrue);
460 }
461 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
462 media::video_toolbox::DictionaryWithKeysAndValues(
463 encoder_keys.data(), encoder_values.data(), encoder_keys.size());
464
424 // Create the compression session. 465 // Create the compression session.
425
426 // Note that the encoder object is given to the compression session as the 466 // Note that the encoder object is given to the compression session as the
427 // callback context using a raw pointer. The C API does not allow us to use a 467 // callback context using a raw pointer. The C API does not allow us to use a
428 // smart pointer, nor is this encoder ref counted. However, this is still 468 // smart pointer, nor is this encoder ref counted. However, this is still
429 // safe, because 1) we own the compression session and 2) we tear it down 469 // safe, because 1) we own the compression session and 2) we tear it down
430 // safely. When destructing the encoder, the compression session is flushed 470 // safely. When destructing the encoder, the compression session is flushed
431 // and invalidated. Internally, VideoToolbox will join all of its threads 471 // and invalidated. Internally, VideoToolbox will join all of its threads
432 // before returning to the client. Therefore, when control returns to us, we 472 // before returning to the client. Therefore, when control returns to us, we
433 // are guaranteed that the output callback will not execute again. 473 // are guaranteed that the output callback will not execute again.
434 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate( 474 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate(
435 kCFAllocatorDefault, frame_size_.width(), frame_size_.height(), 475 kCFAllocatorDefault,
436 CoreMediaGlue::kCMVideoCodecType_H264, encoder_spec, buffer_attributes, 476 input_visible_size_.width(),
477 input_visible_size_.height(),
478 CoreMediaGlue::kCMVideoCodecType_H264,
479 encoder_spec,
480 attributes,
437 nullptr /* compressedDataAllocator */, 481 nullptr /* compressedDataAllocator */,
438 &H264VideoToolboxEncoder::CompressionCallback, 482 &VTVideoEncodeAccelerator::CompressionCallback,
439 reinterpret_cast<void*>(this), compression_session_.InitializeInto()); 483 reinterpret_cast<void*>(this),
484 compression_session_.InitializeInto());
440 if (status != noErr) { 485 if (status != noErr) {
441 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; 486 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
442 // Notify that reinitialization has failed. 487 return false;
443 cast_environment_->PostTask( 488 }
444 CastEnvironment::MAIN, FROM_HERE, 489 DVLOG(3) << " VTCompressionSession created with HW encode: "
445 base::Bind(status_change_cb_, STATUS_CODEC_INIT_FAILED)); 490 << require_hw_encoding;
446 return; 491 return true;
447 } 492 }
448 493
449 // Configure the session (apply session properties based on the current state 494 bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
450 // of the encoder, experimental tuning and requirements). 495 DCHECK(thread_checker_.CalledOnValidThread());
451 ConfigureCompressionSession(); 496 DCHECK(compression_session_);
452 497
453 // Update the video frame factory. 498 media::video_toolbox::SessionPropertySetter session_property_setter(
454 base::ScopedCFTypeRef<CVPixelBufferPoolRef> pool( 499 compression_session_, videotoolbox_glue_);
455 videotoolbox_glue_->VTCompressionSessionGetPixelBufferPool( 500 bool rv = true;
456 compression_session_), 501 rv &= session_property_setter.SetSessionProperty(
457 base::scoped_policy::RETAIN);
458 video_frame_factory_->Update(pool, frame_size_);
459
460 // Notify that reinitialization is done.
461 cast_environment_->PostTask(
462 CastEnvironment::MAIN, FROM_HERE,
463 base::Bind(status_change_cb_, STATUS_INITIALIZED));
464 }
465
466 void H264VideoToolboxEncoder::ConfigureCompressionSession() {
467 SetSessionProperty(
468 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(), 502 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(),
469 videotoolbox_glue_->kVTProfileLevel_H264_Main_AutoLevel()); 503 videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel());
470 SetSessionProperty(videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), 504 rv &= session_property_setter.SetSessionProperty(
471 true); 505 videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true);
472 SetSessionProperty( 506 rv &= session_property_setter.SetSessionProperty(
473 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(), 507 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(),
474 false); 508 false);
475 SetSessionProperty( 509 DLOG_IF(ERROR, !rv) << " SetSessionProperty failed.";
476 videotoolbox_glue_->kVTCompressionPropertyKey_MaxKeyFrameInterval(), 240); 510 return rv;
477 SetSessionProperty( 511 }
478 videotoolbox_glue_ 512
479 ->kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration(), 513 void VTVideoEncodeAccelerator::DestroyCompressionSession() {
480 240); 514 // This method may be called on |encoder_thread_| or the GPU child thread.
481 // TODO(jfroy): implement better bitrate control 515
482 // https://crbug.com/425352
483 SetSessionProperty(
484 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(),
485 (video_config_.min_bitrate + video_config_.max_bitrate) / 2);
486 SetSessionProperty(
487 videotoolbox_glue_->kVTCompressionPropertyKey_ExpectedFrameRate(),
488 video_config_.max_frame_rate);
489 // Keep these attachment settings in-sync with those in Initialize().
490 SetSessionProperty(
491 videotoolbox_glue_->kVTCompressionPropertyKey_ColorPrimaries(),
492 kCVImageBufferColorPrimaries_ITU_R_709_2);
493 SetSessionProperty(
494 videotoolbox_glue_->kVTCompressionPropertyKey_TransferFunction(),
495 kCVImageBufferTransferFunction_ITU_R_709_2);
496 SetSessionProperty(
497 videotoolbox_glue_->kVTCompressionPropertyKey_YCbCrMatrix(),
498 kCVImageBufferYCbCrMatrix_ITU_R_709_2);
499 if (video_config_.max_number_of_video_buffers_used > 0) {
500 SetSessionProperty(
501 videotoolbox_glue_->kVTCompressionPropertyKey_MaxFrameDelayCount(),
502 video_config_.max_number_of_video_buffers_used);
503 }
504 }
505
506 void H264VideoToolboxEncoder::DestroyCompressionSession() {
507 DCHECK(thread_checker_.CalledOnValidThread());
508
509 // If the compression session exists, invalidate it. This blocks until all
510 // pending output callbacks have returned and any internal threads have
511 // joined, ensuring no output callback ever sees a dangling encoder pointer.
512 //
513 // Before destroying the compression session, the video frame factory's pool
514 // is updated to null so that no thread will produce new video frames via the
515 // factory until a new compression session is created. The current frame size
516 // is passed to prevent the video frame factory from posting |UpdateFrameSize|
517 // tasks. Indeed, |DestroyCompressionSession| is either called from
518 // |ResetCompressionSession|, in which case a new pool and frame size will be
519 // set, or from callsites that require that there be no compression session
520 // (ex: the dtor).
521 if (compression_session_) { 516 if (compression_session_) {
522 video_frame_factory_->Update(
523 base::ScopedCFTypeRef<CVPixelBufferPoolRef>(nullptr), frame_size_);
524 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_); 517 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_);
525 compression_session_.reset(); 518 compression_session_.reset();
526 } 519 }
527 } 520 }
528 521
529 bool H264VideoToolboxEncoder::EncodeVideoFrame( 522 } // namespace content
530 const scoped_refptr<media::VideoFrame>& video_frame,
531 const base::TimeTicks& reference_time,
532 const FrameEncodedCallback& frame_encoded_callback) {
533 DCHECK(thread_checker_.CalledOnValidThread());
534 DCHECK(!frame_encoded_callback.is_null());
535
536 // Reject empty video frames.
537 const gfx::Size frame_size = video_frame->visible_rect().size();
538 if (frame_size.IsEmpty()) {
539 DVLOG(1) << "Rejecting empty video frame.";
540 return false;
541 }
542
543 // Handle frame size changes. This will reset the compression session.
544 if (frame_size != frame_size_) {
545 DVLOG(1) << "EncodeVideoFrame: Detected frame size change.";
546 UpdateFrameSize(frame_size);
547 }
548
549 // Need a compression session to continue.
550 if (!compression_session_) {
551 DLOG(ERROR) << "No compression session.";
552 return false;
553 }
554
555 // Wrap the VideoFrame in a CVPixelBuffer. In all cases, no data will be
556 // copied. If the VideoFrame was created by this encoder's video frame
557 // factory, then the returned CVPixelBuffer will have been obtained from the
558 // compression session's pixel buffer pool. This will eliminate a copy of the
559 // frame into memory visible by the hardware encoder. The VideoFrame's
560 // lifetime is extended for the lifetime of the returned CVPixelBuffer.
561 auto pixel_buffer = media::WrapVideoFrameInCVPixelBuffer(*video_frame);
562 if (!pixel_buffer) {
563 DLOG(ERROR) << "WrapVideoFrameInCVPixelBuffer failed.";
564 return false;
565 }
566
567 // Convert the frame timestamp to CMTime.
568 auto timestamp_cm = CoreMediaGlue::CMTimeMake(
569 (reference_time - base::TimeTicks()).InMicroseconds(), USEC_PER_SEC);
570
571 // Wrap information we'll need after the frame is encoded in a heap object.
572 // We'll get the pointer back from the VideoToolbox completion callback.
573 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode(
574 RtpTimeTicks::FromTimeDelta(video_frame->timestamp(), kVideoFrequency),
575 reference_time, frame_encoded_callback));
576
577 // Build a suitable frame properties dictionary for keyframes.
578 base::ScopedCFTypeRef<CFDictionaryRef> frame_props;
579 if (encode_next_frame_as_keyframe_) {
580 frame_props = DictionaryWithKeyValue(
581 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(),
582 kCFBooleanTrue);
583 encode_next_frame_as_keyframe_ = false;
584 }
585
586 // Submit the frame to the compression session. The function returns as soon
587 // as the frame has been enqueued.
588 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame(
589 compression_session_, pixel_buffer, timestamp_cm,
590 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props,
591 reinterpret_cast<void*>(request.release()), nullptr);
592 if (status != noErr) {
593 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
594 return false;
595 }
596
597 return true;
598 }
599
600 void H264VideoToolboxEncoder::UpdateFrameSize(const gfx::Size& size_needed) {
601 DCHECK(thread_checker_.CalledOnValidThread());
602
603 // Our video frame factory posts a task to update the frame size when its
604 // cache of the frame size differs from what the client requested. To avoid
605 // spurious encoder resets, check again here.
606 if (size_needed == frame_size_) {
607 DCHECK(compression_session_);
608 return;
609 }
610
611 VLOG(1) << "Resetting compression session (for frame size change from "
612 << frame_size_.ToString() << " to " << size_needed.ToString() << ").";
613
614 // If there is an existing session, finish every pending frame.
615 if (compression_session_) {
616 EmitFrames();
617 }
618
619 // Store the new frame size.
620 frame_size_ = size_needed;
621
622 // Reset the compression session.
623 ResetCompressionSession();
624 }
625
626 void H264VideoToolboxEncoder::SetBitRate(int /*new_bit_rate*/) {
627 DCHECK(thread_checker_.CalledOnValidThread());
628 // VideoToolbox does not seem to support bitrate reconfiguration.
629 }
630
631 void H264VideoToolboxEncoder::GenerateKeyFrame() {
632 DCHECK(thread_checker_.CalledOnValidThread());
633 encode_next_frame_as_keyframe_ = true;
634 }
635
636 scoped_ptr<VideoFrameFactory>
637 H264VideoToolboxEncoder::CreateVideoFrameFactory() {
638 DCHECK(thread_checker_.CalledOnValidThread());
639 return scoped_ptr<VideoFrameFactory>(
640 new VideoFrameFactoryImpl::Proxy(video_frame_factory_));
641 }
642
643 void H264VideoToolboxEncoder::EmitFrames() {
644 DCHECK(thread_checker_.CalledOnValidThread());
645 if (!compression_session_)
646 return;
647
648 OSStatus status = videotoolbox_glue_->VTCompressionSessionCompleteFrames(
649 compression_session_, CoreMediaGlue::CMTime{0, 0, 0, 0});
650 if (status != noErr) {
651 DLOG(ERROR) << " VTCompressionSessionCompleteFrames failed: " << status;
652 }
653 }
654
655 void H264VideoToolboxEncoder::OnSuspend() {
656 VLOG(1)
657 << "OnSuspend: Emitting all frames and destroying compression session.";
658 EmitFrames();
659 DestroyCompressionSession();
660 power_suspended_ = true;
661 }
662
663 void H264VideoToolboxEncoder::OnResume() {
664 power_suspended_ = false;
665
666 // Reset the compression session only if the frame size is not zero (which
667 // will obviously fail). It is possible for the frame size to be zero if no
668 // frame was submitted for encoding or requested from the video frame factory
669 // before suspension.
670 if (!frame_size_.IsEmpty()) {
671 VLOG(1) << "OnResume: Resetting compression session.";
672 ResetCompressionSession();
673 }
674 }
675
676 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
677 int32_t value) {
678 base::ScopedCFTypeRef<CFNumberRef> cfvalue(
679 CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
680 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
681 cfvalue) == noErr;
682 }
683
684 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key, bool value) {
685 CFBooleanRef cfvalue = (value) ? kCFBooleanTrue : kCFBooleanFalse;
686 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
687 cfvalue) == noErr;
688 }
689
690 bool H264VideoToolboxEncoder::SetSessionProperty(CFStringRef key,
691 CFStringRef value) {
692 return videotoolbox_glue_->VTSessionSetProperty(compression_session_, key,
693 value) == noErr;
694 }
695
696 void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
697 void* request_opaque,
698 OSStatus status,
699 VTEncodeInfoFlags info,
700 CMSampleBufferRef sbuf) {
701 auto encoder = reinterpret_cast<H264VideoToolboxEncoder*>(encoder_opaque);
702 const scoped_ptr<InProgressFrameEncode> request(
703 reinterpret_cast<InProgressFrameEncode*>(request_opaque));
704 bool keyframe = false;
705 bool has_frame_data = false;
706
707 if (status != noErr) {
708 DLOG(ERROR) << " encoding failed: " << status;
709 encoder->cast_environment_->PostTask(
710 CastEnvironment::MAIN, FROM_HERE,
711 base::Bind(encoder->status_change_cb_, STATUS_CODEC_RUNTIME_ERROR));
712 } else if ((info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped)) {
713 DVLOG(2) << " frame dropped";
714 } else {
715 auto sample_attachments =
716 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(
717 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true),
718 0));
719
720 // If the NotSync key is not present, it implies Sync, which indicates a
721 // keyframe (at least I think, VT documentation is, erm, sparse). Could
722 // alternatively use kCMSampleAttachmentKey_DependsOnOthers == false.
723 keyframe = !CFDictionaryContainsKey(
724 sample_attachments,
725 CoreMediaGlue::kCMSampleAttachmentKey_NotSync());
726 has_frame_data = true;
727 }
728
729 // Increment the encoder-scoped frame id and assign the new value to this
730 // frame. VideoToolbox calls the output callback serially, so this is safe.
731 const uint32_t frame_id = ++encoder->last_frame_id_;
732
733 scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
734 encoded_frame->frame_id = frame_id;
735 encoded_frame->reference_time = request->reference_time;
736 encoded_frame->rtp_timestamp = request->rtp_timestamp;
737 if (keyframe) {
738 encoded_frame->dependency = EncodedFrame::KEY;
739 encoded_frame->referenced_frame_id = frame_id;
740 } else {
741 encoded_frame->dependency = EncodedFrame::DEPENDENT;
742 // H.264 supports complex frame reference schemes (multiple reference
743 // frames, slice references, backward and forward references, etc). Cast
744 // doesn't support the concept of forward-referencing frame dependencies or
745 // multiple frame dependencies; so pretend that all frames are only
746 // decodable after their immediately preceding frame is decoded. This will
747 // ensure a Cast receiver only attempts to decode the frames sequentially
748 // and in order. Furthermore, the encoder is configured to never use forward
749 // references (see |kVTCompressionPropertyKey_AllowFrameReordering|). There
750 // is no way to prevent multiple reference frames.
751 encoded_frame->referenced_frame_id = frame_id - 1;
752 }
753
754 if (has_frame_data)
755 CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
756
757 // TODO(miu): Compute and populate the |deadline_utilization| and
758 // |lossy_utilization| performance metrics in |encoded_frame|.
759
760 encoded_frame->encode_completion_time =
761 encoder->cast_environment_->Clock()->NowTicks();
762 encoder->cast_environment_->PostTask(
763 CastEnvironment::MAIN, FROM_HERE,
764 base::Bind(request->frame_encoded_callback,
765 base::Passed(&encoded_frame)));
766 }
767
768 } // namespace cast
769 } // namespace media