OLD | NEW |
| (Empty) |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/vt_video_decode_accelerator_mac.h" | |
6 | |
7 #include <CoreVideo/CoreVideo.h> | |
8 #include <OpenGL/CGLIOSurface.h> | |
9 #include <OpenGL/gl.h> | |
10 #include <stddef.h> | |
11 | |
12 #include <algorithm> | |
13 | |
14 #include "base/bind.h" | |
15 #include "base/logging.h" | |
16 #include "base/mac/mac_logging.h" | |
17 #include "base/macros.h" | |
18 #include "base/memory/ptr_util.h" | |
19 #include "base/metrics/histogram_macros.h" | |
20 #include "base/sys_byteorder.h" | |
21 #include "base/sys_info.h" | |
22 #include "base/thread_task_runner_handle.h" | |
23 #include "base/version.h" | |
24 #include "media/base/limits.h" | |
25 #include "ui/gl/gl_context.h" | |
26 #include "ui/gl/gl_image_io_surface.h" | |
27 #include "ui/gl/gl_implementation.h" | |
28 #include "ui/gl/scoped_binders.h" | |
29 | |
30 using content_common_gpu_media::kModuleVt; | |
31 using content_common_gpu_media::InitializeStubs; | |
32 using content_common_gpu_media::IsVtInitialized; | |
33 using content_common_gpu_media::StubPathMap; | |
34 | |
// Logs the failing OSStatus |status| for |name| and reports a platform error
// to the client via NotifyError(). |session_failure| is the
// VTVDASessionFailureType recorded for UMA. Wrapped in do/while(0) so the
// macro expands to a single statement.
#define NOTIFY_STATUS(name, status, session_failure) \
    do { \
      OSSTATUS_DLOG(ERROR, status) << name; \
      NotifyError(PLATFORM_FAILURE, session_failure); \
    } while (0)
40 | |
41 namespace content { | |
42 | |
// Only H.264 with 4:2:0 chroma sampling is supported.
// Initialize() rejects any |config.profile| not listed here.
static const media::VideoCodecProfile kSupportedProfiles[] = {
    media::H264PROFILE_BASELINE,
    media::H264PROFILE_MAIN,
    media::H264PROFILE_EXTENDED,
    media::H264PROFILE_HIGH,
    // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
    // that some codecs fail to check the profile during initialization and
    // then fail on the first frame decode, which currently results in a
    // pipeline failure.
    // media::H264PROFILE_HIGH10PROFILE,
    media::H264PROFILE_SCALABLEBASELINE,
    media::H264PROFILE_SCALABLEHIGH,
    media::H264PROFILE_STEREOHIGH,
    media::H264PROFILE_MULTIVIEWHIGH,
};
59 | |
// Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
// DecodeTask() rewrites Annex B start codes into headers of this size.
static const int kNALUHeaderLength = 4;

// We request 5 picture buffers from the client, each of which has a texture ID
// that we can bind decoded frames to. We need enough to satisfy preroll, and
// enough to avoid unnecessary stalling, but no more than that. The resource
// requirements are low, as we don't need the textures to be backed by storage.
static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;

// Maximum number of frames to queue for reordering before we stop asking for
// more. (NotifyEndOfBitstreamBuffer() is called when frames are moved into the
// reorder queue.)
static const int kMaxReorderQueueSize = 16;
73 | |
// Build an |image_config| dictionary for VideoToolbox initialization.
// Returns a null ScopedCFTypeRef on any allocation failure; callers must
// check the result with .get().
//
// NOTE(review): Output() claims kCVPixelBufferOpenGLCompatibilityKey is set
// in |image_config|, but only pixel format, width, and height are set here —
// confirm whether that key should be added or the comment updated.
static base::ScopedCFTypeRef<CFMutableDictionaryRef>
BuildImageConfig(CMVideoDimensions coded_dimensions) {
  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;

  // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
  // lower power than 4:2:2 when composited directly by CoreAnimation.
  int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  // CFINT wraps a local int32_t in a CFNumber. Each create can fail, so all
  // three results are checked before use.
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
  base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format));
  base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width));
  base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_dimensions.height));
#undef CFINT
  if (!cf_pixel_format.get() || !cf_width.get() || !cf_height.get())
    return image_config;  // Still null.

  image_config.reset(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          3,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!image_config.get())
    return image_config;

  CFDictionarySetValue(image_config, kCVPixelBufferPixelFormatTypeKey,
                       cf_pixel_format);
  CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width);
  CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height);

  return image_config;
}
106 | |
// Create a VTDecompressionSession using the provided |pps| and |sps|. If
// |require_hardware| is true, the session must uses real hardware decoding
// (as opposed to software decoding inside of VideoToolbox) to be considered
// successful.
//
// This is only used for sandbox warmup (forcing library loads); the session
// is discarded without ever decoding a frame.
//
// TODO(sandersd): Merge with ConfigureDecoder(), as the code is very similar.
static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
                                      const uint8_t* pps, size_t pps_size,
                                      bool require_hardware) {
  const uint8_t* data_ptrs[] = {sps, pps};
  const size_t data_sizes[] = {sps_size, pps_size};

  base::ScopedCFTypeRef<CMFormatDescriptionRef> format;
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      2,                  // parameter_set_count
      data_ptrs,          // &parameter_set_pointers
      data_sizes,         // &parameter_set_sizes
      kNALUHeaderLength,  // nal_unit_header_length
      format.InitializeInto());
  if (status) {
    OSSTATUS_DLOG(WARNING, status)
        << "Failed to create CMVideoFormatDescription.";
    return false;
  }

  base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          1,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!decoder_config.get())
    return false;

  if (require_hardware) {
    CFDictionarySetValue(
        decoder_config,
        // kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
        CFSTR("RequireHardwareAcceleratedVideoDecoder"),
        kCFBooleanTrue);
  }

  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
      BuildImageConfig(CMVideoFormatDescriptionGetDimensions(format)));
  if (!image_config.get())
    return false;

  // A zeroed callback record is sufficient: this warmup session never decodes,
  // so no output callback will ever fire.
  VTDecompressionOutputCallbackRecord callback = {0};

  base::ScopedCFTypeRef<VTDecompressionSessionRef> session;
  status = VTDecompressionSessionCreate(
      kCFAllocatorDefault,
      format,          // video_format_description
      decoder_config,  // video_decoder_specification
      image_config,    // destination_image_buffer_attributes
      &callback,       // output_callback
      session.InitializeInto());
  if (status) {
    OSSTATUS_DLOG(WARNING, status) << "Failed to create VTDecompressionSession";
    return false;
  }

  return true;
}
172 | |
// The purpose of this function is to preload the generic and hardware-specific
// libraries required by VideoToolbox before the GPU sandbox is enabled.
// VideoToolbox normally loads the hardware-specific libraries lazily, so we
// must actually create a decompression session. If creating a decompression
// session fails, hardware decoding will be disabled (Initialize() will always
// return false).
static bool InitializeVideoToolboxInternal() {
  if (!IsVtInitialized()) {
    // CoreVideo is also required, but the loader stops after the first path is
    // loaded. Instead we rely on the transitive dependency from VideoToolbox to
    // CoreVideo.
    StubPathMap paths;
    paths[kModuleVt].push_back(FILE_PATH_LITERAL(
        "/System/Library/Frameworks/VideoToolbox.framework/VideoToolbox"));
    if (!InitializeStubs(paths)) {
      LOG(WARNING) << "Failed to initialize VideoToolbox framework. "
                   << "Hardware accelerated video decoding will be disabled.";
      return false;
    }
  }

  // Create a hardware decoding session to force the hardware decoder libraries
  // to load.
  // SPS and PPS data are taken from a 480p sample (buck2.mp4).
  const uint8_t sps_normal[] = {0x67, 0x64, 0x00, 0x1e, 0xac, 0xd9, 0x80, 0xd4,
                                0x3d, 0xa1, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00,
                                0x00, 0x03, 0x00, 0x30, 0x8f, 0x16, 0x2d, 0x9a};
  const uint8_t pps_normal[] = {0x68, 0xe9, 0x7b, 0xcb};
  if (!CreateVideoToolboxSession(sps_normal, arraysize(sps_normal), pps_normal,
                                 arraysize(pps_normal), true)) {
    LOG(WARNING) << "Failed to create hardware VideoToolbox session. "
                 << "Hardware accelerated video decoding will be disabled.";
    return false;
  }

  // Create a software decoding session (small resolutions fall back to the
  // software decoder inside VideoToolbox, which also needs warming up).
  // SPS and PPS data are taken from a 18p sample (small2.mp4).
  const uint8_t sps_small[] = {0x67, 0x64, 0x00, 0x0a, 0xac, 0xd9, 0x89, 0x7e,
                               0x22, 0x10, 0x00, 0x00, 0x3e, 0x90, 0x00, 0x0e,
                               0xa6, 0x08, 0xf1, 0x22, 0x59, 0xa0};
  const uint8_t pps_small[] = {0x68, 0xe9, 0x79, 0x72, 0xc0};
  if (!CreateVideoToolboxSession(sps_small, arraysize(sps_small), pps_small,
                                 arraysize(pps_small), false)) {
    LOG(WARNING) << "Failed to create software VideoToolbox session. "
                 << "Hardware accelerated video decoding will be disabled.";
    return false;
  }

  return true;
}
222 | |
223 bool InitializeVideoToolbox() { | |
224 // InitializeVideoToolbox() is called only from the GPU process main thread; | |
225 // once for sandbox warmup, and then once each time a VTVideoDecodeAccelerator | |
226 // is initialized. | |
227 static bool attempted = false; | |
228 static bool succeeded = false; | |
229 | |
230 if (!attempted) { | |
231 attempted = true; | |
232 succeeded = InitializeVideoToolboxInternal(); | |
233 } | |
234 | |
235 return succeeded; | |
236 } | |
237 | |
238 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. | |
239 static void OutputThunk( | |
240 void* decompression_output_refcon, | |
241 void* source_frame_refcon, | |
242 OSStatus status, | |
243 VTDecodeInfoFlags info_flags, | |
244 CVImageBufferRef image_buffer, | |
245 CMTime presentation_time_stamp, | |
246 CMTime presentation_duration) { | |
247 VTVideoDecodeAccelerator* vda = | |
248 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); | |
249 vda->Output(source_frame_refcon, status, image_buffer); | |
250 } | |
251 | |
// Tasks are stored by value in |task_queue_| and drained by
// ProcessWorkQueues() on the GPU thread.
VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
}

// Copyable so tasks can be queued by value.
VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;

VTVideoDecodeAccelerator::Task::~Task() {
}
259 | |
// One Frame is created per bitstream buffer in Decode(); DecodeTask() fills
// in the H.264 metadata and Output() attaches the decoded image.
VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
    : bitstream_id(bitstream_id),
      pic_order_cnt(0),
      is_idr(false),
      reorder_window(0) {
}

VTVideoDecodeAccelerator::Frame::~Frame() {
}
269 | |
// Associates a client picture-buffer texture with the GPU-side texture it is
// backed by.
VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
                                                   uint32_t service_texture_id)
    : client_texture_id(client_texture_id),
      service_texture_id(service_texture_id) {}

VTVideoDecodeAccelerator::PictureInfo::~PictureInfo() {
  // NOTE(review): Destroy(false) appears to mean "no GL context is current";
  // confirm against the GLImage::Destroy(have_context) contract.
  if (gl_image)
    gl_image->Destroy(false);
}
279 | |
280 bool VTVideoDecodeAccelerator::FrameOrder::operator()( | |
281 const linked_ptr<Frame>& lhs, | |
282 const linked_ptr<Frame>& rhs) const { | |
283 if (lhs->pic_order_cnt != rhs->pic_order_cnt) | |
284 return lhs->pic_order_cnt > rhs->pic_order_cnt; | |
285 // If |pic_order_cnt| is the same, fall back on using the bitstream order. | |
286 // TODO(sandersd): Assign a sequence number in Decode() and use that instead. | |
287 // TODO(sandersd): Using the sequence number, ensure that frames older than | |
288 // |kMaxReorderQueueSize| are ordered first, regardless of |pic_order_cnt|. | |
289 return lhs->bitstream_id > rhs->bitstream_id; | |
290 } | |
291 | |
// Constructed on the GPU thread; |gpu_task_runner_| captures that thread's
// runner so decoder-thread work can post results back.
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb)
    : make_context_current_cb_(make_context_current_cb),
      bind_image_cb_(bind_image_cb),
      client_(nullptr),
      state_(STATE_DECODING),
      format_(nullptr),
      session_(nullptr),
      last_sps_id_(-1),
      last_pps_id_(-1),
      config_changed_(false),
      waiting_for_idr_(true),
      missing_idr_logged_(false),
      gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      decoder_thread_("VTDecoderThread"),
      weak_this_factory_(this) {
  // Wire the VideoToolbox output callback back into this instance (see
  // OutputThunk()).
  callback_.decompressionOutputCallback = OutputThunk;
  callback_.decompressionOutputRefCon = this;
  weak_this_ = weak_this_factory_.GetWeakPtr();
}
313 | |
// Destruction must happen on the GPU thread, matching construction.
VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
}
317 | |
318 bool VTVideoDecodeAccelerator::Initialize(const Config& config, | |
319 Client* client) { | |
320 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
321 | |
322 if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) { | |
323 NOTREACHED() << "GL callbacks are required for this VDA"; | |
324 return false; | |
325 } | |
326 | |
327 if (config.is_encrypted) { | |
328 NOTREACHED() << "Encrypted streams are not supported for this VDA"; | |
329 return false; | |
330 } | |
331 | |
332 client_ = client; | |
333 | |
334 if (!InitializeVideoToolbox()) | |
335 return false; | |
336 | |
337 bool profile_supported = false; | |
338 for (const auto& supported_profile : kSupportedProfiles) { | |
339 if (config.profile == supported_profile) { | |
340 profile_supported = true; | |
341 break; | |
342 } | |
343 } | |
344 if (!profile_supported) | |
345 return false; | |
346 | |
347 // Spawn a thread to handle parsing and calling VideoToolbox. | |
348 if (!decoder_thread_.Start()) | |
349 return false; | |
350 | |
351 // Count the session as successfully initialized. | |
352 UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason", | |
353 SFT_SUCCESSFULLY_INITIALIZED, | |
354 SFT_MAX + 1); | |
355 return true; | |
356 } | |
357 | |
// Blocks the decoder thread until the decompression session has emitted all
// pending asynchronous frames. Returns false (after reporting a platform
// error) if the wait fails; trivially true when no session exists.
bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  if (session_) {
    OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_);
    if (status) {
      NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()",
                    status, SFT_PLATFORM_ERROR);
      return false;
    }
  }
  return true;
}
370 | |
// (Re)creates |session_| from the most recently seen SPS/SPSExt/PPS. Runs on
// the decoder thread; reports errors via NotifyError() and returns false on
// failure. The old session's pending frames are drained before the swap so
// output ordering is preserved.
bool VTVideoDecodeAccelerator::ConfigureDecoder() {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!last_sps_.empty());
  DCHECK(!last_pps_.empty());

  // Build the configuration records (SPS, optional SPS extension, PPS).
  std::vector<const uint8_t*> nalu_data_ptrs;
  std::vector<size_t> nalu_data_sizes;
  nalu_data_ptrs.reserve(3);
  nalu_data_sizes.reserve(3);
  nalu_data_ptrs.push_back(&last_sps_.front());
  nalu_data_sizes.push_back(last_sps_.size());
  if (!last_spsext_.empty()) {
    nalu_data_ptrs.push_back(&last_spsext_.front());
    nalu_data_sizes.push_back(last_spsext_.size());
  }
  nalu_data_ptrs.push_back(&last_pps_.front());
  nalu_data_sizes.push_back(last_pps_.size());

  // Construct a new format description from the parameter sets.
  format_.reset();
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      nalu_data_ptrs.size(),     // parameter_set_count
      &nalu_data_ptrs.front(),   // &parameter_set_pointers
      &nalu_data_sizes.front(),  // &parameter_set_sizes
      kNALUHeaderLength,         // nal_unit_header_length
      format_.InitializeInto());
  if (status) {
    NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
                  status, SFT_PLATFORM_ERROR);
    return false;
  }

  // Store the new configuration data.
  // TODO(sandersd): Despite the documentation, this seems to return the visible
  // size. However, the output always appears to be top-left aligned, so it
  // makes no difference. Re-verify this and update the variable name.
  CMVideoDimensions coded_dimensions =
      CMVideoFormatDescriptionGetDimensions(format_);
  coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height);

  // Prepare VideoToolbox configuration dictionaries.
  base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          1,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!decoder_config.get()) {
    DLOG(ERROR) << "Failed to create CFMutableDictionary.";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // Prefer hardware decoding, but allow fallback to VideoToolbox's software
  // decoder (contrast with the warmup path, which requires hardware).
  CFDictionarySetValue(
      decoder_config,
      // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
      CFSTR("EnableHardwareAcceleratedVideoDecoder"),
      kCFBooleanTrue);

  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
      BuildImageConfig(coded_dimensions));
  if (!image_config.get()) {
    DLOG(ERROR) << "Failed to create decoder image configuration.";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // Ensure that the old decoder emits all frames before the new decoder can
  // emit any.
  if (!FinishDelayedFrames())
    return false;

  session_.reset();
  status = VTDecompressionSessionCreate(
      kCFAllocatorDefault,
      format_,         // video_format_description
      decoder_config,  // video_decoder_specification
      image_config,    // destination_image_buffer_attributes
      &callback_,      // output_callback
      session_.InitializeInto());
  if (status) {
    NOTIFY_STATUS("VTDecompressionSessionCreate()", status,
                  SFT_UNSUPPORTED_STREAM_PARAMETERS);
    return false;
  }

  // Report whether hardware decode is being used.
  bool using_hardware = false;
  base::ScopedCFTypeRef<CFBooleanRef> cf_using_hardware;
  if (VTSessionCopyProperty(
          session_,
          // kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder
          CFSTR("UsingHardwareAcceleratedVideoDecoder"),
          kCFAllocatorDefault,
          cf_using_hardware.InitializeInto()) == 0) {
    using_hardware = CFBooleanGetValue(cf_using_hardware);
  }
  UMA_HISTOGRAM_BOOLEAN("Media.VTVDA.HardwareAccelerated", using_hardware);

  return true;
}
474 | |
475 void VTVideoDecodeAccelerator::DecodeTask( | |
476 const media::BitstreamBuffer& bitstream, | |
477 Frame* frame) { | |
478 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | |
479 | |
480 // Map the bitstream buffer. | |
481 base::SharedMemory memory(bitstream.handle(), true); | |
482 size_t size = bitstream.size(); | |
483 if (!memory.Map(size)) { | |
484 DLOG(ERROR) << "Failed to map bitstream buffer"; | |
485 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR); | |
486 return; | |
487 } | |
488 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); | |
489 | |
490 // NALUs are stored with Annex B format in the bitstream buffer (start codes), | |
491 // but VideoToolbox expects AVC format (length headers), so we must rewrite | |
492 // the data. | |
493 // | |
494 // Locate relevant NALUs and compute the size of the rewritten data. Also | |
495 // record any parameter sets for VideoToolbox initialization. | |
496 std::vector<uint8_t> sps; | |
497 std::vector<uint8_t> spsext; | |
498 std::vector<uint8_t> pps; | |
499 bool has_slice = false; | |
500 size_t data_size = 0; | |
501 std::vector<media::H264NALU> nalus; | |
502 parser_.SetStream(buf, size); | |
503 media::H264NALU nalu; | |
504 while (true) { | |
505 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); | |
506 if (result == media::H264Parser::kEOStream) | |
507 break; | |
508 if (result == media::H264Parser::kUnsupportedStream) { | |
509 DLOG(ERROR) << "Unsupported H.264 stream"; | |
510 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
511 return; | |
512 } | |
513 if (result != media::H264Parser::kOk) { | |
514 DLOG(ERROR) << "Failed to parse H.264 stream"; | |
515 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
516 return; | |
517 } | |
518 switch (nalu.nal_unit_type) { | |
519 case media::H264NALU::kSPS: | |
520 result = parser_.ParseSPS(&last_sps_id_); | |
521 if (result == media::H264Parser::kUnsupportedStream) { | |
522 DLOG(ERROR) << "Unsupported SPS"; | |
523 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
524 return; | |
525 } | |
526 if (result != media::H264Parser::kOk) { | |
527 DLOG(ERROR) << "Could not parse SPS"; | |
528 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
529 return; | |
530 } | |
531 sps.assign(nalu.data, nalu.data + nalu.size); | |
532 spsext.clear(); | |
533 break; | |
534 | |
535 case media::H264NALU::kSPSExt: | |
536 // TODO(sandersd): Check that the previous NALU was an SPS. | |
537 spsext.assign(nalu.data, nalu.data + nalu.size); | |
538 break; | |
539 | |
540 case media::H264NALU::kPPS: | |
541 result = parser_.ParsePPS(&last_pps_id_); | |
542 if (result == media::H264Parser::kUnsupportedStream) { | |
543 DLOG(ERROR) << "Unsupported PPS"; | |
544 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
545 return; | |
546 } | |
547 if (result != media::H264Parser::kOk) { | |
548 DLOG(ERROR) << "Could not parse PPS"; | |
549 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
550 return; | |
551 } | |
552 pps.assign(nalu.data, nalu.data + nalu.size); | |
553 break; | |
554 | |
555 case media::H264NALU::kSliceDataA: | |
556 case media::H264NALU::kSliceDataB: | |
557 case media::H264NALU::kSliceDataC: | |
558 case media::H264NALU::kNonIDRSlice: | |
559 case media::H264NALU::kIDRSlice: | |
560 // Compute the |pic_order_cnt| for the picture from the first slice. | |
561 if (!has_slice) { | |
562 // Verify that we are not trying to decode a slice without an IDR. | |
563 if (waiting_for_idr_) { | |
564 if (nalu.nal_unit_type == media::H264NALU::kIDRSlice) { | |
565 waiting_for_idr_ = false; | |
566 } else { | |
567 // We can't compute anything yet, bail on this frame. | |
568 has_slice = true; | |
569 break; | |
570 } | |
571 } | |
572 | |
573 media::H264SliceHeader slice_hdr; | |
574 result = parser_.ParseSliceHeader(nalu, &slice_hdr); | |
575 if (result == media::H264Parser::kUnsupportedStream) { | |
576 DLOG(ERROR) << "Unsupported slice header"; | |
577 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
578 return; | |
579 } | |
580 if (result != media::H264Parser::kOk) { | |
581 DLOG(ERROR) << "Could not parse slice header"; | |
582 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
583 return; | |
584 } | |
585 | |
586 // TODO(sandersd): Maintain a cache of configurations and reconfigure | |
587 // when a slice references a new config. | |
588 DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_); | |
589 const media::H264PPS* pps = | |
590 parser_.GetPPS(slice_hdr.pic_parameter_set_id); | |
591 if (!pps) { | |
592 DLOG(ERROR) << "Mising PPS referenced by slice"; | |
593 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
594 return; | |
595 } | |
596 | |
597 DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_); | |
598 const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | |
599 if (!sps) { | |
600 DLOG(ERROR) << "Mising SPS referenced by PPS"; | |
601 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
602 return; | |
603 } | |
604 | |
605 if (!poc_.ComputePicOrderCnt(sps, slice_hdr, &frame->pic_order_cnt)) { | |
606 DLOG(ERROR) << "Unable to compute POC"; | |
607 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
608 return; | |
609 } | |
610 | |
611 if (nalu.nal_unit_type == media::H264NALU::kIDRSlice) | |
612 frame->is_idr = true; | |
613 | |
614 if (sps->vui_parameters_present_flag && | |
615 sps->bitstream_restriction_flag) { | |
616 frame->reorder_window = std::min(sps->max_num_reorder_frames, | |
617 kMaxReorderQueueSize - 1); | |
618 } | |
619 } | |
620 has_slice = true; | |
621 default: | |
622 nalus.push_back(nalu); | |
623 data_size += kNALUHeaderLength + nalu.size; | |
624 break; | |
625 } | |
626 } | |
627 | |
628 // Initialize VideoToolbox. | |
629 if (!sps.empty() && sps != last_sps_) { | |
630 last_sps_.swap(sps); | |
631 last_spsext_.swap(spsext); | |
632 config_changed_ = true; | |
633 } | |
634 if (!pps.empty() && pps != last_pps_) { | |
635 last_pps_.swap(pps); | |
636 config_changed_ = true; | |
637 } | |
638 if (config_changed_) { | |
639 // Only reconfigure at IDRs to avoid corruption. | |
640 if (frame->is_idr) { | |
641 config_changed_ = false; | |
642 | |
643 if (last_sps_.empty()) { | |
644 DLOG(ERROR) << "Invalid configuration; no SPS"; | |
645 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
646 return; | |
647 } | |
648 if (last_pps_.empty()) { | |
649 DLOG(ERROR) << "Invalid configuration; no PPS"; | |
650 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
651 return; | |
652 } | |
653 | |
654 // ConfigureDecoder() calls NotifyError() on failure. | |
655 if (!ConfigureDecoder()) | |
656 return; | |
657 } | |
658 } | |
659 | |
660 // If no IDR has been seen yet, skip decoding. | |
661 if (has_slice && (!session_ || waiting_for_idr_) && config_changed_) { | |
662 if (!missing_idr_logged_) { | |
663 LOG(ERROR) << "Illegal attempt to decode without IDR. " | |
664 << "Discarding decode requests until next IDR."; | |
665 missing_idr_logged_ = true; | |
666 } | |
667 has_slice = false; | |
668 } | |
669 | |
670 // If there is nothing to decode, drop the bitstream buffer by returning an | |
671 // empty frame. | |
672 if (!has_slice) { | |
673 // Keep everything in order by flushing first. | |
674 if (!FinishDelayedFrames()) | |
675 return; | |
676 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
677 &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame)); | |
678 return; | |
679 } | |
680 | |
681 // If the session is not configured by this point, fail. | |
682 if (!session_) { | |
683 DLOG(ERROR) << "Cannot decode without configuration"; | |
684 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
685 return; | |
686 } | |
687 | |
688 // Update the frame metadata with configuration data. | |
689 frame->coded_size = coded_size_; | |
690 | |
691 // Create a memory-backed CMBlockBuffer for the translated data. | |
692 // TODO(sandersd): Pool of memory blocks. | |
693 base::ScopedCFTypeRef<CMBlockBufferRef> data; | |
694 OSStatus status = CMBlockBufferCreateWithMemoryBlock( | |
695 kCFAllocatorDefault, | |
696 nullptr, // &memory_block | |
697 data_size, // block_length | |
698 kCFAllocatorDefault, // block_allocator | |
699 nullptr, // &custom_block_source | |
700 0, // offset_to_data | |
701 data_size, // data_length | |
702 0, // flags | |
703 data.InitializeInto()); | |
704 if (status) { | |
705 NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status, | |
706 SFT_PLATFORM_ERROR); | |
707 return; | |
708 } | |
709 | |
710 // Make sure that the memory is actually allocated. | |
711 // CMBlockBufferReplaceDataBytes() is documented to do this, but prints a | |
712 // message each time starting in Mac OS X 10.10. | |
713 status = CMBlockBufferAssureBlockMemory(data); | |
714 if (status) { | |
715 NOTIFY_STATUS("CMBlockBufferAssureBlockMemory()", status, | |
716 SFT_PLATFORM_ERROR); | |
717 return; | |
718 } | |
719 | |
720 // Copy NALU data into the CMBlockBuffer, inserting length headers. | |
721 size_t offset = 0; | |
722 for (size_t i = 0; i < nalus.size(); i++) { | |
723 media::H264NALU& nalu = nalus[i]; | |
724 uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size)); | |
725 status = CMBlockBufferReplaceDataBytes( | |
726 &header, data, offset, kNALUHeaderLength); | |
727 if (status) { | |
728 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status, | |
729 SFT_PLATFORM_ERROR); | |
730 return; | |
731 } | |
732 offset += kNALUHeaderLength; | |
733 status = CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size); | |
734 if (status) { | |
735 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status, | |
736 SFT_PLATFORM_ERROR); | |
737 return; | |
738 } | |
739 offset += nalu.size; | |
740 } | |
741 | |
742 // Package the data in a CMSampleBuffer. | |
743 base::ScopedCFTypeRef<CMSampleBufferRef> sample; | |
744 status = CMSampleBufferCreate( | |
745 kCFAllocatorDefault, | |
746 data, // data_buffer | |
747 true, // data_ready | |
748 nullptr, // make_data_ready_callback | |
749 nullptr, // make_data_ready_refcon | |
750 format_, // format_description | |
751 1, // num_samples | |
752 0, // num_sample_timing_entries | |
753 nullptr, // &sample_timing_array | |
754 1, // num_sample_size_entries | |
755 &data_size, // &sample_size_array | |
756 sample.InitializeInto()); | |
757 if (status) { | |
758 NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR); | |
759 return; | |
760 } | |
761 | |
762 // Send the frame for decoding. | |
763 // Asynchronous Decompression allows for parallel submission of frames | |
764 // (without it, DecodeFrame() does not return until the frame has been | |
765 // decoded). We don't enable Temporal Processing so that frames are always | |
766 // returned in decode order; this makes it easier to avoid deadlock. | |
767 VTDecodeFrameFlags decode_flags = | |
768 kVTDecodeFrame_EnableAsynchronousDecompression; | |
769 status = VTDecompressionSessionDecodeFrame( | |
770 session_, | |
771 sample, // sample_buffer | |
772 decode_flags, // decode_flags | |
773 reinterpret_cast<void*>(frame), // source_frame_refcon | |
774 nullptr); // &info_flags_out | |
775 if (status) { | |
776 NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status, | |
777 SFT_DECODE_ERROR); | |
778 return; | |
779 } | |
780 } | |
781 | |
// This method may be called on any VideoToolbox thread.
// Receives one decoded image (or a decode error) per submitted sample and
// forwards it to the GPU thread via DecodeDone().
void VTVideoDecodeAccelerator::Output(
    void* source_frame_refcon,
    OSStatus status,
    CVImageBufferRef image_buffer) {
  if (status) {
    NOTIFY_STATUS("Decoding", status, SFT_DECODE_ERROR);
    return;
  }

  // The type of |image_buffer| is CVImageBuffer, but we only handle
  // CVPixelBuffers. This should be guaranteed as we set
  // kCVPixelBufferOpenGLCompatibilityKey in |image_config|.
  // NOTE(review): BuildImageConfig() in this file only sets pixel format,
  // width, and height — it does not appear to set
  // kCVPixelBufferOpenGLCompatibilityKey; confirm the claim above.
  //
  // Sometimes, for unknown reasons (http://crbug.com/453050), |image_buffer| is
  // NULL, which causes CFGetTypeID() to crash. While the rest of the code would
  // smoothly handle NULL as a dropped frame, we choose to fail permanantly here
  // until the issue is better understood.
  if (!image_buffer || CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) {
    DLOG(ERROR) << "Decoded frame is not a CVPixelBuffer";
    NotifyError(PLATFORM_FAILURE, SFT_DECODE_ERROR);
    return;
  }

  Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
  // RETAIN: VideoToolbox owns |image_buffer|; keep a reference for the frame.
  frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
      &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
}
811 | |
812 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { | |
813 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
814 DCHECK_EQ(1u, pending_frames_.count(frame->bitstream_id)); | |
815 Task task(TASK_FRAME); | |
816 task.frame = pending_frames_[frame->bitstream_id]; | |
817 pending_frames_.erase(frame->bitstream_id); | |
818 task_queue_.push(task); | |
819 ProcessWorkQueues(); | |
820 } | |
821 | |
// Runs on the decoder thread. Drains any frames still queued for
// VideoToolbox, then signals completion back to the GPU thread.
void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  FinishDelayedFrames();

  // Always queue a task, even if FinishDelayedFrames() fails, so that
  // destruction always completes.
  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
      &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
}
831 | |
832 void VTVideoDecodeAccelerator::FlushDone(TaskType type) { | |
833 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
834 task_queue_.push(Task(type)); | |
835 ProcessWorkQueues(); | |
836 } | |
837 | |
838 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { | |
839 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
840 if (bitstream.id() < 0) { | |
841 DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id(); | |
842 if (base::SharedMemory::IsHandleValid(bitstream.handle())) | |
843 base::SharedMemory::CloseHandle(bitstream.handle()); | |
844 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
845 return; | |
846 } | |
847 DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id())); | |
848 assigned_bitstream_ids_.insert(bitstream.id()); | |
849 Frame* frame = new Frame(bitstream.id()); | |
850 pending_frames_[frame->bitstream_id] = make_linked_ptr(frame); | |
851 decoder_thread_.task_runner()->PostTask( | |
852 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::DecodeTask, | |
853 base::Unretained(this), bitstream, frame)); | |
854 } | |
855 | |
856 void VTVideoDecodeAccelerator::AssignPictureBuffers( | |
857 const std::vector<media::PictureBuffer>& pictures) { | |
858 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
859 | |
860 for (const media::PictureBuffer& picture : pictures) { | |
861 DCHECK(!picture_info_map_.count(picture.id())); | |
862 assigned_picture_ids_.insert(picture.id()); | |
863 available_picture_ids_.push_back(picture.id()); | |
864 DCHECK_LE(1u, picture.internal_texture_ids().size()); | |
865 DCHECK_LE(1u, picture.texture_ids().size()); | |
866 picture_info_map_.insert(std::make_pair( | |
867 picture.id(), | |
868 base::WrapUnique(new PictureInfo(picture.internal_texture_ids()[0], | |
869 picture.texture_ids()[0])))); | |
870 } | |
871 | |
872 // Pictures are not marked as uncleared until after this method returns, and | |
873 // they will be broken if they are used before that happens. So, schedule | |
874 // future work after that happens. | |
875 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
876 &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_)); | |
877 } | |
878 | |
// Client callback (GPU thread): the client is done displaying |picture_id|.
// Releases the images pinned for display, then either returns the buffer to
// the available pool or, if it was dismissed while in flight, tells the
// client to drop it.
void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK(picture_info_map_.count(picture_id));
  PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
  // Drop the CVPixelBuffer reference and explicitly destroy the GL image so
  // the IOSurface can be recycled before the next frame is bound.
  picture_info->cv_image.reset();
  picture_info->gl_image->Destroy(false);
  picture_info->gl_image = nullptr;

  if (assigned_picture_ids_.count(picture_id) != 0) {
    available_picture_ids_.push_back(picture_id);
    ProcessWorkQueues();
  } else {
    // The buffer was dismissed (e.g. due to a resolution change) while the
    // client still held it; finish the dismissal now.
    client_->DismissPictureBuffer(picture_id);
  }
}
894 | |
// Central state-machine pump (GPU thread). Drains as much of the reorder and
// task queues as possible for the current |state_|.
//
// WARNING: in STATE_DESTROYING this may execute |delete this|; callers must
// not touch member state after calling it on a destruction path.
void VTVideoDecodeAccelerator::ProcessWorkQueues() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  switch (state_) {
    case STATE_DECODING:
      // TODO(sandersd): Batch where possible.
      // Loop until neither queue can make progress. Either call may change
      // |state_| (via NotifyError()), which also terminates the loop.
      while (state_ == STATE_DECODING) {
        if (!ProcessReorderQueue() && !ProcessTaskQueue())
          break;
      }
      return;

    case STATE_ERROR:
      // Do nothing until Destroy() is called.
      return;

    case STATE_DESTROYING:
      // Drop tasks until we are ready to destruct.
      while (!task_queue_.empty()) {
        if (task_queue_.front().type == TASK_DESTROY) {
          delete this;
          return;
        }
        task_queue_.pop();
      }
      return;
  }
}
922 | |
// Attempts to retire the front of |task_queue_| (GPU thread).
// Returns true if the task was consumed (progress was made), false if it must
// wait (reorder queue full, or a flush/reset still has frames in flight).
bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_DECODING);

  if (task_queue_.empty())
    return false;

  const Task& task = task_queue_.front();
  switch (task.type) {
    case TASK_FRAME:
      // A frame can move into the reorder queue only if there is room, and an
      // IDR frame only once the reorder queue has fully drained (an IDR
      // starts a new decode sequence, so earlier frames must be output
      // first).
      if (reorder_queue_.size() < kMaxReorderQueueSize &&
          (!task.frame->is_idr || reorder_queue_.empty())) {
        // The bitstream buffer is consumed as soon as the frame enters the
        // reorder queue; output happens later via PictureReady().
        assigned_bitstream_ids_.erase(task.frame->bitstream_id);
        client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
        reorder_queue_.push(task.frame);
        task_queue_.pop();
        return true;
      }
      return false;

    case TASK_FLUSH:
      DCHECK_EQ(task.type, pending_flush_tasks_.front());
      // A flush completes only after every reordered frame has been output.
      if (reorder_queue_.size() == 0) {
        pending_flush_tasks_.pop();
        client_->NotifyFlushDone();
        task_queue_.pop();
        return true;
      }
      return false;

    case TASK_RESET:
      DCHECK_EQ(task.type, pending_flush_tasks_.front());
      if (reorder_queue_.size() == 0) {
        // After a reset, decoding cannot resume until the next IDR.
        waiting_for_idr_ = true;
        pending_flush_tasks_.pop();
        client_->NotifyResetDone();
        task_queue_.pop();
        return true;
      }
      return false;

    case TASK_DESTROY:
      // Destroy tasks are only consumed in STATE_DESTROYING (see
      // ProcessWorkQueues()); reaching one here is a state-machine bug.
      NOTREACHED() << "Can't destroy while in STATE_DECODING.";
      NotifyError(ILLEGAL_STATE, SFT_PLATFORM_ERROR);
      return false;
  }
}
970 | |
// Attempts to output the earliest frame in |reorder_queue_| (GPU thread).
// Returns true if a frame was sent, false if output must wait (reorder
// window not yet satisfied, or no picture buffer available).
bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_DECODING);

  if (reorder_queue_.empty())
    return false;

  // If the next task is a flush (because there is a pending flush or because
  // the next frame is an IDR), then we don't need a full reorder buffer to send
  // the next frame.
  bool flushing = !task_queue_.empty() &&
                  (task_queue_.front().type != TASK_FRAME ||
                   task_queue_.front().frame->is_idr);

  size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window);
  if (flushing || reorder_queue_.size() > reorder_window) {
    // ProcessFrame() returns false when it must wait (e.g. for new picture
    // buffers); the frame then stays queued and is retried later.
    if (ProcessFrame(*reorder_queue_.top())) {
      reorder_queue_.pop();
      return true;
    }
  }

  return false;
}
995 | |
996 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { | |
997 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
998 DCHECK_EQ(state_, STATE_DECODING); | |
999 | |
1000 // If the next pending flush is for a reset, then the frame will be dropped. | |
1001 bool resetting = !pending_flush_tasks_.empty() && | |
1002 pending_flush_tasks_.front() == TASK_RESET; | |
1003 | |
1004 if (!resetting && frame.image.get()) { | |
1005 // If the |coded_size| has changed, request new picture buffers and then | |
1006 // wait for them. | |
1007 // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of | |
1008 // textures, this would be unnecessary, as the size is actually a property | |
1009 // of the texture binding, not the texture. We rebind every frame, so the | |
1010 // size passed to ProvidePictureBuffers() is meaningless. | |
1011 if (picture_size_ != frame.coded_size) { | |
1012 // Dismiss current pictures. | |
1013 for (int32_t picture_id : assigned_picture_ids_) | |
1014 client_->DismissPictureBuffer(picture_id); | |
1015 assigned_picture_ids_.clear(); | |
1016 available_picture_ids_.clear(); | |
1017 | |
1018 // Request new pictures. | |
1019 picture_size_ = frame.coded_size; | |
1020 client_->ProvidePictureBuffers(kNumPictureBuffers, 1, coded_size_, | |
1021 GL_TEXTURE_RECTANGLE_ARB); | |
1022 return false; | |
1023 } | |
1024 if (!SendFrame(frame)) | |
1025 return false; | |
1026 } | |
1027 | |
1028 return true; | |
1029 } | |
1030 | |
1031 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { | |
1032 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1033 DCHECK_EQ(state_, STATE_DECODING); | |
1034 | |
1035 if (available_picture_ids_.empty()) | |
1036 return false; | |
1037 | |
1038 int32_t picture_id = available_picture_ids_.back(); | |
1039 DCHECK(picture_info_map_.count(picture_id)); | |
1040 PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get(); | |
1041 DCHECK(!picture_info->cv_image); | |
1042 DCHECK(!picture_info->gl_image); | |
1043 | |
1044 if (!make_context_current_cb_.Run()) { | |
1045 DLOG(ERROR) << "Failed to make GL context current"; | |
1046 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR); | |
1047 return false; | |
1048 } | |
1049 | |
1050 scoped_refptr<gl::GLImageIOSurface> gl_image( | |
1051 new gl::GLImageIOSurface(frame.coded_size, GL_BGRA_EXT)); | |
1052 if (!gl_image->InitializeWithCVPixelBuffer( | |
1053 frame.image.get(), gfx::GenericSharedMemoryId(), | |
1054 gfx::BufferFormat::YUV_420_BIPLANAR)) { | |
1055 NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE, | |
1056 SFT_PLATFORM_ERROR); | |
1057 } | |
1058 | |
1059 if (!bind_image_cb_.Run(picture_info->client_texture_id, | |
1060 GL_TEXTURE_RECTANGLE_ARB, gl_image, false)) { | |
1061 DLOG(ERROR) << "Failed to bind image"; | |
1062 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR); | |
1063 return false; | |
1064 } | |
1065 | |
1066 // Assign the new image(s) to the the picture info. | |
1067 picture_info->gl_image = gl_image; | |
1068 picture_info->cv_image = frame.image; | |
1069 available_picture_ids_.pop_back(); | |
1070 | |
1071 // TODO(sandersd): Currently, the size got from | |
1072 // CMVideoFormatDescriptionGetDimensions is visible size. We pass it to | |
1073 // GpuVideoDecoder so that GpuVideoDecoder can use correct visible size in | |
1074 // resolution changed. We should find the correct API to get the real | |
1075 // coded size and fix it. | |
1076 client_->PictureReady(media::Picture(picture_id, frame.bitstream_id, | |
1077 gfx::Rect(frame.coded_size), | |
1078 true)); | |
1079 return true; | |
1080 } | |
1081 | |
// Reports a fatal error. Safe to call from any thread: when called off the
// GPU thread it re-posts itself and returns. On the GPU thread it transitions
// STATE_DECODING -> STATE_ERROR (at most once), records the session failure
// reason, and notifies the client.
void VTVideoDecodeAccelerator::NotifyError(
    Error vda_error_type,
    VTVDASessionFailureType session_failure_type) {
  DCHECK_LT(session_failure_type, SFT_MAX + 1);
  if (!gpu_thread_checker_.CalledOnValidThread()) {
    gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
        &VTVideoDecodeAccelerator::NotifyError, weak_this_, vda_error_type,
        session_failure_type));
  } else if (state_ == STATE_DECODING) {
    state_ = STATE_ERROR;
    // SFT_MAX + 1 is the UMA bucket count (values 0..SFT_MAX inclusive).
    UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
                              session_failure_type,
                              SFT_MAX + 1);
    client_->NotifyError(vda_error_type);
  }
}
1098 | |
// Records a pending flush-type task (flush, reset, or destroy) and asks the
// decoder thread to drain. |pending_flush_tasks_| must be pushed before the
// task is posted so FlushDone()/ProcessTaskQueue() can match them up.
void VTVideoDecodeAccelerator::QueueFlush(TaskType type) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  pending_flush_tasks_.push(type);
  decoder_thread_.task_runner()->PostTask(
      FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::FlushTask,
                            base::Unretained(this), type));

  // If this is a new flush request, see if we can make progress.
  if (pending_flush_tasks_.size() == 1)
    ProcessWorkQueues();
}
1110 | |
1111 void VTVideoDecodeAccelerator::Flush() { | |
1112 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1113 QueueFlush(TASK_FLUSH); | |
1114 } | |
1115 | |
1116 void VTVideoDecodeAccelerator::Reset() { | |
1117 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1118 QueueFlush(TASK_RESET); | |
1119 } | |
1120 | |
// Client entry point (GPU thread). Tears down |this|: either immediately (if
// the decoder thread is already dead) or asynchronously via a TASK_DESTROY
// flush, after which ProcessWorkQueues() runs |delete this|.
void VTVideoDecodeAccelerator::Destroy() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());

  // In a forceful shutdown, the decoder thread may be dead already.
  if (!decoder_thread_.IsRunning()) {
    delete this;
    return;
  }

  // For a graceful shutdown, return assigned buffers and flush before
  // destructing |this|.
  // TODO(sandersd): Prevent the decoder from reading buffers before discarding
  // them.
  for (int32_t bitstream_id : assigned_bitstream_ids_)
    client_->NotifyEndOfBitstreamBuffer(bitstream_id);
  assigned_bitstream_ids_.clear();
  state_ = STATE_DESTROYING;
  QueueFlush(TASK_DESTROY);
}
1140 | |
1141 bool VTVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( | |
1142 const base::WeakPtr<Client>& decode_client, | |
1143 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { | |
1144 return false; | |
1145 } | |
1146 | |
1147 // static | |
1148 media::VideoDecodeAccelerator::SupportedProfiles | |
1149 VTVideoDecodeAccelerator::GetSupportedProfiles() { | |
1150 SupportedProfiles profiles; | |
1151 for (const auto& supported_profile : kSupportedProfiles) { | |
1152 SupportedProfile profile; | |
1153 profile.profile = supported_profile; | |
1154 profile.min_resolution.SetSize(16, 16); | |
1155 profile.max_resolution.SetSize(4096, 2160); | |
1156 profiles.push_back(profile); | |
1157 } | |
1158 return profiles; | |
1159 } | |
1160 | |
1161 } // namespace content | |
OLD | NEW |