OLD | NEW |
| (Empty) |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include <algorithm> | |
6 | |
7 #include <CoreVideo/CoreVideo.h> | |
8 #include <OpenGL/CGLIOSurface.h> | |
9 #include <OpenGL/gl.h> | |
10 | |
11 #include "base/bind.h" | |
12 #include "base/command_line.h" | |
13 #include "base/logging.h" | |
14 #include "base/mac/mac_logging.h" | |
15 #include "base/metrics/histogram_macros.h" | |
16 #include "base/sys_byteorder.h" | |
17 #include "base/sys_info.h" | |
18 #include "base/thread_task_runner_handle.h" | |
19 #include "base/version.h" | |
20 #include "content/common/gpu/media/vt_video_decode_accelerator.h" | |
21 #include "content/public/common/content_switches.h" | |
22 #include "media/base/limits.h" | |
23 #include "ui/gl/gl_context.h" | |
24 #include "ui/gl/gl_image_io_surface.h" | |
25 #include "ui/gl/gl_implementation.h" | |
26 #include "ui/gl/scoped_binders.h" | |
27 | |
28 using content_common_gpu_media::kModuleVt; | |
29 using content_common_gpu_media::InitializeStubs; | |
30 using content_common_gpu_media::IsVtInitialized; | |
31 using content_common_gpu_media::StubPathMap; | |
32 | |
// Logs |status| (an OSStatus) under |name| and reports a PLATFORM_FAILURE to
// the client, tagged with the given session-failure-type enumerator. Only
// usable where NotifyError() is in scope (VTVideoDecodeAccelerator members).
#define NOTIFY_STATUS(name, status, session_failure) \
    do { \
      OSSTATUS_DLOG(ERROR, status) << name; \
      NotifyError(PLATFORM_FAILURE, session_failure); \
    } while (0)
38 | |
39 namespace content { | |
40 | |
// Only H.264 with 4:2:0 chroma sampling is supported.
// Initialize() rejects any profile not listed here.
static const media::VideoCodecProfile kSupportedProfiles[] = {
    media::H264PROFILE_BASELINE,
    media::H264PROFILE_MAIN,
    media::H264PROFILE_EXTENDED,
    media::H264PROFILE_HIGH,
    media::H264PROFILE_HIGH10PROFILE,
    media::H264PROFILE_SCALABLEBASELINE,
    media::H264PROFILE_SCALABLEHIGH,
    media::H264PROFILE_STEREOHIGH,
    media::H264PROFILE_MULTIVIEWHIGH,
};

// Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
static const int kNALUHeaderLength = 4;

// We request 5 picture buffers from the client, each of which has a texture ID
// that we can bind decoded frames to. We need enough to satisfy preroll, and
// enough to avoid unnecessary stalling, but no more than that. The resource
// requirements are low, as we don't need the textures to be backed by storage.
static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;

// Maximum number of frames to queue for reordering before we stop asking for
// more. (NotifyEndOfBitstreamBuffer() is called when frames are moved into the
// reorder queue.)
static const int kMaxReorderQueueSize = 16;
67 | |
68 // Build an |image_config| dictionary for VideoToolbox initialization. | |
69 static base::ScopedCFTypeRef<CFMutableDictionaryRef> | |
70 BuildImageConfig(CMVideoDimensions coded_dimensions) { | |
71 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config; | |
72 | |
73 // 4:2:2 is used over the native 4:2:0 because only 4:2:2 can be directly | |
74 // bound to a texture by CGLTexImageIOSurface2D(). | |
75 int32_t pixel_format = kCVPixelFormatType_422YpCbCr8; | |
76 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) | |
77 base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format)); | |
78 base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width)); | |
79 base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_dimensions.height)); | |
80 #undef CFINT | |
81 if (!cf_pixel_format.get() || !cf_width.get() || !cf_height.get()) | |
82 return image_config; | |
83 | |
84 image_config.reset( | |
85 CFDictionaryCreateMutable( | |
86 kCFAllocatorDefault, | |
87 4, // capacity | |
88 &kCFTypeDictionaryKeyCallBacks, | |
89 &kCFTypeDictionaryValueCallBacks)); | |
90 if (!image_config.get()) | |
91 return image_config; | |
92 | |
93 CFDictionarySetValue(image_config, kCVPixelBufferPixelFormatTypeKey, | |
94 cf_pixel_format); | |
95 CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width); | |
96 CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height); | |
97 CFDictionarySetValue(image_config, kCVPixelBufferOpenGLCompatibilityKey, | |
98 kCFBooleanTrue); | |
99 | |
100 return image_config; | |
101 } | |
102 | |
103 // Create a VTDecompressionSession using the provided |pps| and |sps|. If | |
104 // |require_hardware| is true, the session must uses real hardware decoding | |
105 // (as opposed to software decoding inside of VideoToolbox) to be considered | |
106 // successful. | |
107 // | |
108 // TODO(sandersd): Merge with ConfigureDecoder(), as the code is very similar. | |
109 static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size, | |
110 const uint8_t* pps, size_t pps_size, | |
111 bool require_hardware) { | |
112 const uint8_t* data_ptrs[] = {sps, pps}; | |
113 const size_t data_sizes[] = {sps_size, pps_size}; | |
114 | |
115 base::ScopedCFTypeRef<CMFormatDescriptionRef> format; | |
116 OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets( | |
117 kCFAllocatorDefault, | |
118 2, // parameter_set_count | |
119 data_ptrs, // ¶meter_set_pointers | |
120 data_sizes, // ¶meter_set_sizes | |
121 kNALUHeaderLength, // nal_unit_header_length | |
122 format.InitializeInto()); | |
123 if (status) { | |
124 OSSTATUS_DLOG(WARNING, status) | |
125 << "Failed to create CMVideoFormatDescription."; | |
126 return false; | |
127 } | |
128 | |
129 base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config( | |
130 CFDictionaryCreateMutable( | |
131 kCFAllocatorDefault, | |
132 1, // capacity | |
133 &kCFTypeDictionaryKeyCallBacks, | |
134 &kCFTypeDictionaryValueCallBacks)); | |
135 if (!decoder_config.get()) | |
136 return false; | |
137 | |
138 if (require_hardware) { | |
139 CFDictionarySetValue( | |
140 decoder_config, | |
141 // kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder | |
142 CFSTR("RequireHardwareAcceleratedVideoDecoder"), | |
143 kCFBooleanTrue); | |
144 } | |
145 | |
146 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config( | |
147 BuildImageConfig(CMVideoFormatDescriptionGetDimensions(format))); | |
148 if (!image_config.get()) | |
149 return false; | |
150 | |
151 VTDecompressionOutputCallbackRecord callback = {0}; | |
152 | |
153 base::ScopedCFTypeRef<VTDecompressionSessionRef> session; | |
154 status = VTDecompressionSessionCreate( | |
155 kCFAllocatorDefault, | |
156 format, // video_format_description | |
157 decoder_config, // video_decoder_specification | |
158 image_config, // destination_image_buffer_attributes | |
159 &callback, // output_callback | |
160 session.InitializeInto()); | |
161 if (status) { | |
162 OSSTATUS_DLOG(WARNING, status) << "Failed to create VTDecompressionSession"; | |
163 return false; | |
164 } | |
165 | |
166 return true; | |
167 } | |
168 | |
169 // The purpose of this function is to preload the generic and hardware-specific | |
170 // libraries required by VideoToolbox before the GPU sandbox is enabled. | |
171 // VideoToolbox normally loads the hardware-specific libraries lazily, so we | |
172 // must actually create a decompression session. If creating a decompression | |
173 // session fails, hardware decoding will be disabled (Initialize() will always | |
174 // return false). | |
175 static bool InitializeVideoToolboxInternal() { | |
176 if (base::CommandLine::ForCurrentProcess()->HasSwitch( | |
177 switches::kDisableAcceleratedVideoDecode)) { | |
178 return false; | |
179 } | |
180 | |
181 if (!IsVtInitialized()) { | |
182 // CoreVideo is also required, but the loader stops after the first path is | |
183 // loaded. Instead we rely on the transitive dependency from VideoToolbox to | |
184 // CoreVideo. | |
185 StubPathMap paths; | |
186 paths[kModuleVt].push_back(FILE_PATH_LITERAL( | |
187 "/System/Library/Frameworks/VideoToolbox.framework/VideoToolbox")); | |
188 if (!InitializeStubs(paths)) { | |
189 LOG(WARNING) << "Failed to initialize VideoToolbox framework. " | |
190 << "Hardware accelerated video decoding will be disabled."; | |
191 return false; | |
192 } | |
193 } | |
194 | |
195 // Create a hardware decoding session. | |
196 // SPS and PPS data are taken from a 480p sample (buck2.mp4). | |
197 const uint8_t sps_normal[] = {0x67, 0x64, 0x00, 0x1e, 0xac, 0xd9, 0x80, 0xd4, | |
198 0x3d, 0xa1, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, | |
199 0x00, 0x03, 0x00, 0x30, 0x8f, 0x16, 0x2d, 0x9a}; | |
200 const uint8_t pps_normal[] = {0x68, 0xe9, 0x7b, 0xcb}; | |
201 if (!CreateVideoToolboxSession(sps_normal, arraysize(sps_normal), pps_normal, | |
202 arraysize(pps_normal), true)) { | |
203 LOG(WARNING) << "Failed to create hardware VideoToolbox session. " | |
204 << "Hardware accelerated video decoding will be disabled."; | |
205 return false; | |
206 } | |
207 | |
208 // Create a software decoding session. | |
209 // SPS and PPS data are taken from a 18p sample (small2.mp4). | |
210 const uint8_t sps_small[] = {0x67, 0x64, 0x00, 0x0a, 0xac, 0xd9, 0x89, 0x7e, | |
211 0x22, 0x10, 0x00, 0x00, 0x3e, 0x90, 0x00, 0x0e, | |
212 0xa6, 0x08, 0xf1, 0x22, 0x59, 0xa0}; | |
213 const uint8_t pps_small[] = {0x68, 0xe9, 0x79, 0x72, 0xc0}; | |
214 if (!CreateVideoToolboxSession(sps_small, arraysize(sps_small), pps_small, | |
215 arraysize(pps_small), false)) { | |
216 LOG(WARNING) << "Failed to create software VideoToolbox session. " | |
217 << "Hardware accelerated video decoding will be disabled."; | |
218 return false; | |
219 } | |
220 | |
221 return true; | |
222 } | |
223 | |
224 bool InitializeVideoToolbox() { | |
225 // InitializeVideoToolbox() is called only from the GPU process main thread; | |
226 // once for sandbox warmup, and then once each time a VTVideoDecodeAccelerator | |
227 // is initialized. | |
228 static bool attempted = false; | |
229 static bool succeeded = false; | |
230 | |
231 if (!attempted) { | |
232 attempted = true; | |
233 succeeded = InitializeVideoToolboxInternal(); | |
234 } | |
235 | |
236 return succeeded; | |
237 } | |
238 | |
239 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. | |
240 static void OutputThunk( | |
241 void* decompression_output_refcon, | |
242 void* source_frame_refcon, | |
243 OSStatus status, | |
244 VTDecodeInfoFlags info_flags, | |
245 CVImageBufferRef image_buffer, | |
246 CMTime presentation_time_stamp, | |
247 CMTime presentation_duration) { | |
248 VTVideoDecodeAccelerator* vda = | |
249 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); | |
250 vda->Output(source_frame_refcon, status, image_buffer); | |
251 } | |
252 | |
VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
}

VTVideoDecodeAccelerator::Task::~Task() {
}

// Frames start with no POC, not flagged as IDR, and an empty reorder window;
// these fields are filled in by DecodeTask() while parsing the bitstream.
VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
    : bitstream_id(bitstream_id),
      pic_order_cnt(0),
      is_idr(false),
      reorder_window(0) {
}

VTVideoDecodeAccelerator::Frame::~Frame() {
}

VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
                                                   uint32_t service_texture_id)
    : client_texture_id(client_texture_id),
      service_texture_id(service_texture_id) {}

VTVideoDecodeAccelerator::PictureInfo::~PictureInfo() {
  // NOTE(review): Destroy(false) — presumably "no GL context available" at
  // destruction time; confirm against gl::GLImage::Destroy() semantics.
  if (gl_image)
    gl_image->Destroy(false);
}
278 | |
279 bool VTVideoDecodeAccelerator::FrameOrder::operator()( | |
280 const linked_ptr<Frame>& lhs, | |
281 const linked_ptr<Frame>& rhs) const { | |
282 if (lhs->pic_order_cnt != rhs->pic_order_cnt) | |
283 return lhs->pic_order_cnt > rhs->pic_order_cnt; | |
284 // If |pic_order_cnt| is the same, fall back on using the bitstream order. | |
285 // TODO(sandersd): Assign a sequence number in Decode() and use that instead. | |
286 // TODO(sandersd): Using the sequence number, ensure that frames older than | |
287 // |kMaxReorderQueueSize| are ordered first, regardless of |pic_order_cnt|. | |
288 return lhs->bitstream_id > rhs->bitstream_id; | |
289 } | |
290 | |
// Constructed on the GPU thread; |gpu_task_runner_| is captured here so the
// decoder thread and VideoToolbox callbacks can post results back to it.
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
    const base::Callback<bool(void)>& make_context_current,
    const base::Callback<void(uint32, uint32, scoped_refptr<gl::GLImage>)>&
        bind_image)
    : make_context_current_(make_context_current),
      bind_image_(bind_image),
      client_(nullptr),
      state_(STATE_DECODING),
      format_(nullptr),
      session_(nullptr),
      last_sps_id_(-1),
      last_pps_id_(-1),
      gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      decoder_thread_("VTDecoderThread"),
      weak_this_factory_(this) {
  DCHECK(!make_context_current_.is_null());
  // Route VideoToolbox output callbacks through OutputThunk() back to |this|.
  callback_.decompressionOutputCallback = OutputThunk;
  callback_.decompressionOutputRefCon = this;
  weak_this_ = weak_this_factory_.GetWeakPtr();
}

VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
}
315 | |
316 bool VTVideoDecodeAccelerator::Initialize( | |
317 media::VideoCodecProfile profile, | |
318 Client* client) { | |
319 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
320 client_ = client; | |
321 | |
322 if (!InitializeVideoToolbox()) | |
323 return false; | |
324 | |
325 bool profile_supported = false; | |
326 for (const auto& supported_profile : kSupportedProfiles) { | |
327 if (profile == supported_profile) { | |
328 profile_supported = true; | |
329 break; | |
330 } | |
331 } | |
332 if (!profile_supported) | |
333 return false; | |
334 | |
335 // Spawn a thread to handle parsing and calling VideoToolbox. | |
336 if (!decoder_thread_.Start()) | |
337 return false; | |
338 | |
339 // Count the session as successfully initialized. | |
340 UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason", | |
341 SFT_SUCCESSFULLY_INITIALIZED, | |
342 SFT_MAX + 1); | |
343 return true; | |
344 } | |
345 | |
346 bool VTVideoDecodeAccelerator::FinishDelayedFrames() { | |
347 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | |
348 if (session_) { | |
349 OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_); | |
350 if (status) { | |
351 NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()", | |
352 status, SFT_PLATFORM_ERROR); | |
353 return false; | |
354 } | |
355 } | |
356 return true; | |
357 } | |
358 | |
// (Re)creates |session_| from the most recently seen parameter sets
// (|last_sps_|, optional |last_spsext_|, |last_pps_|). Runs on the decoder
// thread. Returns false (after notifying an error) on failure. The old
// session must drain before the new one is created; see FinishDelayedFrames()
// below.
bool VTVideoDecodeAccelerator::ConfigureDecoder() {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!last_sps_.empty());
  DCHECK(!last_pps_.empty());

  // Build the configuration records.
  std::vector<const uint8_t*> nalu_data_ptrs;
  std::vector<size_t> nalu_data_sizes;
  nalu_data_ptrs.reserve(3);
  nalu_data_sizes.reserve(3);
  nalu_data_ptrs.push_back(&last_sps_.front());
  nalu_data_sizes.push_back(last_sps_.size());
  if (!last_spsext_.empty()) {
    nalu_data_ptrs.push_back(&last_spsext_.front());
    nalu_data_sizes.push_back(last_spsext_.size());
  }
  nalu_data_ptrs.push_back(&last_pps_.front());
  nalu_data_sizes.push_back(last_pps_.size());

  // Construct a new format description from the parameter sets.
  format_.reset();
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      nalu_data_ptrs.size(),     // parameter_set_count
      &nalu_data_ptrs.front(),   // &parameter_set_pointers
      &nalu_data_sizes.front(),  // &parameter_set_sizes
      kNALUHeaderLength,         // nal_unit_header_length
      format_.InitializeInto());
  if (status) {
    NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
                  status, SFT_PLATFORM_ERROR);
    return false;
  }

  // Store the new configuration data.
  // TODO(sandersd): Despite the documentation, this seems to return the visible
  // size. However, the output always appears to be top-left aligned, so it
  // makes no difference. Re-verify this and update the variable name.
  CMVideoDimensions coded_dimensions =
      CMVideoFormatDescriptionGetDimensions(format_);
  coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height);

  // Prepare VideoToolbox configuration dictionaries.
  base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          1,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!decoder_config.get()) {
    DLOG(ERROR) << "Failed to create CFMutableDictionary.";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // Request (but do not require) hardware decoding; VideoToolbox may fall
  // back to its software decoder.
  CFDictionarySetValue(
      decoder_config,
      // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
      CFSTR("EnableHardwareAcceleratedVideoDecoder"),
      kCFBooleanTrue);

  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
      BuildImageConfig(coded_dimensions));
  if (!image_config.get()) {
    DLOG(ERROR) << "Failed to create decoder image configuration.";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // Ensure that the old decoder emits all frames before the new decoder can
  // emit any.
  if (!FinishDelayedFrames())
    return false;

  session_.reset();
  status = VTDecompressionSessionCreate(
      kCFAllocatorDefault,
      format_,         // video_format_description
      decoder_config,  // video_decoder_specification
      image_config,    // destination_image_buffer_attributes
      &callback_,      // output_callback
      session_.InitializeInto());
  if (status) {
    NOTIFY_STATUS("VTDecompressionSessionCreate()", status,
                  SFT_UNSUPPORTED_STREAM_PARAMETERS);
    return false;
  }

  // Report whether hardware decode is being used.
  bool using_hardware = false;
  base::ScopedCFTypeRef<CFBooleanRef> cf_using_hardware;
  if (VTSessionCopyProperty(
          session_,
          // kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder
          CFSTR("UsingHardwareAcceleratedVideoDecoder"),
          kCFAllocatorDefault,
          cf_using_hardware.InitializeInto()) == 0) {
    using_hardware = CFBooleanGetValue(cf_using_hardware);
  }
  UMA_HISTOGRAM_BOOLEAN("Media.VTVDA.HardwareAccelerated", using_hardware);

  return true;
}
462 | |
463 void VTVideoDecodeAccelerator::DecodeTask( | |
464 const media::BitstreamBuffer& bitstream, | |
465 Frame* frame) { | |
466 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | |
467 | |
468 // Map the bitstream buffer. | |
469 base::SharedMemory memory(bitstream.handle(), true); | |
470 size_t size = bitstream.size(); | |
471 if (!memory.Map(size)) { | |
472 DLOG(ERROR) << "Failed to map bitstream buffer"; | |
473 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR); | |
474 return; | |
475 } | |
476 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); | |
477 | |
478 // NALUs are stored with Annex B format in the bitstream buffer (start codes), | |
479 // but VideoToolbox expects AVC format (length headers), so we must rewrite | |
480 // the data. | |
481 // | |
482 // Locate relevant NALUs and compute the size of the rewritten data. Also | |
483 // record any parameter sets for VideoToolbox initialization. | |
484 std::vector<uint8_t> sps; | |
485 std::vector<uint8_t> spsext; | |
486 std::vector<uint8_t> pps; | |
487 bool has_slice = false; | |
488 size_t data_size = 0; | |
489 std::vector<media::H264NALU> nalus; | |
490 parser_.SetStream(buf, size); | |
491 media::H264NALU nalu; | |
492 while (true) { | |
493 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); | |
494 if (result == media::H264Parser::kEOStream) | |
495 break; | |
496 if (result == media::H264Parser::kUnsupportedStream) { | |
497 DLOG(ERROR) << "Unsupported H.264 stream"; | |
498 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
499 return; | |
500 } | |
501 if (result != media::H264Parser::kOk) { | |
502 DLOG(ERROR) << "Failed to parse H.264 stream"; | |
503 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
504 return; | |
505 } | |
506 switch (nalu.nal_unit_type) { | |
507 case media::H264NALU::kSPS: | |
508 result = parser_.ParseSPS(&last_sps_id_); | |
509 if (result == media::H264Parser::kUnsupportedStream) { | |
510 DLOG(ERROR) << "Unsupported SPS"; | |
511 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
512 return; | |
513 } | |
514 if (result != media::H264Parser::kOk) { | |
515 DLOG(ERROR) << "Could not parse SPS"; | |
516 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
517 return; | |
518 } | |
519 sps.assign(nalu.data, nalu.data + nalu.size); | |
520 spsext.clear(); | |
521 break; | |
522 | |
523 case media::H264NALU::kSPSExt: | |
524 // TODO(sandersd): Check that the previous NALU was an SPS. | |
525 spsext.assign(nalu.data, nalu.data + nalu.size); | |
526 break; | |
527 | |
528 case media::H264NALU::kPPS: | |
529 result = parser_.ParsePPS(&last_pps_id_); | |
530 if (result == media::H264Parser::kUnsupportedStream) { | |
531 DLOG(ERROR) << "Unsupported PPS"; | |
532 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
533 return; | |
534 } | |
535 if (result != media::H264Parser::kOk) { | |
536 DLOG(ERROR) << "Could not parse PPS"; | |
537 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
538 return; | |
539 } | |
540 pps.assign(nalu.data, nalu.data + nalu.size); | |
541 break; | |
542 | |
543 case media::H264NALU::kSliceDataA: | |
544 case media::H264NALU::kSliceDataB: | |
545 case media::H264NALU::kSliceDataC: | |
546 case media::H264NALU::kNonIDRSlice: | |
547 case media::H264NALU::kIDRSlice: | |
548 // Compute the |pic_order_cnt| for the picture from the first slice. | |
549 if (!has_slice) { | |
550 media::H264SliceHeader slice_hdr; | |
551 result = parser_.ParseSliceHeader(nalu, &slice_hdr); | |
552 if (result == media::H264Parser::kUnsupportedStream) { | |
553 DLOG(ERROR) << "Unsupported slice header"; | |
554 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM); | |
555 return; | |
556 } | |
557 if (result != media::H264Parser::kOk) { | |
558 DLOG(ERROR) << "Could not parse slice header"; | |
559 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
560 return; | |
561 } | |
562 | |
563 // TODO(sandersd): Maintain a cache of configurations and reconfigure | |
564 // when a slice references a new config. | |
565 DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_); | |
566 const media::H264PPS* pps = | |
567 parser_.GetPPS(slice_hdr.pic_parameter_set_id); | |
568 if (!pps) { | |
569 DLOG(ERROR) << "Mising PPS referenced by slice"; | |
570 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
571 return; | |
572 } | |
573 | |
574 DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_); | |
575 const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | |
576 if (!sps) { | |
577 DLOG(ERROR) << "Mising SPS referenced by PPS"; | |
578 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
579 return; | |
580 } | |
581 | |
582 if (!poc_.ComputePicOrderCnt(sps, slice_hdr, &frame->pic_order_cnt)) { | |
583 DLOG(ERROR) << "Unable to compute POC"; | |
584 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM); | |
585 return; | |
586 } | |
587 | |
588 if (nalu.nal_unit_type == media::H264NALU::kIDRSlice) | |
589 frame->is_idr = true; | |
590 | |
591 if (sps->vui_parameters_present_flag && | |
592 sps->bitstream_restriction_flag) { | |
593 frame->reorder_window = std::min(sps->max_num_reorder_frames, | |
594 kMaxReorderQueueSize - 1); | |
595 } | |
596 } | |
597 has_slice = true; | |
598 default: | |
599 nalus.push_back(nalu); | |
600 data_size += kNALUHeaderLength + nalu.size; | |
601 break; | |
602 } | |
603 } | |
604 | |
605 // Initialize VideoToolbox. | |
606 bool config_changed = false; | |
607 if (!sps.empty() && sps != last_sps_) { | |
608 last_sps_.swap(sps); | |
609 last_spsext_.swap(spsext); | |
610 config_changed = true; | |
611 } | |
612 if (!pps.empty() && pps != last_pps_) { | |
613 last_pps_.swap(pps); | |
614 config_changed = true; | |
615 } | |
616 if (config_changed) { | |
617 if (last_sps_.empty()) { | |
618 DLOG(ERROR) << "Invalid configuration; no SPS"; | |
619 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
620 return; | |
621 } | |
622 if (last_pps_.empty()) { | |
623 DLOG(ERROR) << "Invalid configuration; no PPS"; | |
624 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
625 return; | |
626 } | |
627 | |
628 // If it's not an IDR frame, we can't reconfigure the decoder anyway. We | |
629 // assume that any config change not on an IDR must be compatible. | |
630 if (frame->is_idr && !ConfigureDecoder()) | |
631 return; | |
632 } | |
633 | |
634 // If there are no image slices, drop the bitstream buffer by returning an | |
635 // empty frame. | |
636 if (!has_slice) { | |
637 if (!FinishDelayedFrames()) | |
638 return; | |
639 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
640 &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame)); | |
641 return; | |
642 } | |
643 | |
644 // If the session is not configured by this point, fail. | |
645 if (!session_) { | |
646 DLOG(ERROR) << "Cannot decode without configuration"; | |
647 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | |
648 return; | |
649 } | |
650 | |
651 // Update the frame metadata with configuration data. | |
652 frame->coded_size = coded_size_; | |
653 | |
654 // Create a memory-backed CMBlockBuffer for the translated data. | |
655 // TODO(sandersd): Pool of memory blocks. | |
656 base::ScopedCFTypeRef<CMBlockBufferRef> data; | |
657 OSStatus status = CMBlockBufferCreateWithMemoryBlock( | |
658 kCFAllocatorDefault, | |
659 nullptr, // &memory_block | |
660 data_size, // block_length | |
661 kCFAllocatorDefault, // block_allocator | |
662 nullptr, // &custom_block_source | |
663 0, // offset_to_data | |
664 data_size, // data_length | |
665 0, // flags | |
666 data.InitializeInto()); | |
667 if (status) { | |
668 NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status, | |
669 SFT_PLATFORM_ERROR); | |
670 return; | |
671 } | |
672 | |
673 // Make sure that the memory is actually allocated. | |
674 // CMBlockBufferReplaceDataBytes() is documented to do this, but prints a | |
675 // message each time starting in Mac OS X 10.10. | |
676 status = CMBlockBufferAssureBlockMemory(data); | |
677 if (status) { | |
678 NOTIFY_STATUS("CMBlockBufferAssureBlockMemory()", status, | |
679 SFT_PLATFORM_ERROR); | |
680 return; | |
681 } | |
682 | |
683 // Copy NALU data into the CMBlockBuffer, inserting length headers. | |
684 size_t offset = 0; | |
685 for (size_t i = 0; i < nalus.size(); i++) { | |
686 media::H264NALU& nalu = nalus[i]; | |
687 uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size)); | |
688 status = CMBlockBufferReplaceDataBytes( | |
689 &header, data, offset, kNALUHeaderLength); | |
690 if (status) { | |
691 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status, | |
692 SFT_PLATFORM_ERROR); | |
693 return; | |
694 } | |
695 offset += kNALUHeaderLength; | |
696 status = CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size); | |
697 if (status) { | |
698 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status, | |
699 SFT_PLATFORM_ERROR); | |
700 return; | |
701 } | |
702 offset += nalu.size; | |
703 } | |
704 | |
705 // Package the data in a CMSampleBuffer. | |
706 base::ScopedCFTypeRef<CMSampleBufferRef> sample; | |
707 status = CMSampleBufferCreate( | |
708 kCFAllocatorDefault, | |
709 data, // data_buffer | |
710 true, // data_ready | |
711 nullptr, // make_data_ready_callback | |
712 nullptr, // make_data_ready_refcon | |
713 format_, // format_description | |
714 1, // num_samples | |
715 0, // num_sample_timing_entries | |
716 nullptr, // &sample_timing_array | |
717 1, // num_sample_size_entries | |
718 &data_size, // &sample_size_array | |
719 sample.InitializeInto()); | |
720 if (status) { | |
721 NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR); | |
722 return; | |
723 } | |
724 | |
725 // Send the frame for decoding. | |
726 // Asynchronous Decompression allows for parallel submission of frames | |
727 // (without it, DecodeFrame() does not return until the frame has been | |
728 // decoded). We don't enable Temporal Processing so that frames are always | |
729 // returned in decode order; this makes it easier to avoid deadlock. | |
730 VTDecodeFrameFlags decode_flags = | |
731 kVTDecodeFrame_EnableAsynchronousDecompression; | |
732 status = VTDecompressionSessionDecodeFrame( | |
733 session_, | |
734 sample, // sample_buffer | |
735 decode_flags, // decode_flags | |
736 reinterpret_cast<void*>(frame), // source_frame_refcon | |
737 nullptr); // &info_flags_out | |
738 if (status) { | |
739 NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status, | |
740 SFT_DECODE_ERROR); | |
741 return; | |
742 } | |
743 } | |
744 | |
745 // This method may be called on any VideoToolbox thread. | |
746 void VTVideoDecodeAccelerator::Output( | |
747 void* source_frame_refcon, | |
748 OSStatus status, | |
749 CVImageBufferRef image_buffer) { | |
750 if (status) { | |
751 NOTIFY_STATUS("Decoding", status, SFT_DECODE_ERROR); | |
752 return; | |
753 } | |
754 | |
755 // The type of |image_buffer| is CVImageBuffer, but we only handle | |
756 // CVPixelBuffers. This should be guaranteed as we set | |
757 // kCVPixelBufferOpenGLCompatibilityKey in |image_config|. | |
758 // | |
759 // Sometimes, for unknown reasons (http://crbug.com/453050), |image_buffer| is | |
760 // NULL, which causes CFGetTypeID() to crash. While the rest of the code would | |
761 // smoothly handle NULL as a dropped frame, we choose to fail permanantly here | |
762 // until the issue is better understood. | |
763 if (!image_buffer || CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) { | |
764 DLOG(ERROR) << "Decoded frame is not a CVPixelBuffer"; | |
765 NotifyError(PLATFORM_FAILURE, SFT_DECODE_ERROR); | |
766 return; | |
767 } | |
768 | |
769 Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon); | |
770 frame->image.reset(image_buffer, base::scoped_policy::RETAIN); | |
771 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
772 &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame)); | |
773 } | |
774 | |
775 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { | |
776 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
777 DCHECK_EQ(1u, pending_frames_.count(frame->bitstream_id)); | |
778 Task task(TASK_FRAME); | |
779 task.frame = pending_frames_[frame->bitstream_id]; | |
780 pending_frames_.erase(frame->bitstream_id); | |
781 task_queue_.push(task); | |
782 ProcessWorkQueues(); | |
783 } | |
784 | |
785 void VTVideoDecodeAccelerator::FlushTask(TaskType type) { | |
786 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | |
787 FinishDelayedFrames(); | |
788 | |
789 // Always queue a task, even if FinishDelayedFrames() fails, so that | |
790 // destruction always completes. | |
791 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
792 &VTVideoDecodeAccelerator::FlushDone, weak_this_, type)); | |
793 } | |
794 | |
795 void VTVideoDecodeAccelerator::FlushDone(TaskType type) { | |
796 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
797 task_queue_.push(Task(type)); | |
798 ProcessWorkQueues(); | |
799 } | |
800 | |
801 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { | |
802 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
803 DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id())); | |
804 assigned_bitstream_ids_.insert(bitstream.id()); | |
805 Frame* frame = new Frame(bitstream.id()); | |
806 pending_frames_[frame->bitstream_id] = make_linked_ptr(frame); | |
807 decoder_thread_.task_runner()->PostTask( | |
808 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::DecodeTask, | |
809 base::Unretained(this), bitstream, frame)); | |
810 } | |
811 | |
812 void VTVideoDecodeAccelerator::AssignPictureBuffers( | |
813 const std::vector<media::PictureBuffer>& pictures) { | |
814 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
815 | |
816 for (const media::PictureBuffer& picture : pictures) { | |
817 DCHECK(!picture_info_map_.count(picture.id())); | |
818 assigned_picture_ids_.insert(picture.id()); | |
819 available_picture_ids_.push_back(picture.id()); | |
820 picture_info_map_.insert(std::make_pair( | |
821 picture.id(), | |
822 make_scoped_ptr(new PictureInfo(picture.internal_texture_id(), | |
823 picture.texture_id())))); | |
824 } | |
825 | |
826 // Pictures are not marked as uncleared until after this method returns, and | |
827 // they will be broken if they are used before that happens. So, schedule | |
828 // future work after that happens. | |
829 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
830 &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_)); | |
831 } | |
832 | |
// Client entry point: the client has finished displaying |picture_id| and is
// returning it. Releases the backing CVPixelBuffer and GL image, then either
// returns the buffer to the available pool or dismisses it.
void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK(picture_info_map_.count(picture_id));
  PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
  // The retain count must be exactly 1 (our reference): nothing else may
  // still be holding the pixel buffer when the client returns the picture.
  DCHECK_EQ(CFGetRetainCount(picture_info->cv_image), 1);
  picture_info->cv_image.reset();
  picture_info->gl_image->Destroy(false);
  picture_info->gl_image = nullptr;

  // If the picture is still assigned, it becomes available again and decoding
  // may be able to proceed; otherwise it was dismissed (e.g. after a size
  // change) and the client must be told now that it is safe to do so.
  if (assigned_picture_ids_.count(picture_id) != 0) {
    available_picture_ids_.push_back(picture_id);
    ProcessWorkQueues();
  } else {
    client_->DismissPictureBuffer(picture_id);
  }
}
849 | |
// Drives the state machine forward on the GPU thread. Behavior depends on
// |state_|: decode as much as possible, idle on error, or drain until the
// TASK_DESTROY marker is reached.
void VTVideoDecodeAccelerator::ProcessWorkQueues() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  switch (state_) {
    case STATE_DECODING:
      // TODO(sandersd): Batch where possible.
      // Alternate between the reorder queue and the task queue until neither
      // can make progress (or an error moves us out of STATE_DECODING).
      while (state_ == STATE_DECODING) {
        if (!ProcessReorderQueue() && !ProcessTaskQueue())
          break;
      }
      return;

    case STATE_ERROR:
      // Do nothing until Destroy() is called.
      return;

    case STATE_DESTROYING:
      // Drop tasks until we are ready to destruct.
      while (!task_queue_.empty()) {
        if (task_queue_.front().type == TASK_DESTROY) {
          // All preceding work has been discarded; |this| may now be deleted.
          // Do not touch any members after this point.
          delete this;
          return;
        }
        task_queue_.pop();
      }
      return;
  }
}
877 | |
// Attempts to execute the task at the head of |task_queue_|. Returns true if
// a task was consumed (progress was made), false if the head task cannot run
// yet or the queue is empty.
bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_DECODING);

  if (task_queue_.empty())
    return false;

  const Task& task = task_queue_.front();
  switch (task.type) {
    case TASK_FRAME:
      // A frame may enter the reorder queue only if there is room, and an IDR
      // frame must additionally wait for the reorder queue to drain.
      if (reorder_queue_.size() < kMaxReorderQueueSize &&
          (!task.frame->is_idr || reorder_queue_.empty())) {
        // The bitstream buffer is fully consumed once the frame is queued for
        // reordering; return it to the client now.
        assigned_bitstream_ids_.erase(task.frame->bitstream_id);
        client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
        reorder_queue_.push(task.frame);
        task_queue_.pop();
        return true;
      }
      return false;

    case TASK_FLUSH:
      DCHECK_EQ(task.type, pending_flush_tasks_.front());
      // A flush completes once every reordered frame has been sent out.
      if (reorder_queue_.size() == 0) {
        pending_flush_tasks_.pop();
        client_->NotifyFlushDone();
        task_queue_.pop();
        return true;
      }
      return false;

    case TASK_RESET:
      DCHECK_EQ(task.type, pending_flush_tasks_.front());
      // A reset completes once the reorder queue has drained; cached
      // parameter sets and POC state are cleared so the next stream
      // starts fresh.
      if (reorder_queue_.size() == 0) {
        last_sps_id_ = -1;
        last_pps_id_ = -1;
        last_sps_.clear();
        last_spsext_.clear();
        last_pps_.clear();
        poc_.Reset();
        pending_flush_tasks_.pop();
        client_->NotifyResetDone();
        task_queue_.pop();
        return true;
      }
      return false;

    case TASK_DESTROY:
      // Destruction is handled by ProcessWorkQueues() in STATE_DESTROYING;
      // reaching here means the state machine is broken.
      NOTREACHED() << "Can't destroy while in STATE_DECODING.";
      NotifyError(ILLEGAL_STATE, SFT_PLATFORM_ERROR);
      return false;
  }
}
930 | |
931 bool VTVideoDecodeAccelerator::ProcessReorderQueue() { | |
932 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
933 DCHECK_EQ(state_, STATE_DECODING); | |
934 | |
935 if (reorder_queue_.empty()) | |
936 return false; | |
937 | |
938 // If the next task is a flush (because there is a pending flush or becuase | |
939 // the next frame is an IDR), then we don't need a full reorder buffer to send | |
940 // the next frame. | |
941 bool flushing = !task_queue_.empty() && | |
942 (task_queue_.front().type != TASK_FRAME || | |
943 task_queue_.front().frame->is_idr); | |
944 | |
945 size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window); | |
946 if (flushing || reorder_queue_.size() > reorder_window) { | |
947 if (ProcessFrame(*reorder_queue_.top())) { | |
948 reorder_queue_.pop(); | |
949 return true; | |
950 } | |
951 } | |
952 | |
953 return false; | |
954 } | |
955 | |
956 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { | |
957 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
958 DCHECK_EQ(state_, STATE_DECODING); | |
959 | |
960 // If the next pending flush is for a reset, then the frame will be dropped. | |
961 bool resetting = !pending_flush_tasks_.empty() && | |
962 pending_flush_tasks_.front() == TASK_RESET; | |
963 | |
964 if (!resetting && frame.image.get()) { | |
965 // If the |coded_size| has changed, request new picture buffers and then | |
966 // wait for them. | |
967 // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of | |
968 // textures, this would be unnecessary, as the size is actually a property | |
969 // of the texture binding, not the texture. We rebind every frame, so the | |
970 // size passed to ProvidePictureBuffers() is meaningless. | |
971 if (picture_size_ != frame.coded_size) { | |
972 // Dismiss current pictures. | |
973 for (int32_t picture_id : assigned_picture_ids_) | |
974 client_->DismissPictureBuffer(picture_id); | |
975 assigned_picture_ids_.clear(); | |
976 available_picture_ids_.clear(); | |
977 | |
978 // Request new pictures. | |
979 picture_size_ = frame.coded_size; | |
980 client_->ProvidePictureBuffers( | |
981 kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB); | |
982 return false; | |
983 } | |
984 if (!SendFrame(frame)) | |
985 return false; | |
986 } | |
987 | |
988 return true; | |
989 } | |
990 | |
// Binds the frame's IOSurface to an available picture buffer's texture and
// delivers the picture to the client. Returns false if no picture buffer is
// available or a GL/CGL step fails (the frame stays in the reorder queue and
// will be retried).
bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_DECODING);

  if (available_picture_ids_.empty())
    return false;

  // Take (but do not yet pop) the most recently returned picture buffer; it
  // must be fully released from its previous use.
  int32_t picture_id = available_picture_ids_.back();
  DCHECK(picture_info_map_.count(picture_id));
  PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
  DCHECK(!picture_info->cv_image);
  DCHECK(!picture_info->gl_image);

  if (!make_context_current_.Run()) {
    DLOG(ERROR) << "Failed to make GL context current";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // Attach the decoded IOSurface to the service texture. The enable/disable
  // pair is skipped on core profile, where glEnable(GL_TEXTURE_RECTANGLE_ARB)
  // is invalid.
  IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get());
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGLCoreProfile)
    glEnable(GL_TEXTURE_RECTANGLE_ARB);
  gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_RECTANGLE_ARB,
                                          picture_info->service_texture_id);
  CGLContextObj cgl_context =
      static_cast<CGLContextObj>(gfx::GLContext::GetCurrent()->GetHandle());
  CGLError status = CGLTexImageIOSurface2D(
      cgl_context,                  // ctx
      GL_TEXTURE_RECTANGLE_ARB,     // target
      GL_RGB,                       // internal_format
      frame.coded_size.width(),     // width
      frame.coded_size.height(),    // height
      GL_YCBCR_422_APPLE,           // format
      GL_UNSIGNED_SHORT_8_8_APPLE,  // type
      surface,                      // io_surface
      0);                           // plane
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGLCoreProfile)
    glDisable(GL_TEXTURE_RECTANGLE_ARB);
  if (status != kCGLNoError) {
    NOTIFY_STATUS("CGLTexImageIOSurface2D()", status, SFT_PLATFORM_ERROR);
    return false;
  }

  // Wrap the IOSurface in a GLImage so it can be promoted to an overlay; if
  // initialization fails the picture is still usable, just not as an overlay.
  bool allow_overlay = false;
  scoped_refptr<gl::GLImageIOSurface> gl_image(
      new gl::GLImageIOSurface(frame.coded_size, GL_BGRA_EXT));
  if (gl_image->Initialize(surface, gfx::GenericSharedMemoryId(),
                           gfx::BufferFormat::BGRA_8888)) {
    allow_overlay = true;
  } else {
    gl_image = nullptr;
  }
  bind_image_.Run(picture_info->client_texture_id, GL_TEXTURE_RECTANGLE_ARB,
                  gl_image);

  // Assign the new image(s) to the the picture info. The picture buffer is
  // now in use, so remove it from the available pool.
  picture_info->gl_image = gl_image;
  picture_info->cv_image = frame.image;
  available_picture_ids_.pop_back();

  // TODO(sandersd): Currently, the size got from
  // CMVideoFormatDescriptionGetDimensions is visible size. We pass it to
  // GpuVideoDecoder so that GpuVideoDecoder can use correct visible size in
  // resolution changed. We should find the correct API to get the real
  // coded size and fix it.
  client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
                                       gfx::Rect(frame.coded_size),
                                       allow_overlay));
  return true;
}
1061 | |
1062 void VTVideoDecodeAccelerator::NotifyError( | |
1063 Error vda_error_type, | |
1064 VTVDASessionFailureType session_failure_type) { | |
1065 DCHECK_LT(session_failure_type, SFT_MAX + 1); | |
1066 if (!gpu_thread_checker_.CalledOnValidThread()) { | |
1067 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | |
1068 &VTVideoDecodeAccelerator::NotifyError, weak_this_, vda_error_type, | |
1069 session_failure_type)); | |
1070 } else if (state_ == STATE_DECODING) { | |
1071 state_ = STATE_ERROR; | |
1072 UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason", | |
1073 session_failure_type, | |
1074 SFT_MAX + 1); | |
1075 client_->NotifyError(vda_error_type); | |
1076 } | |
1077 } | |
1078 | |
1079 void VTVideoDecodeAccelerator::QueueFlush(TaskType type) { | |
1080 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1081 pending_flush_tasks_.push(type); | |
1082 decoder_thread_.task_runner()->PostTask( | |
1083 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::FlushTask, | |
1084 base::Unretained(this), type)); | |
1085 | |
1086 // If this is a new flush request, see if we can make progress. | |
1087 if (pending_flush_tasks_.size() == 1) | |
1088 ProcessWorkQueues(); | |
1089 } | |
1090 | |
1091 void VTVideoDecodeAccelerator::Flush() { | |
1092 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1093 QueueFlush(TASK_FLUSH); | |
1094 } | |
1095 | |
1096 void VTVideoDecodeAccelerator::Reset() { | |
1097 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1098 QueueFlush(TASK_RESET); | |
1099 } | |
1100 | |
1101 void VTVideoDecodeAccelerator::Destroy() { | |
1102 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
1103 | |
1104 // In a forceful shutdown, the decoder thread may be dead already. | |
1105 if (!decoder_thread_.IsRunning()) { | |
1106 delete this; | |
1107 return; | |
1108 } | |
1109 | |
1110 // For a graceful shutdown, return assigned buffers and flush before | |
1111 // destructing |this|. | |
1112 // TODO(sandersd): Prevent the decoder from reading buffers before discarding | |
1113 // them. | |
1114 for (int32_t bitstream_id : assigned_bitstream_ids_) | |
1115 client_->NotifyEndOfBitstreamBuffer(bitstream_id); | |
1116 assigned_bitstream_ids_.clear(); | |
1117 state_ = STATE_DESTROYING; | |
1118 QueueFlush(TASK_DESTROY); | |
1119 } | |
1120 | |
1121 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { | |
1122 return false; | |
1123 } | |
1124 | |
1125 // static | |
1126 media::VideoDecodeAccelerator::SupportedProfiles | |
1127 VTVideoDecodeAccelerator::GetSupportedProfiles() { | |
1128 SupportedProfiles profiles; | |
1129 for (const auto& supported_profile : kSupportedProfiles) { | |
1130 SupportedProfile profile; | |
1131 profile.profile = supported_profile; | |
1132 profile.min_resolution.SetSize(16, 16); | |
1133 profile.max_resolution.SetSize(4096, 2160); | |
1134 profiles.push_back(profile); | |
1135 } | |
1136 return profiles; | |
1137 } | |
1138 | |
1139 } // namespace content | |
OLD | NEW |