OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <CoreVideo/CoreVideo.h> | 5 #include <CoreVideo/CoreVideo.h> |
6 #include <OpenGL/CGLIOSurface.h> | 6 #include <OpenGL/CGLIOSurface.h> |
7 | 7 |
8 #include "base/bind.h" | 8 #include "base/bind.h" |
9 #include "base/thread_task_runner_handle.h" | 9 #include "base/thread_task_runner_handle.h" |
10 #include "content/common/gpu/media/vt_video_decode_accelerator.h" | 10 #include "content/common/gpu/media/vt_video_decode_accelerator.h" |
11 #include "media/filters/h264_parser.h" | 11 #include "media/filters/h264_parser.h" |
12 | 12 |
13 using content_common_gpu_media::kModuleVt; | 13 using content_common_gpu_media::kModuleVt; |
14 using content_common_gpu_media::InitializeStubs; | 14 using content_common_gpu_media::InitializeStubs; |
15 using content_common_gpu_media::IsVtInitialized; | 15 using content_common_gpu_media::IsVtInitialized; |
16 using content_common_gpu_media::StubPathMap; | 16 using content_common_gpu_media::StubPathMap; |
17 | 17 |
18 namespace content { | 18 namespace content { |
19 | 19 |
20 // Size of length headers prepended to NALUs in MPEG-4 framing. (1, 2, or 4.) | 20 // Size of NALU length headers in AVCC/MPEG-4 format (can be 1, 2, or 4). |
21 static const int kNALUHeaderLength = 4; | 21 static const int kNALUHeaderLength = 4; |
22 | 22 |
| 23 // We only request 5 picture buffers from the client which are used to hold the |
| 24 // decoded samples. These buffers are then reused when the client tells us that |
| 25 // it is done with the buffer. |
| 26 static const int kNumPictureBuffers = 5; |
| 27 |
23 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. | 28 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. |
24 static void OutputThunk( | 29 static void OutputThunk( |
25 void* decompression_output_refcon, | 30 void* decompression_output_refcon, |
26 void* source_frame_refcon, | 31 void* source_frame_refcon, |
27 OSStatus status, | 32 OSStatus status, |
28 VTDecodeInfoFlags info_flags, | 33 VTDecodeInfoFlags info_flags, |
29 CVImageBufferRef image_buffer, | 34 CVImageBufferRef image_buffer, |
30 CMTime presentation_time_stamp, | 35 CMTime presentation_time_stamp, |
31 CMTime presentation_duration) { | 36 CMTime presentation_duration) { |
| 37 // TODO(sandersd): Implement flush-before-delete to guarantee validity. |
32 VTVideoDecodeAccelerator* vda = | 38 VTVideoDecodeAccelerator* vda = |
33 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); | 39 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); |
34 int32_t* bitstream_id_ptr = reinterpret_cast<int32_t*>(source_frame_refcon); | 40 intptr_t bitstream_id = reinterpret_cast<intptr_t>(source_frame_refcon); |
35 int32_t bitstream_id = *bitstream_id_ptr; | 41 vda->Output(bitstream_id, status, image_buffer); |
36 delete bitstream_id_ptr; | 42 } |
37 CFRetain(image_buffer); | 43 |
38 vda->Output(bitstream_id, status, info_flags, image_buffer); | 44 VTVideoDecodeAccelerator::DecodedFrame::DecodedFrame( |
| 45 uint32_t bitstream_id, |
| 46 CVImageBufferRef image_buffer) |
| 47 : bitstream_id(bitstream_id), |
| 48 image_buffer(image_buffer) { |
| 49 } |
| 50 |
| 51 VTVideoDecodeAccelerator::DecodedFrame::~DecodedFrame() { |
39 } | 52 } |
40 | 53 |
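OutputThunk() now packs the bitstream id directly into source_frame_refcon instead of heap-allocating an int32_t, so the callback no longer needs a delete. A minimal sketch of that round-trip (illustrative names and values, not part of the CL):

    // Illustrative only: the id fits in a pointer, so it can travel through
    // the refcon by value; no heap allocation or delete is required.
    int32_t bitstream_id = 42;  // hypothetical id
    void* refcon = reinterpret_cast<void*>(static_cast<intptr_t>(bitstream_id));
    int32_t recovered = static_cast<int32_t>(reinterpret_cast<intptr_t>(refcon));
    DCHECK_EQ(recovered, bitstream_id);
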
41 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(CGLContextObj cgl_context) | 54 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(CGLContextObj cgl_context) |
42 : cgl_context_(cgl_context), | 55 : cgl_context_(cgl_context), |
43 client_(NULL), | 56 client_(NULL), |
44 decoder_thread_("VTDecoderThread"), | |
45 format_(NULL), | 57 format_(NULL), |
46 session_(NULL), | 58 session_(NULL), |
47 weak_this_factory_(this) { | 59 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
| 60 weak_this_factory_(this), |
| 61 decoder_thread_("VTDecoderThread") { |
48 callback_.decompressionOutputCallback = OutputThunk; | 62 callback_.decompressionOutputCallback = OutputThunk; |
49 callback_.decompressionOutputRefCon = this; | 63 callback_.decompressionOutputRefCon = this; |
50 } | 64 } |
51 | 65 |
52 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { | 66 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { |
53 } | 67 } |
54 | 68 |
55 bool VTVideoDecodeAccelerator::Initialize( | 69 bool VTVideoDecodeAccelerator::Initialize( |
56 media::VideoCodecProfile profile, | 70 media::VideoCodecProfile profile, |
57 Client* client) { | 71 Client* client) { |
(...skipping 23 matching lines...) |
81 return false; | 95 return false; |
82 | 96 |
83 // Note that --ignore-gpu-blacklist is still required to get here. | 97 // Note that --ignore-gpu-blacklist is still required to get here. |
84 return true; | 98 return true; |
85 } | 99 } |
86 | 100 |
87 // TODO(sandersd): Proper error reporting instead of CHECKs. | 101 // TODO(sandersd): Proper error reporting instead of CHECKs. |
88 void VTVideoDecodeAccelerator::ConfigureDecoder( | 102 void VTVideoDecodeAccelerator::ConfigureDecoder( |
89 const std::vector<const uint8_t*>& nalu_data_ptrs, | 103 const std::vector<const uint8_t*>& nalu_data_ptrs, |
90 const std::vector<size_t>& nalu_data_sizes) { | 104 const std::vector<size_t>& nalu_data_sizes) { |
| 105 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| 106 // Construct a new format description from the parameter sets. |
| 107 // TODO(sandersd): Replace this with custom code to support OS X < 10.9. |
91 format_.reset(); | 108 format_.reset(); |
92 CHECK(!CMVideoFormatDescriptionCreateFromH264ParameterSets( | 109 CHECK(!CMVideoFormatDescriptionCreateFromH264ParameterSets( |
93 kCFAllocatorDefault, | 110 kCFAllocatorDefault, |
94 nalu_data_ptrs.size(), // parameter_set_count | 111 nalu_data_ptrs.size(), // parameter_set_count |
95 &nalu_data_ptrs.front(), // &parameter_set_pointers | 112 &nalu_data_ptrs.front(), // &parameter_set_pointers |
96 &nalu_data_sizes.front(), // &parameter_set_sizes | 113 &nalu_data_sizes.front(), // &parameter_set_sizes |
97 kNALUHeaderLength, // nal_unit_header_length | 114 kNALUHeaderLength, // nal_unit_header_length |
98 format_.InitializeInto() | 115 format_.InitializeInto())); |
99 )); | 116 CMVideoDimensions coded_dimensions = |
| 117 CMVideoFormatDescriptionGetDimensions(format_); |
100 | 118 |
101 // TODO(sandersd): Check if the size has changed and handle picture requests. | 119 // Prepare VideoToolbox configuration dictionaries. |
102 CMVideoDimensions coded_size = CMVideoFormatDescriptionGetDimensions(format_); | |
103 coded_size_.SetSize(coded_size.width, coded_size.height); | |
104 | |
105 base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config( | 120 base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config( |
106 CFDictionaryCreateMutable( | 121 CFDictionaryCreateMutable( |
107 kCFAllocatorDefault, | 122 kCFAllocatorDefault, |
108 1, // capacity | 123 1, // capacity |
109 &kCFTypeDictionaryKeyCallBacks, | 124 &kCFTypeDictionaryKeyCallBacks, |
110 &kCFTypeDictionaryValueCallBacks)); | 125 &kCFTypeDictionaryValueCallBacks)); |
111 | 126 |
112 CFDictionarySetValue( | 127 CFDictionarySetValue( |
113 decoder_config, | 128 decoder_config, |
114 // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder | 129 // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder |
115 CFSTR("EnableHardwareAcceleratedVideoDecoder"), | 130 CFSTR("EnableHardwareAcceleratedVideoDecoder"), |
116 kCFBooleanTrue); | 131 kCFBooleanTrue); |
117 | 132 |
118 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config( | 133 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config( |
119 CFDictionaryCreateMutable( | 134 CFDictionaryCreateMutable( |
120 kCFAllocatorDefault, | 135 kCFAllocatorDefault, |
121 4, // capacity | 136 4, // capacity |
122 &kCFTypeDictionaryKeyCallBacks, | 137 &kCFTypeDictionaryKeyCallBacks, |
123 &kCFTypeDictionaryValueCallBacks)); | 138 &kCFTypeDictionaryValueCallBacks)); |
124 | 139 |
125 // TODO(sandersd): ARGB for video that is not 4:2:0. | |
126 int32_t pixel_format = '2vuy'; | |
127 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) | 140 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) |
| 141 // TODO(sandersd): RGBA option for 4:4:4 video. |
| 142 int32_t pixel_format = kCVPixelFormatType_422YpCbCr8; |
128 base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format)); | 143 base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format)); |
129 base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_size.width)); | 144 base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width)); |
130 base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_size.height)); | 145 base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_dimensions.height)); |
131 #undef CFINT | 146 #undef CFINT |
132 CFDictionarySetValue( | 147 CFDictionarySetValue( |
133 image_config, kCVPixelBufferPixelFormatTypeKey, cf_pixel_format); | 148 image_config, kCVPixelBufferPixelFormatTypeKey, cf_pixel_format); |
134 CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width); | 149 CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width); |
135 CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height); | 150 CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height); |
136 CFDictionarySetValue( | 151 CFDictionarySetValue( |
137 image_config, kCVPixelBufferOpenGLCompatibilityKey, kCFBooleanTrue); | 152 image_config, kCVPixelBufferOpenGLCompatibilityKey, kCFBooleanTrue); |
138 | 153 |
139 // TODO(sandersd): Skip if the session is compatible. | 154 // TODO(sandersd): Check if the session is already compatible. |
140 // TODO(sandersd): Flush frames when resetting. | 155 // TODO(sandersd): Flush. |
141 session_.reset(); | 156 session_.reset(); |
142 CHECK(!VTDecompressionSessionCreate( | 157 CHECK(!VTDecompressionSessionCreate( |
143 kCFAllocatorDefault, | 158 kCFAllocatorDefault, |
144 format_, // video_format_description | 159 format_, // video_format_description |
145 decoder_config, // video_decoder_specification | 160 decoder_config, // video_decoder_specification |
146 image_config, // destination_image_buffer_attributes | 161 image_config, // destination_image_buffer_attributes |
147 &callback_, // output_callback | 162 &callback_, // output_callback |
148 session_.InitializeInto() | 163 session_.InitializeInto())); |
149 )); | 164 |
150 DVLOG(2) << "Created VTDecompressionSession"; | 165 // If the size has changed, trigger a request for new picture buffers. |
| 166 gfx::Size new_coded_size(coded_dimensions.width, coded_dimensions.height); |
| 167 if (coded_size_ != new_coded_size) { |
| 168 coded_size_ = new_coded_size; |
| 169 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| 170 &VTVideoDecodeAccelerator::SizeChangedTask, |
| 171 weak_this_factory_.GetWeakPtr(), |
| 172 coded_size_)); |
| 173 } |
151 } | 174 } |
152 | 175 |
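For the "check if the session is already compatible" TODO above, one possible approach (a sketch only, not what this CL does) is to ask VideoToolbox whether the live session can already handle the new format description before tearing it down:

    // Hypothetical early-out before recreating the session; assumes the
    // session_ and format_ members used above.
    if (session_ &&
        VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) {
      return;  // Existing session can decode the new format; keep it.
    }
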
153 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { | 176 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { |
154 DCHECK(CalledOnValidThread()); | 177 DCHECK(CalledOnValidThread()); |
155 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( | 178 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
156 &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), | 179 &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
157 bitstream)); | 180 bitstream)); |
158 } | 181 } |
159 | 182 |
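Decode() hands `this` to the decoder thread via base::Unretained(), which is only safe if the thread is joined before this object is destroyed. A sketch of what that could look like (assumed; the current CL leaves teardown to the flush TODOs in Destroy()):

    VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
      // Joining the decoder thread here would guarantee that no DecodeTask
      // is still running with the Unretained pointer.
      decoder_thread_.Stop();
    }
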
160 void VTVideoDecodeAccelerator::DecodeTask( | 183 void VTVideoDecodeAccelerator::DecodeTask( |
161 const media::BitstreamBuffer bitstream) { | 184 const media::BitstreamBuffer bitstream) { |
162 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); | 185 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
163 | 186 |
164 // Map the bitstream buffer. | 187 // Map the bitstream buffer. |
165 base::SharedMemory memory(bitstream.handle(), true); | 188 base::SharedMemory memory(bitstream.handle(), true); |
166 size_t size = bitstream.size(); | 189 size_t size = bitstream.size(); |
167 CHECK(memory.Map(size)); | 190 CHECK(memory.Map(size)); |
168 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); | 191 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); |
169 | 192 |
170 // Locate relevant NALUs in the buffer. | 193 // NALUs are stored in Annex B format in the bitstream buffer (3-byte start |
| 194 // codes), but VideoToolbox expects AVCC/MPEG-4 format (length headers), so we |
| 195 // must rewrite the data. |
| 196 // |
| 197 // 1. Locate relevant NALUs and compute the size of the translated data. |
| 198 // Also record any parameter sets for VideoToolbox initialization. |
171 size_t data_size = 0; | 199 size_t data_size = 0; |
172 std::vector<media::H264NALU> nalus; | 200 std::vector<media::H264NALU> nalus; |
173 std::vector<const uint8_t*> config_nalu_data_ptrs; | 201 std::vector<const uint8_t*> config_nalu_data_ptrs; |
174 std::vector<size_t> config_nalu_data_sizes; | 202 std::vector<size_t> config_nalu_data_sizes; |
175 parser_.SetStream(buf, size); | 203 parser_.SetStream(buf, size); |
176 media::H264NALU nalu; | 204 media::H264NALU nalu; |
177 while (true) { | 205 while (true) { |
178 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); | 206 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); |
179 if (result == media::H264Parser::kEOStream) | 207 if (result == media::H264Parser::kEOStream) |
180 break; | 208 break; |
181 CHECK_EQ(result, media::H264Parser::kOk); | 209 CHECK_EQ(result, media::H264Parser::kOk); |
| 210 // TODO(sandersd): Check that these are only at the start. |
182 if (nalu.nal_unit_type == media::H264NALU::kSPS || | 211 if (nalu.nal_unit_type == media::H264NALU::kSPS || |
183 nalu.nal_unit_type == media::H264NALU::kPPS || | 212 nalu.nal_unit_type == media::H264NALU::kPPS || |
184 nalu.nal_unit_type == media::H264NALU::kSPSExt) { | 213 nalu.nal_unit_type == media::H264NALU::kSPSExt) { |
| 214 DVLOG(2) << "Parameter set " << nalu.nal_unit_type; |
185 config_nalu_data_ptrs.push_back(nalu.data); | 215 config_nalu_data_ptrs.push_back(nalu.data); |
186 config_nalu_data_sizes.push_back(nalu.size); | 216 config_nalu_data_sizes.push_back(nalu.size); |
| 217 } else { |
| 218 nalus.push_back(nalu); |
| 219 data_size += kNALUHeaderLength + nalu.size; |
187 } | 220 } |
188 nalus.push_back(nalu); | |
189 // Each NALU will have a 4-byte length header prepended. | |
190 data_size += kNALUHeaderLength + nalu.size; | |
191 } | 221 } |
192 | 222 |
193 if (!config_nalu_data_ptrs.empty()) | 223 // 2. Initialize VideoToolbox. |
| 224 // TODO(sandersd): Reinitialize when there are new parameter sets. |
| 225 if (!session_) |
194 ConfigureDecoder(config_nalu_data_ptrs, config_nalu_data_sizes); | 226 ConfigureDecoder(config_nalu_data_ptrs, config_nalu_data_sizes); |
195 | 227 |
196 // TODO(sandersd): Rewrite slice NALU headers and send for decoding. | 228 // 3. Allocate a memory-backed CMBlockBuffer for the translated data. |
| 229 base::ScopedCFTypeRef<CMBlockBufferRef> data; |
| 230 CHECK(!CMBlockBufferCreateWithMemoryBlock( |
| 231 kCFAllocatorDefault, |
| 232 NULL, // &memory_block |
| 233 data_size, // block_length |
| 234 kCFAllocatorDefault, // block_allocator |
| 235 NULL, // &custom_block_source |
| 236 0, // offset_to_data |
| 237 data_size, // data_length |
| 238 0, // flags |
| 239 data.InitializeInto())); |
| 240 |
| 241 // 4. Copy NALU data, inserting length headers. |
| 242 size_t offset = 0; |
| 243 for (size_t i = 0; i < nalus.size(); i++) { |
| 244 media::H264NALU& nalu = nalus[i]; |
| 245 uint8_t header[4] = {static_cast<uint8_t>(nalu.size >> 24), |
| 246 static_cast<uint8_t>(nalu.size >> 16), |
| 247 static_cast<uint8_t>(nalu.size >> 8), |
| 248 static_cast<uint8_t>(nalu.size)}; |
| 249 CHECK(!CMBlockBufferReplaceDataBytes(header, data, offset, 4)); |
| 250 offset += 4; |
| 251 CHECK(!CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size)); |
| 252 offset += nalu.size; |
| 253 } |
| 254 |
| 255 // 5. Package the data for VideoToolbox and request decoding. |
| 256 base::ScopedCFTypeRef<CMSampleBufferRef> frame; |
| 257 CHECK(!CMSampleBufferCreate( |
| 258 kCFAllocatorDefault, |
| 259 data, // data_buffer |
| 260 true, // data_ready |
| 261 NULL, // make_data_ready_callback |
| 262 NULL, // make_data_ready_refcon |
| 263 format_, // format_description |
| 264 1, // num_samples |
| 265 0, // num_sample_timing_entries |
| 266 NULL, // &sample_timing_array |
| 267 0, // num_sample_size_entries |
| 268 NULL, // &sample_size_array |
| 269 frame.InitializeInto())); |
| 270 |
| 271 VTDecodeFrameFlags decode_flags = |
| 272 kVTDecodeFrame_EnableAsynchronousDecompression | |
| 273 kVTDecodeFrame_EnableTemporalProcessing; |
| 274 |
| 275 intptr_t bitstream_id = bitstream.id(); |
| 276 CHECK(!VTDecompressionSessionDecodeFrame( |
| 277 session_, |
| 278 frame, // sample_buffer |
| 279 decode_flags, // decode_flags |
| 280 reinterpret_cast<void*>(bitstream_id), // source_frame_refcon |
| 281 NULL)); // &info_flags_out |
197 } | 282 } |
198 | 283 |
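Step 4 writes each NALU size as a big-endian 32-bit length prefix, which is what AVCC framing with kNALUHeaderLength == 4 requires. The same construction as a standalone helper (hypothetical name, equivalent to the header[] code above):

    // Emit a NALU size as a 4-byte big-endian AVCC length prefix.
    static void WriteAVCCLengthHeader(uint32_t size, uint8_t out[4]) {
      out[0] = (size >> 24) & 0xff;
      out[1] = (size >> 16) & 0xff;
      out[2] = (size >> 8) & 0xff;
      out[3] = size & 0xff;
    }

For example, a 5-byte NALU delivered in Annex B framing as 00 00 01 65 88 84 00 33 is written into the CMBlockBuffer as 00 00 00 05 65 88 84 00 33.
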
199 // This method may be called on any VideoToolbox thread. | 284 // This method may be called on any VideoToolbox thread. |
200 void VTVideoDecodeAccelerator::Output( | 285 void VTVideoDecodeAccelerator::Output( |
201 int32_t bitstream_id, | 286 int32_t bitstream_id, |
202 OSStatus status, | 287 OSStatus status, |
203 VTDecodeInfoFlags info_flags, | |
204 CVImageBufferRef image_buffer) { | 288 CVImageBufferRef image_buffer) { |
205 // TODO(sandersd): Store the frame in a queue. | 289 CHECK(!status); |
206 CFRelease(image_buffer); | 290 CHECK_EQ(CFGetTypeID(image_buffer), CVPixelBufferGetTypeID()); |
| 291 CFRetain(image_buffer); |
| 292 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| 293 &VTVideoDecodeAccelerator::OutputTask, |
| 294 weak_this_factory_.GetWeakPtr(), |
| 295 DecodedFrame(bitstream_id, image_buffer))); |
| 296 } |
| 297 |
| 298 void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) { |
| 299 DCHECK(CalledOnValidThread()); |
| 300 decoded_frames_.push(frame); |
| 301 SendPictures(); |
| 302 } |
| 303 |
| 304 void VTVideoDecodeAccelerator::SizeChangedTask(gfx::Size coded_size) { |
| 305 texture_size_ = coded_size; |
| 306 // TODO(sandersd): Dismiss existing picture buffers. |
| 307 client_->ProvidePictureBuffers( |
| 308 kNumPictureBuffers, texture_size_, GL_TEXTURE_RECTANGLE_ARB); |
207 } | 309 } |
208 | 310 |
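For the "dismiss existing picture buffers" TODO, the usual VideoDecodeAccelerator pattern (a sketch only; the container type is the one inferred from AssignPictureBuffers() below) is to notify the client about each outstanding buffer before requesting replacements:

    // Hypothetical dismissal of buffers sized for the previous coded size.
    for (std::map<int32_t, uint32_t>::iterator it = texture_ids_.begin();
         it != texture_ids_.end(); ++it) {
      client_->DismissPictureBuffer(it->first);
    }
    texture_ids_.clear();
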
209 void VTVideoDecodeAccelerator::AssignPictureBuffers( | 311 void VTVideoDecodeAccelerator::AssignPictureBuffers( |
210 const std::vector<media::PictureBuffer>& pictures) { | 312 const std::vector<media::PictureBuffer>& pictures) { |
211 DCHECK(CalledOnValidThread()); | 313 DCHECK(CalledOnValidThread()); |
| 314 |
| 315 for (size_t i = 0; i < pictures.size(); i++) { |
| 316 picture_ids_.push(pictures[i].id()); |
| 317 texture_ids_[pictures[i].id()] = pictures[i].texture_id(); |
| 318 } |
| 319 |
| 320 // Pictures are not marked as uncleared until this method returns. They will |
| 321 // become broken if they are used before that happens. |
| 322 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| 323 &VTVideoDecodeAccelerator::SendPictures, |
| 324 weak_this_factory_.GetWeakPtr())); |
212 } | 325 } |
213 | 326 |
214 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { | 327 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { |
215 DCHECK(CalledOnValidThread()); | 328 DCHECK(CalledOnValidThread()); |
| 329 DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1); |
| 330 picture_bindings_.erase(picture_id); |
| 331 picture_ids_.push(picture_id); |
| 332 SendPictures(); |
| 333 } |
| 334 |
| 335 void VTVideoDecodeAccelerator::SendPictures() { |
| 336 DCHECK(CalledOnValidThread()); |
| 337 if (picture_ids_.empty() || decoded_frames_.empty()) |
| 338 return; |
| 339 |
| 340 CGLContextObj prev_context = CGLGetCurrentContext(); |
| 341 CHECK(!CGLSetCurrentContext(cgl_context_)); |
| 342 glEnable(GL_TEXTURE_RECTANGLE_ARB); |
| 343 |
| 344 while (!picture_ids_.empty() && !decoded_frames_.empty()) { |
| 345 int32_t picture_id = picture_ids_.front(); |
| 346 picture_ids_.pop(); |
| 347 DecodedFrame frame = decoded_frames_.front(); |
| 348 decoded_frames_.pop(); |
| 349 IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image_buffer); |
| 350 |
| 351 glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]); |
| 352 CHECK(!CGLTexImageIOSurface2D( |
| 353 cgl_context_, // ctx |
| 354 GL_TEXTURE_RECTANGLE_ARB, // target |
| 355 GL_RGB, // internal_format |
| 356 texture_size_.width(), // width |
| 357 texture_size_.height(), // height |
| 358 GL_YCBCR_422_APPLE, // format |
| 359 GL_UNSIGNED_SHORT_8_8_APPLE, // type |
| 360 surface, // io_surface |
| 361 0)); // plane |
| 362 glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0); |
| 363 |
| 364 picture_bindings_[picture_id] = frame.image_buffer; |
| 365 client_->PictureReady(media::Picture(picture_id, frame.bitstream_id)); |
| 366 client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
| 367 } |
| 368 |
| 369 glDisable(GL_TEXTURE_RECTANGLE_ARB); |
| 370 CHECK(!CGLSetCurrentContext(prev_context)); |
216 } | 371 } |
217 | 372 |
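SendPictures() relies on several bookkeeping members whose declarations live in the header, which is not part of this diff. The types below are inferred from usage and are assumptions, not the actual declarations:

    std::queue<int32_t> picture_ids_;          // picture ids available for output
    std::map<int32_t, uint32_t> texture_ids_;  // picture id -> GL texture id
    std::queue<DecodedFrame> decoded_frames_;  // decoded frames awaiting pictures
    std::map<int32_t, base::ScopedCFTypeRef<CVImageBufferRef> >
        picture_bindings_;                     // keeps IOSurfaces alive while in use
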
218 void VTVideoDecodeAccelerator::Flush() { | 373 void VTVideoDecodeAccelerator::Flush() { |
219 DCHECK(CalledOnValidThread()); | 374 DCHECK(CalledOnValidThread()); |
220 // TODO(sandersd): Trigger flush, sending frames. | 375 // TODO(sandersd): Trigger flush, sending frames. |
221 } | 376 } |
222 | 377 |
223 void VTVideoDecodeAccelerator::Reset() { | 378 void VTVideoDecodeAccelerator::Reset() { |
224 DCHECK(CalledOnValidThread()); | 379 DCHECK(CalledOnValidThread()); |
225 // TODO(sandersd): Trigger flush, discarding frames. | 380 // TODO(sandersd): Trigger flush, discarding frames. |
226 } | 381 } |
227 | 382 |
228 void VTVideoDecodeAccelerator::Destroy() { | 383 void VTVideoDecodeAccelerator::Destroy() { |
229 DCHECK(CalledOnValidThread()); | 384 DCHECK(CalledOnValidThread()); |
230 // TODO(sandersd): Trigger flush, discarding frames, and wait for them. | 385 // TODO(sandersd): Trigger flush, discarding frames, and wait for them. |
231 delete this; | 386 delete this; |
232 } | 387 } |
233 | 388 |
234 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { | 389 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { |
235 return false; | 390 return false; |
236 } | 391 } |
237 | 392 |
238 } // namespace content | 393 } // namespace content |