Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/vt_video_decode_accelerator.cc

Issue 706023004: Collect VTVideoDecodeAccelerator frames into a work queue (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@vt_config_change
Patch Set: Created 6 years, 1 month ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <CoreVideo/CoreVideo.h> 5 #include <CoreVideo/CoreVideo.h>
6 #include <OpenGL/CGLIOSurface.h> 6 #include <OpenGL/CGLIOSurface.h>
7 #include <OpenGL/gl.h> 7 #include <OpenGL/gl.h>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/callback_helpers.h"
11 #include "base/command_line.h" 10 #include "base/command_line.h"
12 #include "base/sys_byteorder.h" 11 #include "base/sys_byteorder.h"
13 #include "base/thread_task_runner_handle.h" 12 #include "base/thread_task_runner_handle.h"
14 #include "content/common/gpu/media/vt_video_decode_accelerator.h" 13 #include "content/common/gpu/media/vt_video_decode_accelerator.h"
15 #include "content/public/common/content_switches.h" 14 #include "content/public/common/content_switches.h"
16 #include "media/filters/h264_parser.h" 15 #include "media/filters/h264_parser.h"
17 #include "ui/gl/scoped_binders.h" 16 #include "ui/gl/scoped_binders.h"
18 17
19 using content_common_gpu_media::kModuleVt; 18 using content_common_gpu_media::kModuleVt;
20 using content_common_gpu_media::InitializeStubs; 19 using content_common_gpu_media::InitializeStubs;
21 using content_common_gpu_media::IsVtInitialized; 20 using content_common_gpu_media::IsVtInitialized;
22 using content_common_gpu_media::StubPathMap; 21 using content_common_gpu_media::StubPathMap;
23 22
24 #define NOTIFY_STATUS(name, status) \ 23 #define NOTIFY_STATUS(name, status) \
25 do { \ 24 do { \
26 LOG(ERROR) << name << " failed with status " << status; \ 25 LOG(ERROR) << name << " failed with status " << status; \
27 NotifyError(PLATFORM_FAILURE); \ 26 NotifyError(PLATFORM_FAILURE); \
28 } while (0) 27 } while (0)
29 28
30 namespace content { 29 namespace content {
31 30
32 // Size of NALU length headers in AVCC/MPEG-4 format (can be 1, 2, or 4). 31 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
33 static const int kNALUHeaderLength = 4; 32 static const int kNALUHeaderLength = 4;
34 33
35 // We only request 5 picture buffers from the client which are used to hold the 34 // We request 5 picture buffers from the client, each of which has a texture ID
36 // decoded samples. These buffers are then reused when the client tells us that 35 // that we can bind decoded frames to. We need enough to satisfy preroll, and
37 // it is done with the buffer. 36 // enough to avoid unnecessary stalling, but no more than that. The resource
37 // requirements are low, as we don't need the textures to be backed by storage.
38 static const int kNumPictureBuffers = 5; 38 static const int kNumPictureBuffers = 5;
39 39
40 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. 40 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
41 static void OutputThunk( 41 static void OutputThunk(
42 void* decompression_output_refcon, 42 void* decompression_output_refcon,
43 void* source_frame_refcon, 43 void* source_frame_refcon,
44 OSStatus status, 44 OSStatus status,
45 VTDecodeInfoFlags info_flags, 45 VTDecodeInfoFlags info_flags,
46 CVImageBufferRef image_buffer, 46 CVImageBufferRef image_buffer,
47 CMTime presentation_time_stamp, 47 CMTime presentation_time_stamp,
48 CMTime presentation_duration) { 48 CMTime presentation_duration) {
49 VTVideoDecodeAccelerator* vda = 49 VTVideoDecodeAccelerator* vda =
50 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); 50 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
51 int32_t bitstream_id = reinterpret_cast<intptr_t>(source_frame_refcon); 51 vda->Output(source_frame_refcon, status, image_buffer);
52 vda->Output(bitstream_id, status, image_buffer);
53 } 52 }
54 53
55 VTVideoDecodeAccelerator::DecodedFrame::DecodedFrame( 54 VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
56 int32_t bitstream_id,
57 CVImageBufferRef image_buffer)
58 : bitstream_id(bitstream_id),
59 image_buffer(image_buffer) {
60 } 55 }
61 56
62 VTVideoDecodeAccelerator::DecodedFrame::~DecodedFrame() { 57 VTVideoDecodeAccelerator::Task::~Task() {
63 } 58 }
64 59
65 VTVideoDecodeAccelerator::PendingAction::PendingAction( 60 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
66 Action action, 61 : bitstream_id(bitstream_id) {
67 int32_t bitstream_id)
68 : action(action),
69 bitstream_id(bitstream_id) {
70 } 62 }
71 63
72 VTVideoDecodeAccelerator::PendingAction::~PendingAction() { 64 VTVideoDecodeAccelerator::Frame::~Frame() {
73 } 65 }
74 66
75 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( 67 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
76 CGLContextObj cgl_context, 68 CGLContextObj cgl_context,
77 const base::Callback<bool(void)>& make_context_current) 69 const base::Callback<bool(void)>& make_context_current)
78 : cgl_context_(cgl_context), 70 : cgl_context_(cgl_context),
79 make_context_current_(make_context_current), 71 make_context_current_(make_context_current),
80 client_(NULL), 72 client_(NULL),
81 has_error_(false), 73 state_(STATE_NORMAL),
82 format_(NULL), 74 format_(NULL),
83 session_(NULL), 75 session_(NULL),
84 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), 76 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
85 weak_this_factory_(this), 77 weak_this_factory_(this),
86 decoder_thread_("VTDecoderThread") { 78 decoder_thread_("VTDecoderThread") {
87 DCHECK(!make_context_current_.is_null()); 79 DCHECK(!make_context_current_.is_null());
88 callback_.decompressionOutputCallback = OutputThunk; 80 callback_.decompressionOutputCallback = OutputThunk;
89 callback_.decompressionOutputRefCon = this; 81 callback_.decompressionOutputRefCon = this;
90 } 82 }
91 83
(...skipping 27 matching lines...)
119 return false; 111 return false;
120 } 112 }
121 113
122 // Spawn a thread to handle parsing and calling VideoToolbox. 114 // Spawn a thread to handle parsing and calling VideoToolbox.
123 if (!decoder_thread_.Start()) 115 if (!decoder_thread_.Start())
124 return false; 116 return false;
125 117
126 return true; 118 return true;
127 } 119 }
128 120
129 bool VTVideoDecodeAccelerator::ConfigureDecoder( 121 bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
130 const std::vector<const uint8_t*>& nalu_data_ptrs,
131 const std::vector<size_t>& nalu_data_sizes) {
132 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); 122 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
123 if (session_) {
124 OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_);
125 if (status) {
126 NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status);
127 return false;
128 }
129 }
130 return true;
131 }
132
133 bool VTVideoDecodeAccelerator::ConfigureDecoder() {
134 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
135 DCHECK(!last_sps_.empty());
136 DCHECK(!last_pps_.empty());
137
138 // Build the configuration records.
139 std::vector<const uint8_t*> nalu_data_ptrs;
DaleCurtis 2014/11/10 20:48:02 Yuck, I guess you could const initialize parallel
sandersd (OOO until July 31) 2014/11/11 18:46:06 Done. I'll see about a refactor to remove unnecess
140 std::vector<size_t> nalu_data_sizes;
141 nalu_data_ptrs.push_back(&last_sps_.front());
142 nalu_data_sizes.push_back(last_sps_.size());
143 if (!last_spsext_.empty()) {
144 nalu_data_ptrs.push_back(&last_spsext_.front());
145 nalu_data_sizes.push_back(last_spsext_.size());
146 }
147 nalu_data_ptrs.push_back(&last_pps_.front());
148 nalu_data_sizes.push_back(last_pps_.size());
133 149
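A minimal sketch of the "const initialize parallel arrays" idea from the comment thread above (illustrative only, not the code in this patch set; the member names are taken from the surrounding function):

    // Sketch: const parallel arrays instead of push_back(). data() is used so
    // that an empty last_spsext_ is not undefined behavior, but the empty
    // entry would still have to be skipped before the VideoToolbox call,
    // which is why the patch keeps the dynamic vectors.
    const uint8_t* const nalu_data_ptrs[] = {
        last_sps_.data(), last_spsext_.data(), last_pps_.data()};
    const size_t nalu_data_sizes[] = {
        last_sps_.size(), last_spsext_.size(), last_pps_.size()};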
134 // Construct a new format description from the parameter sets. 150 // Construct a new format description from the parameter sets.
135 // TODO(sandersd): Replace this with custom code to support OS X < 10.9. 151 // TODO(sandersd): Replace this with custom code to support OS X < 10.9.
136 format_.reset(); 152 format_.reset();
137 OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets( 153 OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
138 kCFAllocatorDefault, 154 kCFAllocatorDefault,
139 nalu_data_ptrs.size(), // parameter_set_count 155 nalu_data_ptrs.size(), // parameter_set_count
140 &nalu_data_ptrs.front(), // &parameter_set_pointers 156 &nalu_data_ptrs.front(), // &parameter_set_pointers
141 &nalu_data_sizes.front(), // &parameter_set_sizes 157 &nalu_data_sizes.front(), // &parameter_set_sizes
142 kNALUHeaderLength, // nal_unit_header_length 158 kNALUHeaderLength, // nal_unit_header_length
143 format_.InitializeInto()); 159 format_.InitializeInto());
144 if (status) { 160 if (status) {
145 NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()", 161 NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
146 status); 162 status);
147 return false; 163 return false;
148 } 164 }
149 165
150 // If the session is compatible, there's nothing to do. 166 // Flush all frames using the previous configuration to keep things simple.
DaleCurtis 2014/11/10 20:48:02 Shouldn't this happen in the Reset() that precedes
sandersd (OOO until July 31) 2014/11/11 18:46:07 The decoder does not need to be reset to get a new
167 if (!FinishDelayedFrames())
168 return false;
169
170 // Store the new configuration data.
171 CMVideoDimensions coded_dimensions =
172 CMVideoFormatDescriptionGetDimensions(format_);
173 coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height);
174
175 // If the session is compatible, there's nothing else to do.
151 if (session_ && 176 if (session_ &&
152 VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) { 177 VTDecompressionSessionCanAcceptFormatDescription(session_, format_))
DaleCurtis 2014/11/10 21:10:39 Multiline if should keep {}
sandersd (OOO until July 31) 2014/11/11 18:46:07 Done.
153 return true; 178 return true;
154 }
155 179
156 // Prepare VideoToolbox configuration dictionaries. 180 // Prepare VideoToolbox configuration dictionaries.
157 base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config( 181 base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
158 CFDictionaryCreateMutable( 182 CFDictionaryCreateMutable(
159 kCFAllocatorDefault, 183 kCFAllocatorDefault,
160 1, // capacity 184 1, // capacity
161 &kCFTypeDictionaryKeyCallBacks, 185 &kCFTypeDictionaryKeyCallBacks,
162 &kCFTypeDictionaryValueCallBacks)); 186 &kCFTypeDictionaryValueCallBacks));
163 187
164 CFDictionarySetValue( 188 CFDictionarySetValue(
165 decoder_config, 189 decoder_config,
166 // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder 190 // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
167 CFSTR("EnableHardwareAcceleratedVideoDecoder"), 191 CFSTR("EnableHardwareAcceleratedVideoDecoder"),
168 kCFBooleanTrue); 192 kCFBooleanTrue);
169 193
170 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config( 194 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
171 CFDictionaryCreateMutable( 195 CFDictionaryCreateMutable(
172 kCFAllocatorDefault, 196 kCFAllocatorDefault,
173 4, // capacity 197 4, // capacity
174 &kCFTypeDictionaryKeyCallBacks, 198 &kCFTypeDictionaryKeyCallBacks,
175 &kCFTypeDictionaryValueCallBacks)); 199 &kCFTypeDictionaryValueCallBacks));
176 200
177 CMVideoDimensions coded_dimensions =
178 CMVideoFormatDescriptionGetDimensions(format_);
179 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) 201 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
180 // TODO(sandersd): RGBA option for 4:4:4 video. 202 // TODO(sandersd): RGBA option for 4:4:4 video.
181 int32_t pixel_format = kCVPixelFormatType_422YpCbCr8; 203 int32_t pixel_format = kCVPixelFormatType_422YpCbCr8;
182 base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format)); 204 base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format));
183 base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width)); 205 base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width));
184 base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_dimensions.height)); 206 base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_dimensions.height));
185 #undef CFINT 207 #undef CFINT
186 CFDictionarySetValue( 208 CFDictionarySetValue(
187 image_config, kCVPixelBufferPixelFormatTypeKey, cf_pixel_format); 209 image_config, kCVPixelBufferPixelFormatTypeKey, cf_pixel_format);
188 CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width); 210 CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width);
(...skipping 11 matching lines...)
200 &callback_, // output_callback 222 &callback_, // output_callback
201 session_.InitializeInto()); 223 session_.InitializeInto());
202 if (status) { 224 if (status) {
203 NOTIFY_STATUS("VTDecompressionSessionCreate()", status); 225 NOTIFY_STATUS("VTDecompressionSessionCreate()", status);
204 return false; 226 return false;
205 } 227 }
206 228
207 return true; 229 return true;
208 } 230 }
209 231
210 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
211 DCHECK(CalledOnValidThread());
212 // Not actually a requirement of the VDA API, but we're lazy and use negative
213 // values as flags internally. Revisit that if this actually happens.
214 if (bitstream.id() < 0) {
215 LOG(ERROR) << "Negative bitstream ID";
216 NotifyError(INVALID_ARGUMENT);
217 client_->NotifyEndOfBitstreamBuffer(bitstream.id());
218 return;
219 }
220 pending_bitstream_ids_.push(bitstream.id());
221 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
222 &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this),
223 bitstream));
224 }
225
226 void VTVideoDecodeAccelerator::DecodeTask( 232 void VTVideoDecodeAccelerator::DecodeTask(
227 const media::BitstreamBuffer& bitstream) { 233 const media::BitstreamBuffer& bitstream,
234 Frame* frame) {
228 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); 235 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
229 236
230 // Once we have a bitstream buffer, we must either decode it or drop it.
231 // This construct ensures that the buffer is always dropped unless we call
232 // drop_bitstream.Release().
233 base::ScopedClosureRunner drop_bitstream(base::Bind(
234 &VTVideoDecodeAccelerator::DropBitstream, base::Unretained(this),
235 bitstream.id()));
236
237 // Map the bitstream buffer. 237 // Map the bitstream buffer.
238 base::SharedMemory memory(bitstream.handle(), true); 238 base::SharedMemory memory(bitstream.handle(), true);
239 size_t size = bitstream.size(); 239 size_t size = bitstream.size();
240 if (!memory.Map(size)) { 240 if (!memory.Map(size)) {
241 LOG(ERROR) << "Failed to map bitstream buffer"; 241 LOG(ERROR) << "Failed to map bitstream buffer";
242 NotifyError(PLATFORM_FAILURE); 242 NotifyError(PLATFORM_FAILURE);
243 return; 243 return;
244 } 244 }
245 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); 245 const uint8_t* buf = static_cast<uint8_t*>(memory.memory());
246 246
247 // NALUs are stored with Annex B format in the bitstream buffer (start codes), 247 // NALUs are stored with Annex B format in the bitstream buffer (start codes),
DaleCurtis 2014/11/10 20:48:02 Peanut gallery: Seems like this entire NALU conver
sandersd (OOO until July 31) 2014/11/11 18:46:06 That's probably true, but there are a lot of outpu
248 // but VideoToolbox expects AVCC/MPEG-4 format (length headers), so we must 248 // but VideoToolbox expects AVC format (length headers), so we must rewrite
249 // rewrite the data. 249 // the data.
250 // 250 //
251 // 1. Locate relevant NALUs and compute the size of the translated data. 251 // Locate relevant NALUs and compute the size of the rewritten data. Also
252 // Also record any parameter sets for VideoToolbox initialization. 252 // record any parameter sets for VideoToolbox initialization.
253 bool config_changed = false; 253 bool config_changed = false;
254 size_t data_size = 0; 254 size_t data_size = 0;
255 std::vector<media::H264NALU> nalus; 255 std::vector<media::H264NALU> nalus;
256 parser_.SetStream(buf, size); 256 parser_.SetStream(buf, size);
257 media::H264NALU nalu; 257 media::H264NALU nalu;
258 while (true) { 258 while (true) {
259 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); 259 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu);
260 if (result == media::H264Parser::kEOStream) 260 if (result == media::H264Parser::kEOStream)
261 break; 261 break;
262 if (result != media::H264Parser::kOk) { 262 if (result != media::H264Parser::kOk) {
263 LOG(ERROR) << "Failed to find H.264 NALU"; 263 LOG(ERROR) << "Failed to find H.264 NALU";
264 NotifyError(PLATFORM_FAILURE); 264 NotifyError(PLATFORM_FAILURE);
265 return; 265 return;
266 } 266 }
267 // TODO(sandersd): Strict ordering rules.
268 switch (nalu.nal_unit_type) { 267 switch (nalu.nal_unit_type) {
269 case media::H264NALU::kSPS: 268 case media::H264NALU::kSPS:
270 last_sps_.assign(nalu.data, nalu.data + nalu.size); 269 last_sps_.assign(nalu.data, nalu.data + nalu.size);
271 last_spsext_.clear(); 270 last_spsext_.clear();
272 config_changed = true; 271 config_changed = true;
273 break; 272 break;
274 case media::H264NALU::kSPSExt: 273 case media::H264NALU::kSPSExt:
275 // TODO(sandersd): Check that the previous NALU was an SPS. 274 // TODO(sandersd): Check that the previous NALU was an SPS.
276 last_spsext_.assign(nalu.data, nalu.data + nalu.size); 275 last_spsext_.assign(nalu.data, nalu.data + nalu.size);
277 config_changed = true; 276 config_changed = true;
278 break; 277 break;
279 case media::H264NALU::kPPS: 278 case media::H264NALU::kPPS:
280 last_pps_.assign(nalu.data, nalu.data + nalu.size); 279 last_pps_.assign(nalu.data, nalu.data + nalu.size);
281 config_changed = true; 280 config_changed = true;
282 break; 281 break;
282 case media::H264NALU::kSliceDataA:
283 case media::H264NALU::kSliceDataB:
284 case media::H264NALU::kSliceDataC:
285 LOG(ERROR) << "Coded slice data partitions not implemented.";
286 NotifyError(PLATFORM_FAILURE);
287 return;
288 case media::H264NALU::kIDRSlice:
289 case media::H264NALU::kNonIDRSlice:
290 // TODO(sandersd): Compute pic_order_count.
283 default: 291 default:
284 nalus.push_back(nalu); 292 nalus.push_back(nalu);
285 data_size += kNALUHeaderLength + nalu.size; 293 data_size += kNALUHeaderLength + nalu.size;
286 break; 294 break;
287 } 295 }
288 } 296 }
289 297
290 // 2. Initialize VideoToolbox. 298 // Initialize VideoToolbox.
291 // TODO(sandersd): Check if the new configuration is identical before 299 // TODO(sandersd): Instead of assuming that the last SPS and PPS units are
292 // reconfiguring. 300 // always the correct ones, maintain a cache of recent SPS and PPS units and
301 // select from them using the slice header.
293 if (config_changed) { 302 if (config_changed) {
DaleCurtis 2014/11/10 20:48:02 Do you do this here instead of Initialize() since
sandersd (OOO until July 31) 2014/11/11 18:46:07 That's part of it, but also the config can change
294 if (last_sps_.size() == 0 || last_pps_.size() == 0) { 303 if (last_sps_.size() == 0 || last_pps_.size() == 0) {
295 LOG(ERROR) << "Invalid configuration data"; 304 LOG(ERROR) << "Invalid configuration data";
DaleCurtis 2014/11/10 20:48:02 You should convert all these LOG(ERROR) calls to D
sandersd (OOO until July 31) 2014/11/11 18:46:07 Ah, you're quite right, the "VDA Error" message pr
DaleCurtis 2014/11/11 21:02:34 You might file a bug to do this after a default on
sandersd (OOO until July 31) 2014/11/11 22:14:39 Perhaps it would be better to temporarily add a lo
296 NotifyError(INVALID_ARGUMENT); 305 NotifyError(INVALID_ARGUMENT);
297 return; 306 return;
298 } 307 }
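A hedged illustration of the DVLOG suggestion in the thread above (not part of this patch; DVLOG() is compiled out of release builds unless verbose logging is enabled):

    // Illustrative only: once a default "VDA Error" message is reported
    // elsewhere, the detailed reason can drop to verbose-only logging.
    DVLOG(1) << "Invalid configuration data";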
299 // TODO(sandersd): Check that the SPS and PPS IDs match. 308 if (!ConfigureDecoder())
300 std::vector<const uint8_t*> nalu_data_ptrs;
301 std::vector<size_t> nalu_data_sizes;
302 nalu_data_ptrs.push_back(&last_sps_.front());
303 nalu_data_sizes.push_back(last_sps_.size());
304 if (last_spsext_.size() != 0) {
305 nalu_data_ptrs.push_back(&last_spsext_.front());
306 nalu_data_sizes.push_back(last_spsext_.size());
307 }
308 nalu_data_ptrs.push_back(&last_pps_.front());
309 nalu_data_sizes.push_back(last_pps_.size());
310
311 // If ConfigureDecoder() fails, it already called NotifyError().
312 if (!ConfigureDecoder(nalu_data_ptrs, nalu_data_sizes))
313 return; 309 return;
314 } 310 }
315 311
316 // If there are no non-configuration units, immediately return an empty 312 // If there are no non-configuration units, drop the bitstream buffer by
317 // (ie. dropped) frame. It is an error to create a MemoryBlock with zero 313 // returning an empty frame.
318 // size. 314 if (!data_size) {
319 if (!data_size) 315 if (!FinishDelayedFrames())
316 return;
317 DecodeDone(frame);
320 return; 318 return;
319 }
321 320
322 // If the session is not configured, fail. 321 // If the session is not configured by this point, fail.
323 if (!session_) { 322 if (!session_) {
324 LOG(ERROR) << "Image slice without configuration data"; 323 LOG(ERROR) << "Image slice without configuration";
325 NotifyError(INVALID_ARGUMENT); 324 NotifyError(INVALID_ARGUMENT);
326 return; 325 return;
327 } 326 }
328 327
329 // 3. Allocate a memory-backed CMBlockBuffer for the translated data. 328 // Create a memory-backed CMBlockBuffer for the translated data.
330 // TODO(sandersd): Check that the slice's PPS matches the current PPS.
331 base::ScopedCFTypeRef<CMBlockBufferRef> data; 329 base::ScopedCFTypeRef<CMBlockBufferRef> data;
332 OSStatus status = CMBlockBufferCreateWithMemoryBlock( 330 OSStatus status = CMBlockBufferCreateWithMemoryBlock(
333 kCFAllocatorDefault, 331 kCFAllocatorDefault,
334 NULL, // &memory_block 332 NULL, // &memory_block
335 data_size, // block_length 333 data_size, // block_length
DaleCurtis 2014/11/10 21:10:39 How big is this typically? You may want to mark t
sandersd (OOO until July 31) 2014/11/11 18:46:07 Done.
336 kCFAllocatorDefault, // block_allocator 334 kCFAllocatorDefault, // block_allocator
337 NULL, // &custom_block_source 335 NULL, // &custom_block_source
338 0, // offset_to_data 336 0, // offset_to_data
339 data_size, // data_length 337 data_size, // data_length
340 0, // flags 338 0, // flags
341 data.InitializeInto()); 339 data.InitializeInto());
342 if (status) { 340 if (status) {
343 NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status); 341 NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status);
344 return; 342 return;
345 } 343 }
346 344
347 // 4. Copy NALU data, inserting length headers. 345 // Copy NALU data into the CMBlockBuffer, inserting length headers.
348 size_t offset = 0; 346 size_t offset = 0;
349 for (size_t i = 0; i < nalus.size(); i++) { 347 for (size_t i = 0; i < nalus.size(); i++) {
350 media::H264NALU& nalu = nalus[i]; 348 media::H264NALU& nalu = nalus[i];
351 uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size)); 349 uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
352 status = CMBlockBufferReplaceDataBytes( 350 status = CMBlockBufferReplaceDataBytes(
353 &header, data, offset, kNALUHeaderLength); 351 &header, data, offset, kNALUHeaderLength);
354 if (status) { 352 if (status) {
355 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status); 353 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status);
356 return; 354 return;
357 } 355 }
358 offset += kNALUHeaderLength; 356 offset += kNALUHeaderLength;
359 status = CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size); 357 status = CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size);
360 if (status) { 358 if (status) {
361 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status); 359 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status);
362 return; 360 return;
363 } 361 }
364 offset += nalu.size; 362 offset += nalu.size;
365 } 363 }
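For reference, a self-contained sketch of the same Annex B to length-prefixed (AVCC-style) rewrite that the loop above performs through CMBlockBufferReplaceDataBytes(); illustrative only, assuming the 4-byte header size from kNALUHeaderLength:

    // Sketch, not part of the patch: prefix one NALU payload with a 4-byte
    // big-endian length header, which is the layout VideoToolbox expects.
    #include <arpa/inet.h>  // htonl()
    #include <stdint.h>
    #include <string.h>
    #include <vector>

    std::vector<uint8_t> PrefixWithLengthHeader(const uint8_t* nalu_data,
                                                size_t nalu_size) {
      std::vector<uint8_t> out(4 + nalu_size);
      const uint32_t header = htonl(static_cast<uint32_t>(nalu_size));
      memcpy(out.data(), &header, 4);             // length header (network order)
      memcpy(out.data() + 4, nalu_data, nalu_size);  // NALU payload, no start code
      return out;
    }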
366 364
367 // 5. Package the data for VideoToolbox and request decoding. 365 // Package the data in a CMSampleBuffer.
368 base::ScopedCFTypeRef<CMSampleBufferRef> frame; 366 base::ScopedCFTypeRef<CMSampleBufferRef> sample;
369 status = CMSampleBufferCreate( 367 status = CMSampleBufferCreate(
370 kCFAllocatorDefault, 368 kCFAllocatorDefault,
371 data, // data_buffer 369 data, // data_buffer
372 true, // data_ready 370 true, // data_ready
373 NULL, // make_data_ready_callback 371 NULL, // make_data_ready_callback
374 NULL, // make_data_ready_refcon 372 NULL, // make_data_ready_refcon
375 format_, // format_description 373 format_, // format_description
376 1, // num_samples 374 1, // num_samples
377 0, // num_sample_timing_entries 375 0, // num_sample_timing_entries
378 NULL, // &sample_timing_array 376 NULL, // &sample_timing_array
379 0, // num_sample_size_entries 377 0, // num_sample_size_entries
380 NULL, // &sample_size_array 378 NULL, // &sample_size_array
381 frame.InitializeInto()); 379 sample.InitializeInto());
382 if (status) { 380 if (status) {
383 NOTIFY_STATUS("CMSampleBufferCreate()", status); 381 NOTIFY_STATUS("CMSampleBufferCreate()", status);
384 return; 382 return;
385 } 383 }
386 384
385 // Update the frame data.
386 frame->coded_size = coded_size_;
387
388 // Send the frame for decoding.
387 // Asynchronous Decompression allows for parallel submission of frames 389 // Asynchronous Decompression allows for parallel submission of frames
388 // (without it, DecodeFrame() does not return until the frame has been 390 // (without it, DecodeFrame() does not return until the frame has been
389 // decoded). We don't enable Temporal Processing so that frames are always 391 // decoded). We don't enable Temporal Processing so that frames are always
390 // returned in decode order; this makes it easier to avoid deadlock. 392 // returned in decode order; this makes it easier to avoid deadlock.
391 VTDecodeFrameFlags decode_flags = 393 VTDecodeFrameFlags decode_flags =
392 kVTDecodeFrame_EnableAsynchronousDecompression; 394 kVTDecodeFrame_EnableAsynchronousDecompression;
393
394 intptr_t bitstream_id = bitstream.id();
395 status = VTDecompressionSessionDecodeFrame( 395 status = VTDecompressionSessionDecodeFrame(
396 session_, 396 session_,
397 frame, // sample_buffer 397 sample, // sample_buffer
398 decode_flags, // decode_flags 398 decode_flags, // decode_flags
399 reinterpret_cast<void*>(bitstream_id), // source_frame_refcon 399 reinterpret_cast<void*>(frame), // source_frame_refcon
400 NULL); // &info_flags_out 400 NULL); // &info_flags_out
401 if (status) { 401 if (status) {
402 NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status); 402 NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status);
403 return; 403 return;
404 } 404 }
405
406 // Now that the bitstream is decoding, don't drop it.
407 (void)drop_bitstream.Release();
408 } 405 }
409 406
410 // This method may be called on any VideoToolbox thread. 407 // This method may be called on any VideoToolbox thread.
411 void VTVideoDecodeAccelerator::Output( 408 void VTVideoDecodeAccelerator::Output(
412 int32_t bitstream_id, 409 void* source_frame_refcon,
413 OSStatus status, 410 OSStatus status,
414 CVImageBufferRef image_buffer) { 411 CVImageBufferRef image_buffer) {
415 if (status) { 412 if (status) {
416 // TODO(sandersd): Handle dropped frames.
417 NOTIFY_STATUS("Decoding", status); 413 NOTIFY_STATUS("Decoding", status);
418 image_buffer = NULL;
419 } else if (CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) { 414 } else if (CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) {
420 LOG(ERROR) << "Decoded frame is not a CVPixelBuffer"; 415 LOG(ERROR) << "Decoded frame is not a CVPixelBuffer";
421 NotifyError(PLATFORM_FAILURE); 416 NotifyError(PLATFORM_FAILURE);
422 image_buffer = NULL;
423 } else { 417 } else {
424 CFRetain(image_buffer); 418 Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
419 frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
420 DecodeDone(frame);
425 } 421 }
426 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
427 &VTVideoDecodeAccelerator::OutputTask,
428 weak_this_factory_.GetWeakPtr(),
429 DecodedFrame(bitstream_id, image_buffer)));
430 } 422 }
431 423
432 void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) { 424 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
425 if (!CalledOnValidThread()) {
DaleCurtis 2014/11/10 21:10:39 If Output() is always going to call on the wrong t
sandersd (OOO until July 31) 2014/11/11 18:46:07 Done.
426 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
427 &VTVideoDecodeAccelerator::DecodeDone,
428 weak_this_factory_.GetWeakPtr(), frame));
429 } else {
430 DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id);
431 Task task(TASK_FRAME);
432 task.frame = pending_frames_.front();
433 pending_frames_.pop();
434 pending_tasks_.push(task);
435 ProcessTasks();
436 }
437 }
438
439 void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
440 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
441 FinishDelayedFrames();
442
443 // Always queue a task, even if FinishDelayedFrames() fails, so that
444 // destruction always completes.
445 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
446 &VTVideoDecodeAccelerator::FlushDone,
447 weak_this_factory_.GetWeakPtr(), type));
448 }
449
450 void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
433 DCHECK(CalledOnValidThread()); 451 DCHECK(CalledOnValidThread());
DaleCurtis 2014/11/10 21:10:39 CalledOnValidThread isn't very readable when there
sandersd (OOO until July 31) 2014/11/11 18:46:06 I could add a wrapper method, but CalledOnValidThr
DaleCurtis 2014/11/11 21:02:34 Hmm instead of inheriting non-threadsafe you shoul
sandersd (OOO until July 31) 2014/11/11 22:14:39 Done.
434 decoded_frames_.push(frame); 452 pending_tasks_.push(Task(type));
435 ProcessDecodedFrames(); 453 ProcessTasks();
454 }
455
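A minimal sketch of the base::ThreadChecker alternative discussed in the thread above (the class and member names here are hypothetical, not taken from the patch):

    // Sketch only: a ThreadChecker member bound to the GPU thread replaces
    // inheriting from a non-thread-safe base and calling CalledOnValidThread()
    // directly on the class.
    #include "base/logging.h"
    #include "base/threading/thread_checker.h"

    class ExampleDecoder {
     public:
      void FlushDone() {
        // Asserts this runs on the thread that created |gpu_thread_checker_|.
        DCHECK(gpu_thread_checker_.CalledOnValidThread());
        // ... handle the flush ...
      }

     private:
      base::ThreadChecker gpu_thread_checker_;
    };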
456 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
457 DCHECK(CalledOnValidThread());
458 DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u);
459 assigned_bitstream_ids_.insert(bitstream.id());
460 Frame* frame = new Frame(bitstream.id());
DaleCurtis 2014/11/10 21:10:39 Why does Frame have a bitstream_id instead of owni
sandersd (OOO until July 31) 2014/11/11 18:46:07 The ownership and lifetime story was not very clea
DaleCurtis 2014/11/11 21:02:34 I'd spend some time cleaning up the ownership mode
sandersd (OOO until July 31) 2014/11/11 22:14:39 I do not, and this is the best I had on my third r
461 pending_frames_.push(make_linked_ptr(frame));
462 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
463 &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this),
464 bitstream, frame));
436 } 465 }
437 466
438 void VTVideoDecodeAccelerator::AssignPictureBuffers( 467 void VTVideoDecodeAccelerator::AssignPictureBuffers(
439 const std::vector<media::PictureBuffer>& pictures) { 468 const std::vector<media::PictureBuffer>& pictures) {
440 DCHECK(CalledOnValidThread()); 469 DCHECK(CalledOnValidThread());
441 470
442 for (size_t i = 0; i < pictures.size(); i++) { 471 for (const media::PictureBuffer& picture : pictures) {
443 DCHECK(!texture_ids_.count(pictures[i].id())); 472 DCHECK(!texture_ids_.count(picture.id()));
444 assigned_picture_ids_.insert(pictures[i].id()); 473 assigned_picture_ids_.insert(picture.id());
445 available_picture_ids_.push_back(pictures[i].id()); 474 available_picture_ids_.push_back(picture.id());
446 texture_ids_[pictures[i].id()] = pictures[i].texture_id(); 475 texture_ids_[picture.id()] = picture.texture_id();
447 } 476 }
448 477
449 // Pictures are not marked as uncleared until after this method returns, and 478 // Pictures are not marked as uncleared until after this method returns, and
450 // they will be broken if they are used before that happens. So, schedule 479 // they will be broken if they are used before that happens. So, schedule
451 // future work after that happens. 480 // future work after that happens.
452 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( 481 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
453 &VTVideoDecodeAccelerator::ProcessDecodedFrames, 482 &VTVideoDecodeAccelerator::ProcessTasks,
454 weak_this_factory_.GetWeakPtr())); 483 weak_this_factory_.GetWeakPtr()));
455 } 484 }
456 485
457 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { 486 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
458 DCHECK(CalledOnValidThread()); 487 DCHECK(CalledOnValidThread());
459 DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1); 488 DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1);
460 picture_bindings_.erase(picture_id); 489 picture_bindings_.erase(picture_id);
461 // Don't put the picture back in the available list if has been dismissed.
462 if (assigned_picture_ids_.count(picture_id) != 0) { 490 if (assigned_picture_ids_.count(picture_id) != 0) {
463 available_picture_ids_.push_back(picture_id); 491 available_picture_ids_.push_back(picture_id);
464 ProcessDecodedFrames(); 492 ProcessTasks();
493 } else {
494 client_->DismissPictureBuffer(picture_id);
465 } 495 }
466 } 496 }
467 497
468 void VTVideoDecodeAccelerator::CompleteAction(Action action) { 498 void VTVideoDecodeAccelerator::ProcessTasks() {
469 DCHECK(CalledOnValidThread()); 499 DCHECK(CalledOnValidThread());
470 500
471 switch (action) { 501 while (!pending_tasks_.empty()) {
472 case ACTION_FLUSH: 502 const Task& task = pending_tasks_.front();
473 client_->NotifyFlushDone(); 503
474 break; 504 switch (state_) {
475 case ACTION_RESET: 505 case STATE_NORMAL:
476 client_->NotifyResetDone(); 506 if (!ProcessTask(task))
477 break; 507 return;
478 case ACTION_DESTROY: 508 pending_tasks_.pop();
479 delete this; 509 break;
480 break; 510
511 case STATE_ERROR:
512 // Do nothing until Destroy() is called.
513 break;
514
515 case STATE_DESTROYING:
516 // Discard tasks until destruction is complete.
517 if (task.type == TASK_DESTROY) {
518 delete this;
519 return;
520 }
521 pending_tasks_.pop();
522 break;
523 }
481 } 524 }
482 } 525 }
483 526
484 void VTVideoDecodeAccelerator::CompleteActions(int32_t bitstream_id) { 527 bool VTVideoDecodeAccelerator::ProcessTask(const Task& task) {
485 DCHECK(CalledOnValidThread()); 528 DCHECK(CalledOnValidThread());
486 while (!pending_actions_.empty() && 529 DCHECK_EQ(state_, STATE_NORMAL);
487 pending_actions_.front().bitstream_id == bitstream_id) { 530
488 CompleteAction(pending_actions_.front().action); 531 switch (task.type) {
489 pending_actions_.pop(); 532 case TASK_FRAME:
533 return ProcessFrame(*task.frame);
534
535 case TASK_FLUSH:
536 DCHECK_EQ(task.type, pending_flush_tasks_.front());
537 pending_flush_tasks_.pop();
538 client_->NotifyFlushDone();
539 return true;
540
541 case TASK_RESET:
542 DCHECK_EQ(task.type, pending_flush_tasks_.front());
543 pending_flush_tasks_.pop();
544 client_->NotifyResetDone();
545 return true;
546
547 case TASK_DESTROY:
548 NOTREACHED() << "Can't destroy while in STATE_NORMAL.";
549 NotifyError(ILLEGAL_STATE);
550 return false;
490 } 551 }
491 } 552 }
492 553
493 void VTVideoDecodeAccelerator::ProcessDecodedFrames() { 554 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
494 DCHECK(CalledOnValidThread()); 555 DCHECK(CalledOnValidThread());
556 DCHECK_EQ(state_, STATE_NORMAL);
DaleCurtis 2014/11/10 21:10:39 This needs a comment on exactly what's happening h
sandersd (OOO until July 31) 2014/11/11 18:46:07 Done.
557 bool resetting = !pending_flush_tasks_.empty() &&
558 pending_flush_tasks_.front() == TASK_RESET;
559 if (!resetting && frame.image.get()) {
560 if (picture_size_ != frame.coded_size) {
561 // Dismiss current pictures.
562 for (int32_t picture_id : assigned_picture_ids_)
563 client_->DismissPictureBuffer(picture_id);
564 assigned_picture_ids_.clear();
565 available_picture_ids_.clear();
495 566
496 while (!decoded_frames_.empty()) { 567 // Request new pictures.
497 if (pending_actions_.empty()) { 568 picture_size_ = frame.coded_size;
498 // No pending actions; send frames normally. 569 client_->ProvidePictureBuffers(
499 if (!has_error_) 570 kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB);
500 SendPictures(pending_bitstream_ids_.back()); 571 return false;
501 return;
502 } 572 }
503 573 if (!SendFrame(frame))
504 int32_t next_action_bitstream_id = pending_actions_.front().bitstream_id; 574 return false;
505 int32_t last_sent_bitstream_id = -1;
506 switch (pending_actions_.front().action) {
507 case ACTION_FLUSH:
508 // Send frames normally.
509 if (has_error_)
510 return;
511 last_sent_bitstream_id = SendPictures(next_action_bitstream_id);
512 break;
513
514 case ACTION_RESET:
515 // Drop decoded frames.
516 if (has_error_)
517 return;
518 while (!decoded_frames_.empty() &&
519 last_sent_bitstream_id != next_action_bitstream_id) {
520 last_sent_bitstream_id = decoded_frames_.front().bitstream_id;
521 decoded_frames_.pop();
522 DCHECK_EQ(pending_bitstream_ids_.front(), last_sent_bitstream_id);
523 pending_bitstream_ids_.pop();
524 client_->NotifyEndOfBitstreamBuffer(last_sent_bitstream_id);
525 }
526 break;
527
528 case ACTION_DESTROY:
529 // Drop decoded frames, without bookkeeping.
530 while (!decoded_frames_.empty()) {
531 last_sent_bitstream_id = decoded_frames_.front().bitstream_id;
532 decoded_frames_.pop();
533 }
534
535 // Handle completing the action specially, as it is important not to
536 // access |this| after calling CompleteAction().
537 if (last_sent_bitstream_id == next_action_bitstream_id)
538 CompleteAction(ACTION_DESTROY);
539
540 // Either |this| was deleted or no more progress can be made.
541 return;
542 }
543
544 // If we ran out of buffers (or pictures), no more progress can be made
545 // until more frames are decoded.
546 if (last_sent_bitstream_id != next_action_bitstream_id)
547 return;
548
549 // Complete all actions pending for this |bitstream_id|, then loop to see
550 // if progress can be made on the next action.
551 CompleteActions(next_action_bitstream_id);
552 } 575 }
576 assigned_bitstream_ids_.erase(frame.bitstream_id);
577 client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
578 return true;
553 } 579 }
554 580
555 int32_t VTVideoDecodeAccelerator::ProcessDroppedFrames( 581 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
556 int32_t last_sent_bitstream_id,
557 int32_t up_to_bitstream_id) {
558 DCHECK(CalledOnValidThread()); 582 DCHECK(CalledOnValidThread());
559 // Drop frames as long as there is a frame, we have not reached the next 583 DCHECK_EQ(state_, STATE_NORMAL);
560 // action, and the next frame has no image.
561 while (!decoded_frames_.empty() &&
562 last_sent_bitstream_id != up_to_bitstream_id &&
563 decoded_frames_.front().image_buffer.get() == NULL) {
564 const DecodedFrame& frame = decoded_frames_.front();
565 DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
566 client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
567 last_sent_bitstream_id = frame.bitstream_id;
568 decoded_frames_.pop();
569 pending_bitstream_ids_.pop();
570 }
571 return last_sent_bitstream_id;
572 }
573 584
574 // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of 585 if (available_picture_ids_.empty())
575 // textures, this would be unnecessary, as the size is actually a property of 586 return false;
576 // the texture binding, not the texture. We rebind every frame, so the size
577 // passed to ProvidePictureBuffers() is meaningless.
578 void VTVideoDecodeAccelerator::ProcessSizeChange() {
579 DCHECK(CalledOnValidThread());
580 DCHECK(!decoded_frames_.empty());
581 587
582 // Find the size of the next image. 588 int32_t picture_id = available_picture_ids_.back();
583 const DecodedFrame& frame = decoded_frames_.front(); 589 IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get());
584 CVImageBufferRef image_buffer = frame.image_buffer.get();
585 size_t width = CVPixelBufferGetWidth(image_buffer);
586 size_t height = CVPixelBufferGetHeight(image_buffer);
587 gfx::Size image_size(width, height);
588
589 if (picture_size_ != image_size) {
590 // Dismiss all assigned picture buffers.
591 for (int32_t picture_id : assigned_picture_ids_)
592 client_->DismissPictureBuffer(picture_id);
593 assigned_picture_ids_.clear();
594 available_picture_ids_.clear();
595
596 // Request new pictures.
597 client_->ProvidePictureBuffers(
598 kNumPictureBuffers, image_size, GL_TEXTURE_RECTANGLE_ARB);
599 picture_size_ = image_size;
600 }
601 }
602
603 int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) {
604 DCHECK(CalledOnValidThread());
605 DCHECK(!decoded_frames_.empty());
606
607 // TODO(sandersd): Store the actual last sent bitstream ID?
608 int32_t last_sent_bitstream_id = -1;
609
610 last_sent_bitstream_id =
611 ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
612 if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
613 return last_sent_bitstream_id;
614
615 ProcessSizeChange();
616 if (available_picture_ids_.empty())
617 return last_sent_bitstream_id;
618 590
619 if (!make_context_current_.Run()) { 591 if (!make_context_current_.Run()) {
620 LOG(ERROR) << "Failed to make GL context current"; 592 LOG(ERROR) << "Failed to make GL context current";
621 NotifyError(PLATFORM_FAILURE); 593 NotifyError(PLATFORM_FAILURE);
622 return last_sent_bitstream_id; 594 return false;
623 } 595 }
624 596
625 glEnable(GL_TEXTURE_RECTANGLE_ARB); 597 glEnable(GL_TEXTURE_RECTANGLE_ARB);
626 while (!available_picture_ids_.empty() && !has_error_) { 598 gfx::ScopedTextureBinder
627 DCHECK_NE(last_sent_bitstream_id, up_to_bitstream_id); 599 texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
628 DCHECK(!decoded_frames_.empty()); 600 CGLError status = CGLTexImageIOSurface2D(
629 601 cgl_context_, // ctx
630 // We don't pop |frame| or |picture_id| until they are consumed, which may 602 GL_TEXTURE_RECTANGLE_ARB, // target
631 // not happen if an error occurs. Conveniently, this also removes some 603 GL_RGB, // internal_format
632 // refcounting. 604 frame.coded_size.width(), // width
633 const DecodedFrame& frame = decoded_frames_.front(); 605 frame.coded_size.height(), // height
634 DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id); 606 GL_YCBCR_422_APPLE, // format
635 int32_t picture_id = available_picture_ids_.back(); 607 GL_UNSIGNED_SHORT_8_8_APPLE, // type
636 608 surface, // io_surface
637 CVImageBufferRef image_buffer = frame.image_buffer.get(); 609 0); // plane
638 IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer); 610 if (status != kCGLNoError) {
639 611 NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
640 gfx::ScopedTextureBinder 612 return false;
641 texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
642 CGLError status = CGLTexImageIOSurface2D(
643 cgl_context_, // ctx
644 GL_TEXTURE_RECTANGLE_ARB, // target
645 GL_RGB, // internal_format
646 picture_size_.width(), // width
647 picture_size_.height(), // height
648 GL_YCBCR_422_APPLE, // format
649 GL_UNSIGNED_SHORT_8_8_APPLE, // type
650 surface, // io_surface
651 0); // plane
652 if (status != kCGLNoError) {
653 NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
654 break;
655 }
656
657 picture_bindings_[picture_id] = frame.image_buffer;
658 client_->PictureReady(media::Picture(
659 picture_id, frame.bitstream_id, gfx::Rect(picture_size_)));
660 available_picture_ids_.pop_back();
661 client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
662 last_sent_bitstream_id = frame.bitstream_id;
663 decoded_frames_.pop();
664 pending_bitstream_ids_.pop();
665
666 last_sent_bitstream_id =
667 ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
668 if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
669 break;
670
671 ProcessSizeChange();
672 } 613 }
673 glDisable(GL_TEXTURE_RECTANGLE_ARB); 614 glDisable(GL_TEXTURE_RECTANGLE_ARB);
674 615
675 return last_sent_bitstream_id; 616 available_picture_ids_.pop_back();
676 } 617 picture_bindings_[picture_id] = frame.image;
677 618 client_->PictureReady(media::Picture(
678 void VTVideoDecodeAccelerator::FlushTask() { 619 picture_id, frame.bitstream_id, gfx::Rect(frame.coded_size)));
679 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); 620 return true;
680 OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_);
681 if (status)
682 NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status);
683 }
684
685 void VTVideoDecodeAccelerator::QueueAction(Action action) {
686 DCHECK(CalledOnValidThread());
687 if (pending_bitstream_ids_.empty()) {
688 // If there are no pending frames, all actions complete immediately.
689 CompleteAction(action);
690 } else {
691 // Otherwise, queue the action.
692 pending_actions_.push(PendingAction(action, pending_bitstream_ids_.back()));
693
694 // Request a flush to make sure the action will eventually complete.
695 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
696 &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this)));
697
698 // See if we can make progress now that there is a new pending action.
699 ProcessDecodedFrames();
700 }
701 } 621 }
702 622
703 void VTVideoDecodeAccelerator::NotifyError(Error error) { 623 void VTVideoDecodeAccelerator::NotifyError(Error error) {
704 if (!CalledOnValidThread()) { 624 if (!CalledOnValidThread()) {
705 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( 625 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
706 &VTVideoDecodeAccelerator::NotifyError, 626 &VTVideoDecodeAccelerator::NotifyError,
707 weak_this_factory_.GetWeakPtr(), 627 weak_this_factory_.GetWeakPtr(), error));
708 error)); 628 } else if (state_ == STATE_NORMAL) {
709 return; 629 state_ = STATE_ERROR;
630 client_->NotifyError(error);
710 } 631 }
711 has_error_ = true;
712 client_->NotifyError(error);
713 } 632 }
714 633
715 void VTVideoDecodeAccelerator::DropBitstream(int32_t bitstream_id) { 634 void VTVideoDecodeAccelerator::QueueFlush(TaskType type) {
716 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); 635 DCHECK(CalledOnValidThread());
717 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( 636 pending_flush_tasks_.push(type);
718 &VTVideoDecodeAccelerator::OutputTask, 637 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
719 weak_this_factory_.GetWeakPtr(), 638 &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this),
720 DecodedFrame(bitstream_id, NULL))); 639 type));
640
641 // If this is a new flush reuqest, see if we can make progress.
DaleCurtis 2014/11/10 20:48:02 sp: request
sandersd (OOO until July 31) 2014/11/11 18:46:06 Done.
642 if (pending_flush_tasks_.size() == 1)
643 ProcessTasks();
721 } 644 }
722 645
723 void VTVideoDecodeAccelerator::Flush() { 646 void VTVideoDecodeAccelerator::Flush() {
724 DCHECK(CalledOnValidThread()); 647 DCHECK(CalledOnValidThread());
725 QueueAction(ACTION_FLUSH); 648 QueueFlush(TASK_FLUSH);
726 } 649 }
727 650
728 void VTVideoDecodeAccelerator::Reset() { 651 void VTVideoDecodeAccelerator::Reset() {
729 DCHECK(CalledOnValidThread()); 652 DCHECK(CalledOnValidThread());
730 QueueAction(ACTION_RESET); 653 QueueFlush(TASK_RESET);
731 } 654 }
732 655
733 void VTVideoDecodeAccelerator::Destroy() { 656 void VTVideoDecodeAccelerator::Destroy() {
734 DCHECK(CalledOnValidThread()); 657 DCHECK(CalledOnValidThread());
735 // Drop any other pending actions. 658 for (int32_t bitstream_id : assigned_bitstream_ids_)
736 while (!pending_actions_.empty()) 659 client_->NotifyEndOfBitstreamBuffer(bitstream_id);
737 pending_actions_.pop(); 660 assigned_bitstream_ids_.clear();
738 // Return all bitstream buffers. 661 state_ = STATE_DESTROYING;
739 while (!pending_bitstream_ids_.empty()) { 662 QueueFlush(TASK_DESTROY);
740 client_->NotifyEndOfBitstreamBuffer(pending_bitstream_ids_.front());
741 pending_bitstream_ids_.pop();
742 }
743 QueueAction(ACTION_DESTROY);
744 } 663 }
745 664
746 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { 665 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() {
747 return false; 666 return false;
748 } 667 }
749 668
750 } // namespace content 669 } // namespace content