Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <CoreVideo/CoreVideo.h> | 5 #include <CoreVideo/CoreVideo.h> |
| 6 #include <OpenGL/CGLIOSurface.h> | 6 #include <OpenGL/CGLIOSurface.h> |
| 7 #include <OpenGL/gl.h> | 7 #include <OpenGL/gl.h> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 20 matching lines...) | |
| 31 | 31 |
| 32 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4). | 32 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4). |
| 33 static const int kNALUHeaderLength = 4; | 33 static const int kNALUHeaderLength = 4; |
| 34 | 34 |
| 35 // We request 5 picture buffers from the client, each of which has a texture ID | 35 // We request 5 picture buffers from the client, each of which has a texture ID |
| 36 // that we can bind decoded frames to. We need enough to satisfy preroll, and | 36 // that we can bind decoded frames to. We need enough to satisfy preroll, and |
| 37 // enough to avoid unnecessary stalling, but no more than that. The resource | 37 // enough to avoid unnecessary stalling, but no more than that. The resource |
| 38 // requirements are low, as we don't need the textures to be backed by storage. | 38 // requirements are low, as we don't need the textures to be backed by storage. |
| 39 static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1; | 39 static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1; |
| 40 | 40 |
| 41 // TODO(sandersd): Use the configured reorder window instead. | |
| 42 static const int kMinReorderQueueSize = 4; | |
| 43 static const int kMaxReorderQueueSize = 16; | |
| 44 | |
| 41 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. | 45 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. |
| 42 static void OutputThunk( | 46 static void OutputThunk( |
| 43 void* decompression_output_refcon, | 47 void* decompression_output_refcon, |
| 44 void* source_frame_refcon, | 48 void* source_frame_refcon, |
| 45 OSStatus status, | 49 OSStatus status, |
| 46 VTDecodeInfoFlags info_flags, | 50 VTDecodeInfoFlags info_flags, |
| 47 CVImageBufferRef image_buffer, | 51 CVImageBufferRef image_buffer, |
| 48 CMTime presentation_time_stamp, | 52 CMTime presentation_time_stamp, |
| 49 CMTime presentation_duration) { | 53 CMTime presentation_duration) { |
| 50 VTVideoDecodeAccelerator* vda = | 54 VTVideoDecodeAccelerator* vda = |
| 51 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); | 55 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); |
| 52 vda->Output(source_frame_refcon, status, image_buffer); | 56 vda->Output(source_frame_refcon, status, image_buffer); |
| 53 } | 57 } |
| 54 | 58 |
| 55 VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) { | 59 VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) { |
| 56 } | 60 } |
| 57 | 61 |
| 58 VTVideoDecodeAccelerator::Task::~Task() { | 62 VTVideoDecodeAccelerator::Task::~Task() { |
| 59 } | 63 } |
| 60 | 64 |
| 61 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id) | 65 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id) |
| 62 : bitstream_id(bitstream_id) { | 66 : bitstream_id(bitstream_id), pic_order_cnt(0) { |
| 63 } | 67 } |
| 64 | 68 |
| 65 VTVideoDecodeAccelerator::Frame::~Frame() { | 69 VTVideoDecodeAccelerator::Frame::~Frame() { |
| 66 } | 70 } |
| 67 | 71 |
| 72 bool VTVideoDecodeAccelerator::FrameOrder::operator()( | |
| 73 const linked_ptr<Frame>& lhs, | |
| 74 const linked_ptr<Frame>& rhs) const { | |
| 75 if (lhs->pic_order_cnt != rhs->pic_order_cnt) | |
| 76 return lhs->pic_order_cnt > rhs->pic_order_cnt; | |
| 77 // If the pic_order_cnt is the same, fall back on the bitstream order. | |
| 78 // TODO(sandersd): Assign a sequence number in Decode(). | |
| 79 return lhs->bitstream_id > rhs->bitstream_id; | |
| 80 } | |
| 81 | |
| 82 | |
| 68 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( | 83 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( |
| 69 CGLContextObj cgl_context, | 84 CGLContextObj cgl_context, |
| 70 const base::Callback<bool(void)>& make_context_current) | 85 const base::Callback<bool(void)>& make_context_current) |
| 71 : cgl_context_(cgl_context), | 86 : cgl_context_(cgl_context), |
| 72 make_context_current_(make_context_current), | 87 make_context_current_(make_context_current), |
| 73 client_(NULL), | 88 client_(NULL), |
| 74 state_(STATE_DECODING), | 89 state_(STATE_DECODING), |
| 75 format_(NULL), | 90 format_(NULL), |
| 76 session_(NULL), | 91 session_(NULL), |
| 92 last_sps_id_(-1), | |
| 93 last_pps_id_(-1), | |
| 77 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), | 94 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
| 78 weak_this_factory_(this), | 95 weak_this_factory_(this), |
| 79 decoder_thread_("VTDecoderThread") { | 96 decoder_thread_("VTDecoderThread") { |
| 80 DCHECK(!make_context_current_.is_null()); | 97 DCHECK(!make_context_current_.is_null()); |
| 81 callback_.decompressionOutputCallback = OutputThunk; | 98 callback_.decompressionOutputCallback = OutputThunk; |
| 82 callback_.decompressionOutputRefCon = this; | 99 callback_.decompressionOutputRefCon = this; |
| 83 } | 100 } |
| 84 | 101 |
| 85 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { | 102 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { |
| 86 } | 103 } |
| (...skipping 167 matching lines...) | |
| 254 size_t data_size = 0; | 271 size_t data_size = 0; |
| 255 std::vector<media::H264NALU> nalus; | 272 std::vector<media::H264NALU> nalus; |
| 256 parser_.SetStream(buf, size); | 273 parser_.SetStream(buf, size); |
| 257 media::H264NALU nalu; | 274 media::H264NALU nalu; |
| 258 while (true) { | 275 while (true) { |
| 259 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); | 276 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu); |
| 260 if (result == media::H264Parser::kEOStream) | 277 if (result == media::H264Parser::kEOStream) |
| 261 break; | 278 break; |
| 262 if (result != media::H264Parser::kOk) { | 279 if (result != media::H264Parser::kOk) { |
| 263 DLOG(ERROR) << "Failed to find H.264 NALU"; | 280 DLOG(ERROR) << "Failed to find H.264 NALU"; |
| 264 NotifyError(PLATFORM_FAILURE); | 281 NotifyError(UNREADABLE_INPUT); |
| 265 return; | 282 return; |
| 266 } | 283 } |
| 267 switch (nalu.nal_unit_type) { | 284 switch (nalu.nal_unit_type) { |
| 268 case media::H264NALU::kSPS: | 285 case media::H264NALU::kSPS: |
| 269 last_sps_.assign(nalu.data, nalu.data + nalu.size); | 286 last_sps_.assign(nalu.data, nalu.data + nalu.size); |
| 270 last_spsext_.clear(); | 287 last_spsext_.clear(); |
| 271 config_changed = true; | 288 config_changed = true; |
| 289 if (parser_.ParseSPS(&last_sps_id_) != media::H264Parser::kOk) { | |
| 290 DLOG(ERROR) << "Could not parse SPS"; | |
| 291 NotifyError(UNREADABLE_INPUT); | |
| 292 return; | |
| 293 } | |
| 272 break; | 294 break; |
| 295 | |
| 273 case media::H264NALU::kSPSExt: | 296 case media::H264NALU::kSPSExt: |
| 274 // TODO(sandersd): Check that the previous NALU was an SPS. | 297 // TODO(sandersd): Check that the previous NALU was an SPS. |
| 275 last_spsext_.assign(nalu.data, nalu.data + nalu.size); | 298 last_spsext_.assign(nalu.data, nalu.data + nalu.size); |
| 276 config_changed = true; | 299 config_changed = true; |
| 277 break; | 300 break; |
| 301 | |
| 278 case media::H264NALU::kPPS: | 302 case media::H264NALU::kPPS: |
| 279 last_pps_.assign(nalu.data, nalu.data + nalu.size); | 303 last_pps_.assign(nalu.data, nalu.data + nalu.size); |
| 280 config_changed = true; | 304 config_changed = true; |
| 305 if (parser_.ParsePPS(&last_pps_id_) != media::H264Parser::kOk) { | |
| 306 DLOG(ERROR) << "Could not parse PPS"; | |
| 307 NotifyError(UNREADABLE_INPUT); | |
| 308 return; | |
| 309 } | |
| 281 break; | 310 break; |
| 311 | |
| 282 case media::H264NALU::kSliceDataA: | 312 case media::H264NALU::kSliceDataA: |
| 283 case media::H264NALU::kSliceDataB: | 313 case media::H264NALU::kSliceDataB: |
| 284 case media::H264NALU::kSliceDataC: | 314 case media::H264NALU::kSliceDataC: |
| 285 DLOG(ERROR) << "Coded slice data partitions not implemented."; | 315 DLOG(ERROR) << "Coded slice data partitions not implemented."; |
| 286 NotifyError(PLATFORM_FAILURE); | 316 NotifyError(PLATFORM_FAILURE); |
| 287 return; | 317 return; |
| 318 | |
| 319 case media::H264NALU::kNonIDRSlice: | |
| 320 // TODO(sandersd): Check that there has been an SPS or IDR slice since | |
| 321 // the last reset. | |
| 288 case media::H264NALU::kIDRSlice: | 322 case media::H264NALU::kIDRSlice: |
| 289 case media::H264NALU::kNonIDRSlice: | 323 { |
| 290 // TODO(sandersd): Compute pic_order_count. | 324 // TODO(sandersd): Make sure this only happens once per frame. |
| 325 DCHECK_EQ(frame->pic_order_cnt, 0); | |
| 326 | |
| 327 media::H264SliceHeader slice_hdr; | |
| 328 result = parser_.ParseSliceHeader(nalu, &slice_hdr); | |
| 329 if (result != media::H264Parser::kOk) { | |
| 330 DLOG(ERROR) << "Could not parse slice header"; | |
| 331 NotifyError(UNREADABLE_INPUT); | |
| 332 return; | |
| 333 } | |
| 334 | |
| 335 // TODO(sandersd): Keep a cache of recent SPS/PPS units instead of | |
| 336 // only the most recent ones. | |
| 337 DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_); | |
| 338 const media::H264PPS* pps = | |
| 339 parser_.GetPPS(slice_hdr.pic_parameter_set_id); | |
| 340 if (!pps) { | |
| 341 DLOG(ERROR) << "Missing PPS referenced by slice"; | |
| 342 NotifyError(UNREADABLE_INPUT); | |
| 343 return; | |
| 344 } | |
| 345 | |
| 346 DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_); | |
| 347 const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | |
| 348 if (!sps) { | |
| 349 DLOG(ERROR) << "Missing SPS referenced by PPS"; | |
| 350 NotifyError(UNREADABLE_INPUT); | |
| 351 return; | |
| 352 } | |
| 353 | |
| 354 // TODO(sandersd): Compute pic_order_cnt. | |
| 355 DCHECK(!slice_hdr.field_pic_flag); | |
| 356 frame->pic_order_cnt = 0; | |
| 357 } | |
| 291 default: | 358 default: |
| 292 nalus.push_back(nalu); | 359 nalus.push_back(nalu); |
| 293 data_size += kNALUHeaderLength + nalu.size; | 360 data_size += kNALUHeaderLength + nalu.size; |
| 294 break; | 361 break; |
| 295 } | 362 } |
| 296 } | 363 } |
| 297 | 364 |
| 298 // Initialize VideoToolbox. | 365 // Initialize VideoToolbox. |
| 299 // TODO(sandersd): Instead of assuming that the last SPS and PPS units are | 366 // TODO(sandersd): Instead of assuming that the last SPS and PPS units are |
| 300 // always the correct ones, maintain a cache of recent SPS and PPS units and | 367 // always the correct ones, maintain a cache of recent SPS and PPS units and |
| (...skipping 19 matching lines...) | |
| 320 return; | 387 return; |
| 321 } | 388 } |
| 322 | 389 |
| 323 // If the session is not configured by this point, fail. | 390 // If the session is not configured by this point, fail. |
| 324 if (!session_) { | 391 if (!session_) { |
| 325 DLOG(ERROR) << "Image slice without configuration"; | 392 DLOG(ERROR) << "Image slice without configuration"; |
| 326 NotifyError(INVALID_ARGUMENT); | 393 NotifyError(INVALID_ARGUMENT); |
| 327 return; | 394 return; |
| 328 } | 395 } |
| 329 | 396 |
| 397 // Update the frame metadata with configuration data. | |
| 398 frame->coded_size = coded_size_; | |
| 399 | |
| 330 // Create a memory-backed CMBlockBuffer for the translated data. | 400 // Create a memory-backed CMBlockBuffer for the translated data. |
| 331 // TODO(sandersd): Pool of memory blocks. | 401 // TODO(sandersd): Pool of memory blocks. |
| 332 base::ScopedCFTypeRef<CMBlockBufferRef> data; | 402 base::ScopedCFTypeRef<CMBlockBufferRef> data; |
| 333 OSStatus status = CMBlockBufferCreateWithMemoryBlock( | 403 OSStatus status = CMBlockBufferCreateWithMemoryBlock( |
| 334 kCFAllocatorDefault, | 404 kCFAllocatorDefault, |
| 335 NULL, // &memory_block | 405 NULL, // &memory_block |
| 336 data_size, // block_length | 406 data_size, // block_length |
| 337 kCFAllocatorDefault, // block_allocator | 407 kCFAllocatorDefault, // block_allocator |
| 338 NULL, // &custom_block_source | 408 NULL, // &custom_block_source |
| 339 0, // offset_to_data | 409 0, // offset_to_data |
| (...skipping 38 matching lines...) | |
| 378 0, // num_sample_timing_entries | 448 0, // num_sample_timing_entries |
| 379 NULL, // &sample_timing_array | 449 NULL, // &sample_timing_array |
| 380 0, // num_sample_size_entries | 450 0, // num_sample_size_entries |
| 381 NULL, // &sample_size_array | 451 NULL, // &sample_size_array |
| 382 sample.InitializeInto()); | 452 sample.InitializeInto()); |
| 383 if (status) { | 453 if (status) { |
| 384 NOTIFY_STATUS("CMSampleBufferCreate()", status); | 454 NOTIFY_STATUS("CMSampleBufferCreate()", status); |
| 385 return; | 455 return; |
| 386 } | 456 } |
| 387 | 457 |
| 388 // Update the frame data. | |
| 389 frame->coded_size = coded_size_; | |
| 390 | |
| 391 // Send the frame for decoding. | 458 // Send the frame for decoding. |
| 392 // Asynchronous Decompression allows for parallel submission of frames | 459 // Asynchronous Decompression allows for parallel submission of frames |
| 393 // (without it, DecodeFrame() does not return until the frame has been | 460 // (without it, DecodeFrame() does not return until the frame has been |
| 394 // decoded). We don't enable Temporal Processing so that frames are always | 461 // decoded). We don't enable Temporal Processing so that frames are always |
| 395 // returned in decode order; this makes it easier to avoid deadlock. | 462 // returned in decode order; this makes it easier to avoid deadlock. |
| 396 VTDecodeFrameFlags decode_flags = | 463 VTDecodeFrameFlags decode_flags = |
| 397 kVTDecodeFrame_EnableAsynchronousDecompression; | 464 kVTDecodeFrame_EnableAsynchronousDecompression; |
| 398 status = VTDecompressionSessionDecodeFrame( | 465 status = VTDecompressionSessionDecodeFrame( |
| 399 session_, | 466 session_, |
| 400 sample, // sample_buffer | 467 sample, // sample_buffer |
| (...skipping 24 matching lines...) | |
| 425 weak_this_factory_.GetWeakPtr(), frame)); | 492 weak_this_factory_.GetWeakPtr(), frame)); |
| 426 } | 493 } |
| 427 } | 494 } |
| 428 | 495 |
| 429 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { | 496 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { |
| 430 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 497 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 431 DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id); | 498 DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id); |
| 432 Task task(TASK_FRAME); | 499 Task task(TASK_FRAME); |
| 433 task.frame = pending_frames_.front(); | 500 task.frame = pending_frames_.front(); |
| 434 pending_frames_.pop(); | 501 pending_frames_.pop(); |
| 435 pending_tasks_.push(task); | 502 task_queue_.push(task); |
| 436 ProcessTasks(); | 503 ProcessWorkQueues(); |
| 437 } | 504 } |
| 438 | 505 |
| 439 void VTVideoDecodeAccelerator::FlushTask(TaskType type) { | 506 void VTVideoDecodeAccelerator::FlushTask(TaskType type) { |
| 440 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); | 507 DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| 441 FinishDelayedFrames(); | 508 FinishDelayedFrames(); |
| 442 | 509 |
| 443 // Always queue a task, even if FinishDelayedFrames() fails, so that | 510 // Always queue a task, even if FinishDelayedFrames() fails, so that |
| 444 // destruction always completes. | 511 // destruction always completes. |
| 445 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | 512 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| 446 &VTVideoDecodeAccelerator::FlushDone, | 513 &VTVideoDecodeAccelerator::FlushDone, |
| 447 weak_this_factory_.GetWeakPtr(), type)); | 514 weak_this_factory_.GetWeakPtr(), type)); |
| 448 } | 515 } |
| 449 | 516 |
| 450 void VTVideoDecodeAccelerator::FlushDone(TaskType type) { | 517 void VTVideoDecodeAccelerator::FlushDone(TaskType type) { |
| 451 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 518 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 452 pending_tasks_.push(Task(type)); | 519 task_queue_.push(Task(type)); |
| 453 ProcessTasks(); | 520 ProcessWorkQueues(); |
| 454 } | 521 } |
| 455 | 522 |
| 456 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { | 523 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { |
| 457 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 524 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 458 DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u); | 525 DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u); |
| 459 assigned_bitstream_ids_.insert(bitstream.id()); | 526 assigned_bitstream_ids_.insert(bitstream.id()); |
| 460 Frame* frame = new Frame(bitstream.id()); | 527 Frame* frame = new Frame(bitstream.id()); |
| 461 pending_frames_.push(make_linked_ptr(frame)); | 528 pending_frames_.push(make_linked_ptr(frame)); |
| 462 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( | 529 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
| 463 &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), | 530 &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
| 464 bitstream, frame)); | 531 bitstream, frame)); |
| 465 } | 532 } |
| 466 | 533 |
| 467 void VTVideoDecodeAccelerator::AssignPictureBuffers( | 534 void VTVideoDecodeAccelerator::AssignPictureBuffers( |
| 468 const std::vector<media::PictureBuffer>& pictures) { | 535 const std::vector<media::PictureBuffer>& pictures) { |
| 469 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 536 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 470 | 537 |
| 471 for (const media::PictureBuffer& picture : pictures) { | 538 for (const media::PictureBuffer& picture : pictures) { |
| 472 DCHECK(!texture_ids_.count(picture.id())); | 539 DCHECK(!texture_ids_.count(picture.id())); |
| 473 assigned_picture_ids_.insert(picture.id()); | 540 assigned_picture_ids_.insert(picture.id()); |
| 474 available_picture_ids_.push_back(picture.id()); | 541 available_picture_ids_.push_back(picture.id()); |
| 475 texture_ids_[picture.id()] = picture.texture_id(); | 542 texture_ids_[picture.id()] = picture.texture_id(); |
| 476 } | 543 } |
| 477 | 544 |
| 478 // Pictures are not marked as uncleared until after this method returns, and | 545 // Pictures are not marked as uncleared until after this method returns, and |
| 479 // they will be broken if they are used before that happens. So, schedule | 546 // they will be broken if they are used before that happens. So, schedule |
| 480 // future work after that happens. | 547 // future work after that happens. |
| 481 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( | 548 gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| 482 &VTVideoDecodeAccelerator::ProcessTasks, | 549 &VTVideoDecodeAccelerator::ProcessWorkQueues, |
| 483 weak_this_factory_.GetWeakPtr())); | 550 weak_this_factory_.GetWeakPtr())); |
| 484 } | 551 } |
| 485 | 552 |
| 486 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { | 553 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { |
| 487 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 554 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 488 DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1); | 555 DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1); |
| 489 picture_bindings_.erase(picture_id); | 556 picture_bindings_.erase(picture_id); |
| 490 if (assigned_picture_ids_.count(picture_id) != 0) { | 557 if (assigned_picture_ids_.count(picture_id) != 0) { |
| 491 available_picture_ids_.push_back(picture_id); | 558 available_picture_ids_.push_back(picture_id); |
| 492 ProcessTasks(); | 559 ProcessWorkQueues(); |
| 493 } else { | 560 } else { |
| 494 client_->DismissPictureBuffer(picture_id); | 561 client_->DismissPictureBuffer(picture_id); |
| 495 } | 562 } |
| 496 } | 563 } |
| 497 | 564 |
| 498 void VTVideoDecodeAccelerator::ProcessTasks() { | 565 void VTVideoDecodeAccelerator::ProcessWorkQueues() { |
| 499 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 566 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 567 switch (state_) { | |
| 568 case STATE_DECODING: | |
| 569 // TODO(sandersd): Batch where possible. | |
| 570 while (ProcessReorderQueue() || ProcessTaskQueue()); | |

> DaleCurtis, 2014/11/14 21:11:45: Is this something that could block for a long time?
>
> sandersd (OOO until July 31), 2014/11/14 21:28:52: This would be worth investigating. I do expect this…
>
> DaleCurtis, 2014/11/14 21:41:54: More important than the length is the time taken.
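The thread above asks whether the `while (ProcessReorderQueue() || ProcessTaskQueue());` drain loop can hold the GPU thread for too long. As a purely illustrative sketch (not part of this CL), a time-bounded variant might look like the following; `kMaxDrainTime` is a hypothetical constant, the `state_` switch is omitted for brevity, and the task runner and weak-pointer factory are the members already used elsewhere in this file:

```cpp
// Sketch only: bound the time spent draining the queues, then yield.
// |kMaxDrainTime| is a hypothetical constant, not present in this CL.
const base::TimeDelta kMaxDrainTime = base::TimeDelta::FromMilliseconds(2);

void VTVideoDecodeAccelerator::ProcessWorkQueues() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  const base::TimeTicks start = base::TimeTicks::Now();
  while (ProcessReorderQueue() || ProcessTaskQueue()) {
    if (base::TimeTicks::Now() - start > kMaxDrainTime) {
      // Repost the remainder so other GPU-thread tasks are not starved.
      gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
          &VTVideoDecodeAccelerator::ProcessWorkQueues,
          weak_this_factory_.GetWeakPtr()));
      return;
    }
  }
}
```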
| 571 return; | |
| 500 | 572 |
| 501 while (!pending_tasks_.empty()) { | 573 case STATE_ERROR: |
| 502 const Task& task = pending_tasks_.front(); | 574 // Do nothing until Destroy() is called. |
| 575 return; | |
| 503 | 576 |
| 504 switch (state_) { | 577 case STATE_DESTROYING: |
| 505 case STATE_DECODING: | 578 // Drop tasks until we are ready to destruct. |
| 506 if (!ProcessTask(task)) | 579 while (!task_queue_.empty()) { |
| 507 return; | 580 if (task_queue_.front().type == TASK_DESTROY) { |
| 508 pending_tasks_.pop(); | |
| 509 break; | |
| 510 | |
| 511 case STATE_ERROR: | |
| 512 // Do nothing until Destroy() is called. | |
| 513 return; | |
| 514 | |
| 515 case STATE_DESTROYING: | |
| 516 // Discard tasks until destruction is complete. | |
| 517 if (task.type == TASK_DESTROY) { | |
| 518 delete this; | 581 delete this; |
| 519 return; | 582 return; |
| 520 } | 583 } |
| 521 pending_tasks_.pop(); | 584 task_queue_.pop(); |
| 522 break; | 585 } |
| 523 } | 586 return; |
| 524 } | 587 } |
| 525 } | 588 } |
| 526 | 589 |
| 527 bool VTVideoDecodeAccelerator::ProcessTask(const Task& task) { | 590 bool VTVideoDecodeAccelerator::ProcessTaskQueue() { |
| 528 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 591 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 529 DCHECK_EQ(state_, STATE_DECODING); | 592 DCHECK_EQ(state_, STATE_DECODING); |
| 530 | 593 |
| 594 if (task_queue_.empty()) | |
| 595 return false; | |
| 596 | |
| 597 const Task& task = task_queue_.front(); | |
| 531 switch (task.type) { | 598 switch (task.type) { |
| 532 case TASK_FRAME: | 599 case TASK_FRAME: |
| 533 return ProcessFrame(*task.frame); | 600 // TODO(sandersd): Signal IDR explicitly (not using pic_order_cnt == 0). |
| 601 if (reorder_queue_.size() < kMaxReorderQueueSize && | |
| 602 (task.frame->pic_order_cnt != 0 || reorder_queue_.empty())) { | |
| 603 assigned_bitstream_ids_.erase(task.frame->bitstream_id); | |
| 604 client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id); | |
| 605 reorder_queue_.push(task.frame); | |
| 606 task_queue_.pop(); | |
| 607 return true; | |
| 608 } | |
| 609 return false; | |
| 534 | 610 |
| 535 case TASK_FLUSH: | 611 case TASK_FLUSH: |
| 536 DCHECK_EQ(task.type, pending_flush_tasks_.front()); | 612 DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
| 537 pending_flush_tasks_.pop(); | 613 if (reorder_queue_.size() == 0) { |
| 538 client_->NotifyFlushDone(); | 614 pending_flush_tasks_.pop(); |
| 539 return true; | 615 client_->NotifyFlushDone(); |
| 616 task_queue_.pop(); | |
| 617 return true; | |
| 618 } | |
| 619 return false; | |
| 540 | 620 |
| 541 case TASK_RESET: | 621 case TASK_RESET: |
| 542 DCHECK_EQ(task.type, pending_flush_tasks_.front()); | 622 DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
| 543 pending_flush_tasks_.pop(); | 623 if (reorder_queue_.size() == 0) { |
| 544 client_->NotifyResetDone(); | 624 pending_flush_tasks_.pop(); |
| 545 return true; | 625 client_->NotifyResetDone(); |
| 626 task_queue_.pop(); | |
| 627 return true; | |
| 628 } | |
| 629 return false; | |
| 546 | 630 |
| 547 case TASK_DESTROY: | 631 case TASK_DESTROY: |
| 548 NOTREACHED() << "Can't destroy while in STATE_DECODING."; | 632 NOTREACHED() << "Can't destroy while in STATE_DECODING."; |
| 549 NotifyError(ILLEGAL_STATE); | 633 NotifyError(ILLEGAL_STATE); |
| 550 return false; | 634 return false; |
| 551 } | 635 } |
| 552 } | 636 } |
| 553 | 637 |
| 638 bool VTVideoDecodeAccelerator::ProcessReorderQueue() { | |
| 639 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | |
| 640 DCHECK_EQ(state_, STATE_DECODING); | |
| 641 | |
| 642 if (reorder_queue_.empty()) | |
| 643 return false; | |
| 644 | |
| 645 // If the next task is a flush (because there is a pending flush or because | |
| 646 // the next frame is an IDR), then we don't need a full reorder buffer to send | |
| 647 // the next frame. | |
| 648 bool flushing = !task_queue_.empty() && | |
| 649 (task_queue_.front().type != TASK_FRAME || | |
| 650 task_queue_.front().frame->pic_order_cnt == 0); | |
| 651 if (flushing || reorder_queue_.size() >= kMinReorderQueueSize) { | |
| 652 if (ProcessFrame(*reorder_queue_.top())) { | |
| 653 reorder_queue_.pop(); | |
| 654 return true; | |
| 655 } | |
| 656 } | |
| 657 | |
| 658 return false; | |
| 659 } | |
| 660 | |
| 554 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { | 661 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { |
| 555 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 662 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 556 DCHECK_EQ(state_, STATE_DECODING); | 663 DCHECK_EQ(state_, STATE_DECODING); |
| 664 | |
| 557 // If the next pending flush is for a reset, then the frame will be dropped. | 665 // If the next pending flush is for a reset, then the frame will be dropped. |
| 558 bool resetting = !pending_flush_tasks_.empty() && | 666 bool resetting = !pending_flush_tasks_.empty() && |
| 559 pending_flush_tasks_.front() == TASK_RESET; | 667 pending_flush_tasks_.front() == TASK_RESET; |
| 668 | |
| 560 if (!resetting && frame.image.get()) { | 669 if (!resetting && frame.image.get()) { |
| 561 // If the |coded_size| has changed, request new picture buffers and then | 670 // If the |coded_size| has changed, request new picture buffers and then |
| 562 // wait for them. | 671 // wait for them. |
| 563 // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of | 672 // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of |
| 564 // textures, this would be unnecessary, as the size is actually a property | 673 // textures, this would be unnecessary, as the size is actually a property |
| 565 // of the texture binding, not the texture. We rebind every frame, so the | 674 // of the texture binding, not the texture. We rebind every frame, so the |
| 566 // size passed to ProvidePictureBuffers() is meaningless. | 675 // size passed to ProvidePictureBuffers() is meaningless. |
| 567 if (picture_size_ != frame.coded_size) { | 676 if (picture_size_ != frame.coded_size) { |
| 568 // Dismiss current pictures. | 677 // Dismiss current pictures. |
| 569 for (int32_t picture_id : assigned_picture_ids_) | 678 for (int32_t picture_id : assigned_picture_ids_) |
| 570 client_->DismissPictureBuffer(picture_id); | 679 client_->DismissPictureBuffer(picture_id); |
| 571 assigned_picture_ids_.clear(); | 680 assigned_picture_ids_.clear(); |
| 572 available_picture_ids_.clear(); | 681 available_picture_ids_.clear(); |
| 573 | 682 |
| 574 // Request new pictures. | 683 // Request new pictures. |
| 575 picture_size_ = frame.coded_size; | 684 picture_size_ = frame.coded_size; |
| 576 client_->ProvidePictureBuffers( | 685 client_->ProvidePictureBuffers( |
| 577 kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB); | 686 kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB); |
| 578 return false; | 687 return false; |
| 579 } | 688 } |
| 580 if (!SendFrame(frame)) | 689 if (!SendFrame(frame)) |
| 581 return false; | 690 return false; |
| 582 } | 691 } |
| 583 assigned_bitstream_ids_.erase(frame.bitstream_id); | 692 |
| 584 client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); | |
| 585 return true; | 693 return true; |
| 586 } | 694 } |
| 587 | 695 |
| 588 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { | 696 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { |
| 589 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 697 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 590 DCHECK_EQ(state_, STATE_DECODING); | 698 DCHECK_EQ(state_, STATE_DECODING); |
| 591 | 699 |
| 592 if (available_picture_ids_.empty()) | 700 if (available_picture_ids_.empty()) |
| 593 return false; | 701 return false; |
| 594 | 702 |
| (...skipping 45 matching lines...) | |
| 640 | 748 |
| 641 void VTVideoDecodeAccelerator::QueueFlush(TaskType type) { | 749 void VTVideoDecodeAccelerator::QueueFlush(TaskType type) { |
| 642 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 750 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 643 pending_flush_tasks_.push(type); | 751 pending_flush_tasks_.push(type); |
| 644 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( | 752 decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
| 645 &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this), | 753 &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this), |
| 646 type)); | 754 type)); |
| 647 | 755 |
| 648 // If this is a new flush request, see if we can make progress. | 756 // If this is a new flush request, see if we can make progress. |
| 649 if (pending_flush_tasks_.size() == 1) | 757 if (pending_flush_tasks_.size() == 1) |
| 650 ProcessTasks(); | 758 ProcessWorkQueues(); |
| 651 } | 759 } |
| 652 | 760 |
| 653 void VTVideoDecodeAccelerator::Flush() { | 761 void VTVideoDecodeAccelerator::Flush() { |
| 654 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 762 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 655 QueueFlush(TASK_FLUSH); | 763 QueueFlush(TASK_FLUSH); |
| 656 } | 764 } |
| 657 | 765 |
| 658 void VTVideoDecodeAccelerator::Reset() { | 766 void VTVideoDecodeAccelerator::Reset() { |
| 659 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 767 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 660 QueueFlush(TASK_RESET); | 768 QueueFlush(TASK_RESET); |
| 661 } | 769 } |
| 662 | 770 |
| 663 void VTVideoDecodeAccelerator::Destroy() { | 771 void VTVideoDecodeAccelerator::Destroy() { |
| 664 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 772 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 773 // TODO(sandersd): Make sure the decoder won't try to read the buffers again | |
| 774 // before discarding them. | |
| 665 for (int32_t bitstream_id : assigned_bitstream_ids_) | 775 for (int32_t bitstream_id : assigned_bitstream_ids_) |
| 666 client_->NotifyEndOfBitstreamBuffer(bitstream_id); | 776 client_->NotifyEndOfBitstreamBuffer(bitstream_id); |
| 667 assigned_bitstream_ids_.clear(); | 777 assigned_bitstream_ids_.clear(); |
| 668 state_ = STATE_DESTROYING; | 778 state_ = STATE_DESTROYING; |
| 669 QueueFlush(TASK_DESTROY); | 779 QueueFlush(TASK_DESTROY); |
| 670 } | 780 } |
| 671 | 781 |
| 672 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { | 782 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { |
| 673 return false; | 783 return false; |
| 674 } | 784 } |
| 675 | 785 |
| 676 } // namespace content | 786 } // namespace content |
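As context for the `FrameOrder` comparator added at new-file lines 72-80 and the reorder-queue constants at lines 41-43: a `std::priority_queue` keyed this way pops frames in display order (ascending `pic_order_cnt`) even though they are pushed in decode order. A self-contained sketch, using `std::shared_ptr` in place of Chromium's `linked_ptr` and made-up picture-order-count values:

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <queue>
#include <vector>

struct Frame {
  int32_t bitstream_id;
  int32_t pic_order_cnt;
};

// Mirrors VTVideoDecodeAccelerator::FrameOrder: the frame with the smallest
// pic_order_cnt is on top; ties fall back on bitstream (decode) order.
struct FrameOrder {
  bool operator()(const std::shared_ptr<Frame>& lhs,
                  const std::shared_ptr<Frame>& rhs) const {
    if (lhs->pic_order_cnt != rhs->pic_order_cnt)
      return lhs->pic_order_cnt > rhs->pic_order_cnt;
    return lhs->bitstream_id > rhs->bitstream_id;
  }
};

int main() {
  std::priority_queue<std::shared_ptr<Frame>,
                      std::vector<std::shared_ptr<Frame>>,
                      FrameOrder> reorder_queue;
  // Push in a typical decode order: I, P, B, B (pic_order_cnt 0, 6, 2, 4).
  int32_t bitstream_id = 0;
  for (int32_t poc : {0, 6, 2, 4})
    reorder_queue.push(std::make_shared<Frame>(Frame{bitstream_id++, poc}));
  // Pops in display order: 0, 2, 4, 6.
  while (!reorder_queue.empty()) {
    std::cout << "pic_order_cnt " << reorder_queue.top()->pic_order_cnt
              << " (bitstream_id " << reorder_queue.top()->bitstream_id
              << ")\n";
    reorder_queue.pop();
  }
  return 0;
}
```

ProcessReorderQueue() above only pops greedily once the queue reaches kMinReorderQueueSize, or when a pending flush (or an IDR, signaled here by pic_order_cnt == 0) guarantees that no later frame can precede the top one in display order.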
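Relatedly, the `kNALUHeaderLength` constant (new-file line 33) and the `data_size += kNALUHeaderLength + nalu.size` accounting in DecodeTask() reflect the Annex B to AVC (AVCC) translation that VideoToolbox expects: each NAL unit is prefixed with a fixed-size big-endian length instead of a start code. A minimal standalone sketch of that framing, assuming raw NALU payloads with start codes already stripped; the real code writes into a CMBlockBuffer rather than a std::vector:

```cpp
#include <cstdint>
#include <vector>

// Prefix each NAL unit payload with a 4-byte big-endian length, producing
// AVCC-framed data (kNALUHeaderLength == 4 in the code above).
std::vector<uint8_t> ToAVCC(const std::vector<std::vector<uint8_t>>& nalus) {
  std::vector<uint8_t> out;
  for (const std::vector<uint8_t>& nalu : nalus) {
    const uint32_t size = static_cast<uint32_t>(nalu.size());
    out.push_back(static_cast<uint8_t>(size >> 24));
    out.push_back(static_cast<uint8_t>((size >> 16) & 0xff));
    out.push_back(static_cast<uint8_t>((size >> 8) & 0xff));
    out.push_back(static_cast<uint8_t>(size & 0xff));
    out.insert(out.end(), nalu.begin(), nalu.end());
  }
  return out;
}
```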