| Index: content/renderer/media/rtc_video_decoder.cc |
| diff --git a/content/renderer/media/rtc_video_decoder.cc b/content/renderer/media/rtc_video_decoder.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..d3a156828c151cb93607604fc7f612029548ac59 |
| --- /dev/null |
| +++ b/content/renderer/media/rtc_video_decoder.cc |
| @@ -0,0 +1,505 @@ |
| +// Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/renderer/media/rtc_video_decoder.h" |
| + |
| +#include "base/bind.h" |
| +#include "base/callback_helpers.h" |
| +#include "base/logging.h" |
| +#include "base/memory/ref_counted.h" |
| +#include "base/message_loop_proxy.h" |
| +#include "base/task_runner_util.h" |
| +#include "content/renderer/media/native_handle_impl.h" |
| +#include "media/base/audio_decoder_config.h" |
| +#include "media/base/bind_to_loop.h" |
| +#include "media/base/decoder_buffer.h" |
| +#include "media/base/decoder_buffer_queue.h" |
| +#include "media/base/video_decoder_config.h" |
| +#include "third_party/webrtc/system_wrappers/interface/ref_count.h" |
| + |
| +namespace content { |
| + |
| +RTCVideoDecoder::RTCVideoDecoder( |
| + const scoped_refptr<media::GpuVideoDecoder::Factories>& factories) |
| + : decode_complete_callback_(NULL), |
| + decoder_waiter_(false, false), |
| + frame_width_(0), |
| + frame_height_(0), |
| + state_(kUninitialized), |
| + weak_factory_(this), |
| + factories_(factories), |
| + vda_loop_proxy_(factories->GetMessageLoop()), |
| + decoder_texture_target_(0), |
| + next_picture_buffer_id_(0), |
| + next_bitstream_buffer_id_(0) { |
| + DCHECK(!vda_loop_proxy_->BelongsToCurrentThread()); |
| + vda_loop_proxy_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&RTCVideoDecoder::InitWeakPtr, base::Unretained(this))); |
| + decoder_waiter_.Wait(); |
| +} |
| + |
| +void RTCVideoDecoder::InitWeakPtr() { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + weak_this_ = weak_factory_.GetWeakPtr(); |
| + decoder_waiter_.Signal(); |
Ami GONE FROM CHROMIUM
2013/05/29 21:11:46
Just in case you missed it before:
What happens i
wuchengli
2013/06/10 12:33:42
I added DestructionObserver in this class.
| +} |
| + |
| +RTCVideoDecoder::~RTCVideoDecoder() { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| +  DCHECK(!vda_.get());  // Release() should already have been called. |
| + DVLOG(2) << "~RTCVideoDecoder"; |
| +  // Delete all shared memory segments. |
| + for (size_t i = 0; i < available_shm_segments_.size(); ++i) { |
| + available_shm_segments_[i]->shm->Close(); |
| + delete available_shm_segments_[i]; |
| + } |
| + available_shm_segments_.clear(); |
| + for (std::map<int32, SHMBuffer*>::iterator it = |
| + bitstream_buffers_in_decoder_.begin(); |
| + it != bitstream_buffers_in_decoder_.end(); ++it) { |
| + it->second->shm->Close(); |
| + } |
| + bitstream_buffers_in_decoder_.clear(); |
| + for (std::deque<std::pair<SHMBuffer*, BufferData> >::iterator it = |
| + buffers_to_be_decoded.begin(); |
| + it != buffers_to_be_decoded.end(); ++it) { |
| + it->first->shm->Close(); |
| + } |
| + buffers_to_be_decoded.clear(); |
| + |
| + DestroyTextures(); |
| +} |
| + |
| +bool RTCVideoDecoder::Initialize(webrtc::VideoCodecType type) { |
| + DCHECK(!vda_loop_proxy_->BelongsToCurrentThread()); |
| + // Convert WebRTC codec type to media codec profile. |
| + media::VideoCodecProfile profile; |
| + switch (type) { |
| + case webrtc::kVideoCodecVP8: |
| + profile = media::VP8PROFILE_MAIN; |
| + break; |
| + default: |
| +      DVLOG(2) << "Video codec not supported: " << type; |
| + return false; |
| + } |
| + vda_loop_proxy_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&RTCVideoDecoder::CreateVideoDecodeAccelerator, |
| + base::Unretained(this), profile)); |
| + decoder_waiter_.Wait(); |
| + return (vda_ != NULL); |
| +} |
| + |
| +void RTCVideoDecoder::CreateVideoDecodeAccelerator( |
| + media::VideoCodecProfile profile) { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + DVLOG(3) << "CreateVideoDecodeAccelerator"; |
| + media::VideoDecodeAccelerator* vda = |
| + factories_->CreateVideoDecodeAccelerator(profile, this); |
| + // vda can be NULL if the codec type is not supported. |
| + vda_.reset(vda); |
| + |
| + base::AutoLock auto_lock(lock_); |
| + state_ = kInitialized; |
| + decoder_waiter_.Signal(); |
| +} |
| + |
| +int32_t RTCVideoDecoder::InitDecode( |
| + const webrtc::VideoCodec* codecSettings, |
| + int32_t /*numberOfCores*/) { |
| + DVLOG(2) << "InitDecode"; |
| + DCHECK_EQ(codecSettings->codecType, webrtc::kVideoCodecVP8); |
| + if (codecSettings->codecSpecific.VP8.feedbackModeOn) { |
| + LOG(ERROR) << "Feedback mode not supported"; |
| + return WEBRTC_VIDEO_CODEC_ERROR; |
| + } |
| + return WEBRTC_VIDEO_CODEC_OK; |
| +} |
| + |
| +int32_t RTCVideoDecoder::Decode( |
| + const webrtc::EncodedImage& inputImage, |
| + bool missingFrames, |
| + const webrtc::RTPFragmentationHeader* /*fragmentation*/, |
| + const webrtc::CodecSpecificInfo* /*codecSpecificInfo*/, |
| + int64_t /*renderTimeMs*/) { |
| + DVLOG(3) << "Decode"; |
| + |
| + { |
| + base::AutoLock auto_lock(lock_); |
| + if (state_ == kUninitialized || decode_complete_callback_ == NULL) { |
| + LOG(ERROR) << "The decoder has not initialized."; |
| + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| + } |
| + if (state_ == kDecodeError) { |
| + LOG(ERROR) << "Decoding error occurred."; |
| + return WEBRTC_VIDEO_CODEC_ERROR; |
| + } |
| + } |
| + if (missingFrames || !inputImage._completeFrame) { |
| + DLOG(ERROR) << "Missing or incomplete frames."; |
| +    // Unlike the software decoder in libvpx, the hardware decoder cannot |
| +    // handle broken frames. Return an error to request a key frame. |
| + return WEBRTC_VIDEO_CODEC_ERROR; |
| + } |
| + |
| + if (inputImage._frameType == webrtc::kKeyFrame) { |
| + frame_width_ = inputImage._encodedWidth; |
| + frame_height_ = inputImage._encodedHeight; |
| + } |
| + |
| + // Copy WebRTC buffer to SHM buffer and create buffer data. |
| + SHMBuffer* shm_buffer = GetSHM(inputImage._length); |
| + if (!shm_buffer) |
| + return WEBRTC_VIDEO_CODEC_ERROR; |
| + memcpy(shm_buffer->shm->memory(), inputImage._buffer, inputImage._length); |
| + BufferData buffer_data(next_bitstream_buffer_id_, inputImage._timeStamp, |
| + frame_width_, frame_height_, inputImage._length); |
| +  // Mask against 30 bits to avoid (undefined) signed integer wraparound. |
| + next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF; |
| + std::pair<SHMBuffer*, BufferData> buffer_pair |
| + = std::make_pair(shm_buffer, buffer_data); |
| + |
| + // Store the buffer and the data to the queue. |
| + { |
| + base::AutoLock auto_lock(lock_); |
| + buffers_to_be_decoded.push_back(buffer_pair); |
| + } |
| + |
| + vda_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
| + &RTCVideoDecoder::RequestBufferDecode, weak_this_)); |
| + |
| + return WEBRTC_VIDEO_CODEC_OK; |
| +} |
| + |
| +// Maximum number of concurrent VDA::Decode() operations this decoder will |
| +// maintain. Higher values allow better pipelining in the GPU, but also |
| +// require more resources. |
| +enum { kMaxInFlightDecodes = 4 }; |
| + |
| +bool RTCVideoDecoder::CanMoreDecodeWorkBeDone() { |
| + return bitstream_buffers_in_decoder_.size() < kMaxInFlightDecodes; |
| +} |
| + |
| +void RTCVideoDecoder::RequestBufferDecode() { |
| +  DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| +  if (!vda_ || !CanMoreDecodeWorkBeDone()) |
| +    return; |
| + |
| +  // Get a buffer and data from the queue. Copy the front element before |
| +  // popping it; keeping a pointer into the deque across pop_front() would |
| +  // leave the pointer dangling. |
| +  SHMBuffer* shm_buffer = NULL; |
| +  BufferData buffer_data(0, 0, 0, 0, 0); |
| +  { |
| +    base::AutoLock auto_lock(lock_); |
| +    if (buffers_to_be_decoded.empty()) |
| +      return; |
| +    shm_buffer = buffers_to_be_decoded.front().first; |
| +    buffer_data = buffers_to_be_decoded.front().second; |
| +    buffers_to_be_decoded.pop_front(); |
| +  } |
| + |
| +  // Create a BitstreamBuffer and send it to the VDA to decode. |
| +  media::BitstreamBuffer bitstream_buffer( |
| +      buffer_data.bitstream_buffer_id, shm_buffer->shm->handle(), |
| +      buffer_data.size); |
| +  bool inserted = bitstream_buffers_in_decoder_.insert(std::make_pair( |
| +      bitstream_buffer.id(), shm_buffer)).second; |
| +  DCHECK(inserted); |
| +  RecordBufferData(buffer_data); |
| +  vda_->Decode(bitstream_buffer); |
| +} |
| + |
| +// Size of shared-memory segments we allocate. Since we reuse them we let them |
| +// be on the beefy side. |
| +static const size_t kSharedMemorySegmentBytes = 100 << 10; |
| + |
| +RTCVideoDecoder::SHMBuffer* RTCVideoDecoder::GetSHM(size_t min_size) { |
| + { |
| + // Reuse a SHM if possible. |
| + base::AutoLock auto_lock(lock_); |
| + if (!available_shm_segments_.empty() && |
| + available_shm_segments_.back()->size >= min_size) { |
| + SHMBuffer* ret = available_shm_segments_.back(); |
| + available_shm_segments_.pop_back(); |
| + return ret; |
| + } |
| + } |
| +  // Create a new shared memory segment. This is done on the main thread. |
| +  size_t size_to_allocate = std::max(min_size, kSharedMemorySegmentBytes); |
| +  base::SharedMemory* shm = factories_->CreateSharedMemory(size_to_allocate); |
wuchengli
2013/05/28 15:01:00
I cannot write more tests without adding a mock fo
| + if (!shm) |
| + return NULL; |
| + return new SHMBuffer(shm, size_to_allocate); |
| +} |
| + |
| +void RTCVideoDecoder::PutSHM(SHMBuffer* shm_buffer) { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + base::AutoLock auto_lock(lock_); |
| + available_shm_segments_.push_back(shm_buffer); |
| +} |
| + |
| +int32_t RTCVideoDecoder::RegisterDecodeCompleteCallback( |
| + webrtc::DecodedImageCallback* callback) { |
| + base::AutoLock auto_lock(lock_); |
| + decode_complete_callback_ = callback; |
| + return WEBRTC_VIDEO_CODEC_OK; |
| +} |
| + |
| +int32_t RTCVideoDecoder::Release() { |
| + DVLOG(2) << "Release"; |
| + vda_loop_proxy_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&RTCVideoDecoder::Destroy, weak_this_)); |
| + decoder_waiter_.Wait(); |
| + return WEBRTC_VIDEO_CODEC_OK; |
| +} |
| + |
| +int32_t RTCVideoDecoder::Reset() { |
| + DVLOG(2) << "Reset"; |
| + { |
| + base::AutoLock auto_lock(lock_); |
| + if (state_ == kUninitialized) { |
| + LOG(ERROR) << "Decoder not initialized."; |
| + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; |
| + } |
| + } |
| + vda_loop_proxy_->PostTask(FROM_HERE, base::Bind( |
| + &RTCVideoDecoder::ResetInternal, weak_this_)); |
| + decoder_waiter_.Wait(); |
| + return WEBRTC_VIDEO_CODEC_OK; |
| +} |
| + |
| +void RTCVideoDecoder::Destroy() { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + DVLOG(2) << "Destroy"; |
| + |
| + if (vda_) |
| + DestroyVDA(); |
| + |
| + base::AutoLock auto_lock(lock_); |
| + state_ = kUninitialized; |
| + decoder_waiter_.Signal(); |
| +} |
| + |
| +void RTCVideoDecoder::ResetInternal() { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + vda_->Reset(); |
| +} |
| + |
| +void RTCVideoDecoder::NotifyInitializeDone() { |
| + DVLOG(2) << "NotifyInitializeDone"; |
| + NOTREACHED(); |
| +} |
| + |
| +void RTCVideoDecoder::ProvidePictureBuffers(uint32 count, |
| + const gfx::Size& size, |
| + uint32 texture_target) { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + DVLOG(3) << "ProvidePictureBuffers. texture_target=" << texture_target; |
| +  std::vector<uint32> texture_ids; |
| +  decoder_texture_target_ = texture_target; |
| + |
| +  // Check the VDA first so the textures created below are not leaked if the |
| +  // decoder has already been destroyed. |
| +  if (!vda_) { |
| +    LOG(ERROR) << "vda is NULL"; |
| +    return; |
| +  } |
| + |
| +  if (!factories_->CreateTextures( |
| +          count, size, &texture_ids, decoder_texture_target_)) { |
| +    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); |
| +    return; |
| +  } |
| + |
| + std::vector<media::PictureBuffer> picture_buffers; |
| + for (size_t i = 0; i < texture_ids.size(); ++i) { |
| + picture_buffers.push_back(media::PictureBuffer( |
| + next_picture_buffer_id_++, size, texture_ids[i])); |
| + bool inserted = picture_buffers_in_decoder_.insert(std::make_pair( |
| + picture_buffers.back().id(), picture_buffers.back())).second; |
| + DCHECK(inserted); |
| + } |
| + vda_->AssignPictureBuffers(picture_buffers); |
| +} |
| + |
| +void RTCVideoDecoder::DismissPictureBuffer(int32 id) { |
| + DVLOG(3) << "DismissPictureBuffer. id=" << id; |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + |
| + std::map<int32, media::PictureBuffer>::iterator it = |
| + picture_buffers_in_decoder_.find(id); |
| + if (it == picture_buffers_in_decoder_.end()) { |
| + NOTREACHED() << "Missing picture buffer: " << id; |
| + return; |
| + } |
| + factories_->DeleteTexture(it->second.texture_id()); |
| + picture_buffers_in_decoder_.erase(it); |
| +} |
| + |
| +void RTCVideoDecoder::PictureReady(const media::Picture& picture) { |
| + DVLOG(3) << "PictureReady"; |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + |
| + std::map<int32, media::PictureBuffer>::iterator it = |
| + picture_buffers_in_decoder_.find(picture.picture_buffer_id()); |
| + if (it == picture_buffers_in_decoder_.end()) { |
| + NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id(); |
| + NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); |
| + return; |
| + } |
| + const media::PictureBuffer& pb = it->second; |
| + |
| + // Create a media::VideoFrame. |
| + uint32_t timestamp = 0; |
| + uint32_t width = 0, height = 0; |
| + size_t size = 0; |
| + GetBufferData(picture.bitstream_buffer_id(), ×tamp, &width, &height, |
| + &size); |
| + gfx::Rect visible_rect(width, height); |
| + gfx::Size natural_size(width, height); |
| + DCHECK(decoder_texture_target_); |
| +  // The RTP timestamp is in units of a 90 kHz clock; convert it to |
| +  // microseconds. |
| +  base::TimeDelta timestamp_ms = base::TimeDelta::FromMicroseconds( |
| +      static_cast<uint64_t>(timestamp) * 1000 / 90); |
| + scoped_refptr<media::VideoFrame> frame( |
| + media::VideoFrame::WrapNativeTexture( |
| + pb.texture_id(), decoder_texture_target_, pb.size(), visible_rect, |
| + natural_size, timestamp_ms, |
| + base::Bind(&media::GpuVideoDecoder::Factories::ReadPixels, factories_, |
| + pb.texture_id(), decoder_texture_target_, |
| + gfx::Size(visible_rect.width(), visible_rect.height())), |
| + media::BindToCurrentLoop(base::Bind( |
| + &RTCVideoDecoder::ReusePictureBuffer, weak_this_, |
| + picture.picture_buffer_id())))); |
| + |
| + // Create a webrtc::I420VideoFrame. |
| + gfx::Rect rect = frame->visible_rect(); |
| + webrtc::I420VideoFrame decoded_image; |
| + decoded_image.CreateEmptyFrame( |
| + rect.width(), rect.height(), |
| + rect.width(), rect.width() / 2, rect.width() / 2); |
| + webrtc::RefCountImpl<NativeHandleImpl>* handle = |
| + new webrtc::RefCountImpl<NativeHandleImpl>(); |
| + handle->SetHandle(frame.get()); |
| + decoded_image.set_native_handle(handle); |
| + decoded_image.set_timestamp(timestamp); |
| + |
| + // Send to decode callback. |
| +  webrtc::DecodedImageCallback* callback; |
| + { |
| + base::AutoLock auto_lock(lock_); |
| + callback = decode_complete_callback_; |
| + } |
| + callback->Decoded(decoded_image); |
| +} |
| + |
| +void RTCVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id) { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + DVLOG(3) << "ReusePictureBuffer. id=" << picture_buffer_id; |
| + |
| + if (!vda_) |
| + return; |
| + vda_->ReusePictureBuffer(picture_buffer_id); |
| +} |
| + |
| +void RTCVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) { |
| + DVLOG(3) << "NotifyEndOfBitstreamBuffer. id=" << id; |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + |
| + std::map<int32, SHMBuffer*>::iterator it = |
| + bitstream_buffers_in_decoder_.find(id); |
| + if (it == bitstream_buffers_in_decoder_.end()) { |
| + NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); |
| + NOTREACHED() << "Missing bitstream buffer: " << id; |
| + return; |
| + } |
| + |
| + PutSHM(it->second); |
| + bitstream_buffers_in_decoder_.erase(it); |
| + |
| + RequestBufferDecode(); |
| +} |
| + |
| +void RTCVideoDecoder::NotifyFlushDone() { |
| + DVLOG(3) << "NotifyFlushDone"; |
| +} |
| + |
| +void RTCVideoDecoder::NotifyResetDone() { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + DVLOG(3) << "NotifyResetDone"; |
| + |
| + base::AutoLock auto_lock(lock_); |
| + state_ = kInitialized; |
| + decoder_waiter_.Signal(); |
| +} |
| + |
| +void RTCVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + if (!vda_) |
| + return; |
| + |
| +  DLOG(ERROR) << "VDA Error: " << error; |
| + DestroyVDA(); |
| + |
| + base::AutoLock auto_lock(lock_); |
| + state_ = kDecodeError; |
| +} |
| + |
| +void RTCVideoDecoder::DestroyTextures() { |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + for (std::map<int32, media::PictureBuffer>::iterator it = |
| + picture_buffers_in_decoder_.begin(); |
| + it != picture_buffers_in_decoder_.end(); ++it) { |
| + factories_->DeleteTexture(it->second.texture_id()); |
| + } |
| + picture_buffers_in_decoder_.clear(); |
| +} |
| + |
| +void RTCVideoDecoder::DestroyVDA() { |
| + DVLOG(2) << "DestroyVDA"; |
| + DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); |
| + if (vda_) |
| + vda_.release()->Destroy(); |
| + DestroyTextures(); |
| +} |
| + |
| +void RTCVideoDecoder::RecordBufferData(const BufferData& buffer_data) { |
| + input_buffer_data_.push_front(buffer_data); |
| + // Why this value? Because why not. avformat.h:MAX_REORDER_DELAY is 16, but |
| + // that's too small for some pathological B-frame test videos. The cost of |
| + // using too-high a value is low (192 bits per extra slot). |
| + static const size_t kMaxInputBufferDataSize = 128; |
| +  // Pop from the back of the list, because that's the oldest entry and the |
| +  // least likely to be useful in the future. |
| + if (input_buffer_data_.size() > kMaxInputBufferDataSize) |
| + input_buffer_data_.pop_back(); |
| +} |
| + |
| +void RTCVideoDecoder::GetBufferData( |
| +    int32 id, uint32_t* timestamp, uint32_t* width, uint32_t* height, |
| +    size_t* size) { |
| +  for (std::list<BufferData>::const_iterator it = |
| +      input_buffer_data_.begin(); it != input_buffer_data_.end(); |
| +      ++it) { |
| +    if (it->bitstream_buffer_id != id) |
| +      continue; |
| +    *timestamp = it->timestamp; |
| +    *width = it->width; |
| +    *height = it->height; |
| +    *size = it->size; |
| +    return; |
| +  } |
| +  NOTREACHED() << "Missing bitstream buffer id: " << id; |
| +} |
| + |
| +RTCVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* m, size_t s) |
| + : shm(m), size(s) { |
| +} |
| + |
| +RTCVideoDecoder::SHMBuffer::~SHMBuffer() {} |
| + |
| +RTCVideoDecoder::BufferData::BufferData( |
| + int32 bbid, uint32_t ts, int w, int h, size_t s) |
| + : bitstream_buffer_id(bbid), timestamp(ts), width(w), |
| + height(h), size(s) { |
| +} |
| + |
| +RTCVideoDecoder::BufferData::~BufferData() {} |
| + |
| +} // namespace content |