| Index: content/renderer/media/gpu/rtc_video_decoder.cc
|
| diff --git a/content/renderer/media/gpu/rtc_video_decoder.cc b/content/renderer/media/gpu/rtc_video_decoder.cc
|
| index 47aae3eee3646c58c554b1d42920a3190bd869db..edd5ed445628532d41753eafb599e982c3325814 100644
|
| --- a/content/renderer/media/gpu/rtc_video_decoder.cc
|
| +++ b/content/renderer/media/gpu/rtc_video_decoder.cc
|
| @@ -16,6 +16,9 @@
|
| #include "content/renderer/media/webrtc/webrtc_video_frame_adapter.h"
|
| #include "gpu/command_buffer/common/mailbox_holder.h"
|
| #include "media/base/bind_to_current_loop.h"
|
| +#include "media/base/decoder_buffer.h"
|
| +#include "media/base/encryption_scheme.h"
|
| +#include "media/base/video_decoder.h"
|
| #include "media/renderers/gpu_video_accelerator_factories.h"
|
| #include "third_party/skia/include/core/SkBitmap.h"
|
| #include "third_party/webrtc/base/bind.h"
|
| @@ -37,62 +40,52 @@ const int32_t RTCVideoDecoder::ID_INVALID = -1;
|
|
|
| // Number of consecutive frames that can be lost due to a VDA error before
|
| // falling back to SW implementation.
|
| -const uint32_t kNumVDAErrorsBeforeSWFallback = 5;
|
| -
|
| -// Maximum number of concurrent VDA::Decode() operations RVD will maintain.
|
| -// Higher values allow better pipelining in the GPU, but also require more
|
| -// resources.
|
| -static const size_t kMaxInFlightDecodes = 8;
|
| -
|
| -// Number of allocated shared memory segments.
|
| -static const size_t kNumSharedMemorySegments = 16;
|
| +const uint32_t kNumDecoderErrorsBeforeSWFallback = 5;
|
|
|
| // Maximum number of pending WebRTC buffers that are waiting for shared memory.
|
| static const size_t kMaxNumOfPendingBuffers = 8;
|
|
|
| -RTCVideoDecoder::BufferData::BufferData(int32_t bitstream_buffer_id,
|
| - uint32_t timestamp,
|
| - size_t size,
|
| - const gfx::Rect& visible_rect)
|
| - : bitstream_buffer_id(bitstream_buffer_id),
|
| - timestamp(timestamp),
|
| - size(size),
|
| - visible_rect(visible_rect) {}
|
| -
|
| -RTCVideoDecoder::BufferData::BufferData() {}
|
| -
|
| -RTCVideoDecoder::BufferData::~BufferData() {}
|
| +scoped_refptr<media::DecoderBuffer> CreateDecoderBuffer(
|
| + const webrtc::EncodedImage& encoded_image) {
|
| + auto decoder_buffer = media::DecoderBuffer::CopyFrom(encoded_image._buffer,
|
| + encoded_image._length);
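|
| + // The RTP timestamp is stored as-is (90 kHz units) rather than converted to
|
| + // a media timebase; OnFrameReady() expects to read the same value back.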
|
| + decoder_buffer->set_timestamp(
|
| + base::TimeDelta::FromInternalValue(encoded_image._timeStamp));
|
| + return decoder_buffer;
|
| +}
|
|
|
| -RTCVideoDecoder::RTCVideoDecoder(webrtc::VideoCodecType type,
|
| - media::GpuVideoAcceleratorFactories* factories)
|
| - : vda_error_counter_(0),
|
| +RTCVideoDecoder::RTCVideoDecoder(
|
| + webrtc::VideoCodecType type,
|
| + const CreateVideoDecoderCB& create_video_decoder_cb,
|
| + const scoped_refptr<base::SingleThreadTaskRunner>& decoder_task_runner)
|
| + : decoder_error_counter_(0),
|
| video_codec_type_(type),
|
| - factories_(factories),
|
| - decoder_texture_target_(0),
|
| + create_video_decoder_cb_(create_video_decoder_cb),
|
| + decoder_task_runner_(decoder_task_runner),
|
| pixel_format_(media::PIXEL_FORMAT_UNKNOWN),
|
| - next_picture_buffer_id_(0),
|
| state_(UNINITIALIZED),
|
| decode_complete_callback_(nullptr),
|
| - num_shm_buffers_(0),
|
| - next_bitstream_buffer_id_(0),
|
| - reset_bitstream_buffer_id_(ID_INVALID),
|
| + next_decoder_buffer_id_(0),
|
| + reset_decoder_buffer_id_(ID_INVALID),
|
| weak_factory_(this) {
|
| - DCHECK(!factories_->GetTaskRunner()->BelongsToCurrentThread());
|
| + DCHECK(!decoder_task_runner_->BelongsToCurrentThread());
|
| }
|
|
|
| RTCVideoDecoder::~RTCVideoDecoder() {
|
| DVLOG(2) << "~RTCVideoDecoder";
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - DestroyVDA();
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
|
|
| - // Delete all shared memories.
|
| - ClearPendingBuffers();
|
| + // Take |lock_| so ClearPendingBuffers_Locked() passes its lock assertion.
|
| + base::AutoLock auto_lock(lock_);
|
| + ClearPendingBuffers_Locked();
|
| }
|
|
|
| // static
|
| std::unique_ptr<RTCVideoDecoder> RTCVideoDecoder::Create(
|
| webrtc::VideoCodecType type,
|
| - media::GpuVideoAcceleratorFactories* factories) {
|
| + const CreateVideoDecoderCB& create_video_decoder_cb,
|
| + const scoped_refptr<base::SingleThreadTaskRunner>& decoder_task_runner) {
|
| + VLOG(0) << __func__;
|
| std::unique_ptr<RTCVideoDecoder> decoder;
|
| // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5717.
|
| #if defined(OS_WIN)
|
| @@ -104,47 +97,56 @@ std::unique_ptr<RTCVideoDecoder> RTCVideoDecoder::Create(
|
| return decoder;
|
| }
|
| #endif // defined(OS_WIN)
|
| +
|
| // Convert WebRTC codec type to media codec profile.
|
| + // TODO(slan): This should produce a VideoDecoderConfig object.
|
| media::VideoCodecProfile profile;
|
| + media::VideoCodec codec;
|
| switch (type) {
|
| case webrtc::kVideoCodecVP8:
|
| profile = media::VP8PROFILE_ANY;
|
| + codec = media::kCodecVP8;
|
| break;
|
| case webrtc::kVideoCodecH264:
|
| profile = media::H264PROFILE_MAIN;
|
| + codec = media::kCodecH264;
|
| break;
|
| default:
|
| - DVLOG(2) << "Video codec not supported:" << type;
|
| + VLOG(0) << "Video codec not supported:" << type;
|
| return decoder;
|
| }
|
|
|
| base::WaitableEvent waiter(base::WaitableEvent::ResetPolicy::MANUAL,
|
| base::WaitableEvent::InitialState::NOT_SIGNALED);
|
| - decoder.reset(new RTCVideoDecoder(type, factories));
|
| - decoder->factories_->GetTaskRunner()->PostTask(
|
| + decoder.reset(
|
| + new RTCVideoDecoder(type, create_video_decoder_cb, decoder_task_runner));
|
| + decoder_task_runner->PostTask(
|
| FROM_HERE,
|
| - base::Bind(&RTCVideoDecoder::CreateVDA,
|
| - base::Unretained(decoder.get()),
|
| - profile,
|
| - &waiter));
|
| - waiter.Wait();
|
| - // |decoder->vda_| is nullptr if the codec is not supported.
|
| - if (decoder->vda_)
|
| + base::Bind(&RTCVideoDecoder::InitializeDecoder,
|
| + base::Unretained(decoder.get()), profile, codec, &waiter));
|
| + VLOG(0) << "TaskPosted, thread locked.";
|
| + DCHECK(!decoder_task_runner->BelongsToCurrentThread());
|
| + // waiter.Wait();
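|
| + // NOTE: The Wait() and the |decoder_| check are disabled for now;
|
| + // OnDecoderInitialized() does not signal |waiter| yet, so waiting here
|
| + // would deadlock. Initialization is assumed to succeed.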
|
| + if (true) { // decoder->decoder_) {
|
| + VLOG(0) << "Decoder is initialized!";
|
| decoder->state_ = INITIALIZED;
|
| - else
|
| - factories->GetTaskRunner()->DeleteSoon(FROM_HERE, decoder.release());
|
| + } else {
|
| + VLOG(0) << "Decoder not initialized!";
|
| + decoder_task_runner->DeleteSoon(FROM_HERE, decoder.release());
|
| + }
|
| return decoder;
|
| }
|
|
|
| // static
|
| -void RTCVideoDecoder::Destroy(webrtc::VideoDecoder* decoder,
|
| - media::GpuVideoAcceleratorFactories* factories) {
|
| - factories->GetTaskRunner()->DeleteSoon(FROM_HERE, decoder);
|
| +void RTCVideoDecoder::Destroy(
|
| + webrtc::VideoDecoder* decoder,
|
| + const scoped_refptr<base::SingleThreadTaskRunner>& decoder_task_runner) {
|
| + decoder_task_runner->DeleteSoon(FROM_HERE, decoder);
|
| }
|
|
|
| int32_t RTCVideoDecoder::InitDecode(const webrtc::VideoCodec* codecSettings,
|
| int32_t /*numberOfCores*/) {
|
| - DVLOG(2) << "InitDecode";
|
| + VLOG(0) << __func__;
|
| DCHECK_EQ(video_codec_type_, codecSettings->codecType);
|
| if (codecSettings->codecType == webrtc::kVideoCodecVP8 &&
|
| codecSettings->codecSpecific.VP8.feedbackModeOn) {
|
| @@ -167,8 +169,9 @@ int32_t RTCVideoDecoder::Decode(
|
| const webrtc::RTPFragmentationHeader* /*fragmentation*/,
|
| const webrtc::CodecSpecificInfo* /*codecSpecificInfo*/,
|
| int64_t /*renderTimeMs*/) {
|
| - DVLOG(3) << "Decode";
|
| + VLOG(0) << __func__ << " " << inputImage._timeStamp;
|
|
|
| + // NOTE(slan): This entire method currently runs under |lock_|; consider
|
| + // narrowing the locked scope.
|
| base::AutoLock auto_lock(lock_);
|
|
|
| if (state_ == UNINITIALIZED || !decode_complete_callback_) {
|
| @@ -180,10 +183,10 @@ int32_t RTCVideoDecoder::Decode(
|
| LOG(ERROR) << "Decoding error occurred.";
|
| // Try reseting the session up to |kNumVDAErrorsHandled| times.
|
| // Check if SW H264 implementation is available before falling back.
|
| - if (vda_error_counter_ > kNumVDAErrorsBeforeSWFallback &&
|
| + if (decoder_error_counter_ > kNumDecoderErrorsBeforeSWFallback &&
|
| (video_codec_type_ != webrtc::kVideoCodecH264 ||
|
| webrtc::H264Decoder::IsSupported())) {
|
| - DLOG(ERROR) << vda_error_counter_
|
| + DLOG(ERROR) << decoder_error_counter_
|
| << " errors reported by VDA, falling back to software decode";
|
| return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
|
| }
|
| @@ -193,7 +196,7 @@ int32_t RTCVideoDecoder::Decode(
|
| }
|
|
|
| if (missingFrames || !inputImage._completeFrame) {
|
| - DLOG(ERROR) << "Missing or incomplete frames.";
|
| + LOG(ERROR) << "Missing or incomplete frames.";
|
| // Unlike the SW decoder in libvpx, hw decoder cannot handle broken frames.
|
| // Return an error to request a key frame.
|
| return WEBRTC_VIDEO_CODEC_ERROR;
|
| @@ -213,13 +216,13 @@ int32_t RTCVideoDecoder::Decode(
|
| if (inputImage._frameType == webrtc::kVideoFrameKey) {
|
| const gfx::Size new_frame_size(inputImage._encodedWidth,
|
| inputImage._encodedHeight);
|
| - DVLOG(2) << "Got key frame. size=" << new_frame_size.ToString();
|
| + VLOG(0) << "Got key frame. size=" << new_frame_size.ToString();
|
|
|
| if (new_frame_size.width() > max_resolution_.width() ||
|
| new_frame_size.width() < min_resolution_.width() ||
|
| new_frame_size.height() > max_resolution_.height() ||
|
| new_frame_size.height() < min_resolution_.height()) {
|
| - DVLOG(1) << "Resolution unsupported, falling back to software decode";
|
| + VLOG(0) << "Resolution unsupported, falling back to software decode";
|
| return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
|
| }
|
|
|
| @@ -229,69 +232,63 @@ int32_t RTCVideoDecoder::Decode(
|
| prev_frame_size != frame_size_) {
|
| need_to_reset_for_midstream_resize = true;
|
| }
|
| - } else if (IsFirstBufferAfterReset(next_bitstream_buffer_id_,
|
| - reset_bitstream_buffer_id_)) {
|
| + } else if (IsFirstBufferAfterReset(next_decoder_buffer_id_,
|
| + reset_decoder_buffer_id_)) {
|
| // TODO(wuchengli): VDA should handle it. Remove this when
|
| // http://crosbug.com/p/21913 is fixed.
|
|
|
| // If we're are in an error condition, increase the counter.
|
| - vda_error_counter_ += vda_error_counter_ ? 1 : 0;
|
| + decoder_error_counter_ += decoder_error_counter_ ? 1 : 0;
|
|
|
| - DVLOG(1) << "The first frame should be a key frame. Drop this.";
|
| + VLOG(0) << "The first frame should be a key frame. Drop this.";
|
| return WEBRTC_VIDEO_CODEC_ERROR;
|
| }
|
|
|
| - // Create buffer metadata.
|
| - BufferData buffer_data(next_bitstream_buffer_id_,
|
| - inputImage._timeStamp,
|
| - inputImage._length,
|
| - gfx::Rect(frame_size_));
|
| + int32_t decoder_buffer_id = next_decoder_buffer_id_;
|
| + auto decoder_buffer = CreateDecoderBuffer(inputImage);
|
| +
|
| // Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
|
| - next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & ID_LAST;
|
| -
|
| - // If a shared memory segment is available, there are no pending buffers, and
|
| - // this isn't a mid-stream resolution change, then send the buffer for decode
|
| - // immediately. Otherwise, save the buffer in the queue for later decode.
|
| - std::unique_ptr<base::SharedMemory> shm_buffer;
|
| - if (!need_to_reset_for_midstream_resize && pending_buffers_.empty())
|
| - shm_buffer = GetSHM_Locked(inputImage._length);
|
| - if (!shm_buffer) {
|
| - if (!SaveToPendingBuffers_Locked(inputImage, buffer_data)) {
|
| - // We have exceeded the pending buffers count, we are severely behind.
|
| - // Since we are returning ERROR, WebRTC will not be interested in the
|
| - // remaining buffers, and will provide us with a new keyframe instead.
|
| - // Better to drop any pending buffers and start afresh to catch up faster.
|
| - DVLOG(1) << "Exceeded maximum pending buffer count, dropping";
|
| - ClearPendingBuffers();
|
| - return WEBRTC_VIDEO_CODEC_ERROR;
|
| - }
|
| + next_decoder_buffer_id_ = (next_decoder_buffer_id_ + 1) & ID_LAST;
|
| +
|
| + // Try to enqueue the image to be decoded. These frames will be consumed on
|
| + // the decoder thread. It would be ideal to post this to the decoder thread to
|
| + // avoid the lock, but we need to return an error from this function if
|
| + // |pending_buffers_| is full. So use a lock instead.
|
| + if (!SaveToPendingBuffers_Locked(decoder_buffer_id, decoder_buffer)) {
|
| + // We have exceeded the pending buffers count, we are severely behind.
|
| + // Since we are returning ERROR, WebRTC will not be interested in the
|
| + // remaining buffers, and will provide us with a new keyframe instead.
|
| + // Better to drop any pending buffers and start afresh to catch up faster.
|
| + VLOG(0) << "Exceeded maximum pending buffer count, dropping";
|
| + ClearPendingBuffers_Locked();
|
| + return WEBRTC_VIDEO_CODEC_ERROR;
|
| + }
|
|
|
| - if (need_to_reset_for_midstream_resize) {
|
| - base::AutoUnlock auto_unlock(lock_);
|
| - Release();
|
| - }
|
| - return WEBRTC_VIDEO_CODEC_OK;
|
| + if (need_to_reset_for_midstream_resize) {
|
| + base::AutoUnlock auto_unlock(lock_);
|
| + Release();
|
| }
|
|
|
| - SaveToDecodeBuffers_Locked(inputImage, std::move(shm_buffer), buffer_data);
|
| - factories_->GetTaskRunner()->PostTask(
|
| - FROM_HERE,
|
| - base::Bind(&RTCVideoDecoder::RequestBufferDecode,
|
| - weak_factory_.GetWeakPtr()));
|
| + decoder_task_runner_->PostTask(
|
| + FROM_HERE, base::Bind(&RTCVideoDecoder::RequestBufferDecode,
|
| + weak_factory_.GetWeakPtr()));
|
| return WEBRTC_VIDEO_CODEC_OK;
|
| }
|
|
|
| int32_t RTCVideoDecoder::RegisterDecodeCompleteCallback(
|
| webrtc::DecodedImageCallback* callback) {
|
| - DVLOG(2) << "RegisterDecodeCompleteCallback";
|
| + VLOG(0) << __func__;
|
| DCHECK(callback);
|
| +
|
| + // Take the lock when setting the callback so that it can be used safely on
|
| + // the decoder thread. TODO(slan): See if we can avoid this.
|
| base::AutoLock auto_lock(lock_);
|
| decode_complete_callback_ = callback;
|
| return WEBRTC_VIDEO_CODEC_OK;
|
| }
|
|
|
| int32_t RTCVideoDecoder::Release() {
|
| - DVLOG(2) << "Release";
|
| + VLOG(0) << __func__;
|
| // Do not destroy VDA because WebRTC can call InitDecode and start decoding
|
| // again.
|
| base::AutoLock auto_lock(lock_);
|
| @@ -299,289 +296,132 @@ int32_t RTCVideoDecoder::Release() {
|
| LOG(ERROR) << "Decoder not initialized.";
|
| return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
|
| }
|
| - if (next_bitstream_buffer_id_ != 0)
|
| - reset_bitstream_buffer_id_ = next_bitstream_buffer_id_ - 1;
|
| +
|
| + if (next_decoder_buffer_id_ != 0)
|
| + reset_decoder_buffer_id_ = next_decoder_buffer_id_ - 1;
|
| else
|
| - reset_bitstream_buffer_id_ = ID_LAST;
|
| + reset_decoder_buffer_id_ = ID_LAST;
|
| // If VDA is already resetting, no need to request the reset again.
|
| if (state_ != RESETTING) {
|
| state_ = RESETTING;
|
| - factories_->GetTaskRunner()->PostTask(
|
| - FROM_HERE,
|
| - base::Bind(&RTCVideoDecoder::ResetInternal,
|
| - weak_factory_.GetWeakPtr()));
|
| + decoder_task_runner_->PostTask(
|
| + FROM_HERE, base::Bind(&RTCVideoDecoder::ResetOnDecoderThread,
|
| + weak_factory_.GetWeakPtr()));
|
| }
|
| return WEBRTC_VIDEO_CODEC_OK;
|
| }
|
|
|
| -void RTCVideoDecoder::ProvidePictureBuffers(uint32_t count,
|
| - media::VideoPixelFormat format,
|
| - uint32_t textures_per_buffer,
|
| - const gfx::Size& size,
|
| - uint32_t texture_target) {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - DVLOG(3) << "ProvidePictureBuffers. texture_target=" << texture_target;
|
| - DCHECK_EQ(1u, textures_per_buffer);
|
| -
|
| - if (!vda_)
|
| - return;
|
| -
|
| - std::vector<uint32_t> texture_ids;
|
| - std::vector<gpu::Mailbox> texture_mailboxes;
|
| - decoder_texture_target_ = texture_target;
|
| -
|
| - if (format == media::PIXEL_FORMAT_UNKNOWN)
|
| - format = media::PIXEL_FORMAT_ARGB;
|
| -
|
| - if ((pixel_format_ != media::PIXEL_FORMAT_UNKNOWN) &&
|
| - (format != pixel_format_)) {
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| -
|
| - pixel_format_ = format;
|
| - if (!factories_->CreateTextures(count,
|
| - size,
|
| - &texture_ids,
|
| - &texture_mailboxes,
|
| - decoder_texture_target_)) {
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| - DCHECK_EQ(count, texture_ids.size());
|
| - DCHECK_EQ(count, texture_mailboxes.size());
|
| -
|
| - std::vector<media::PictureBuffer> picture_buffers;
|
| - for (size_t i = 0; i < texture_ids.size(); ++i) {
|
| - media::PictureBuffer::TextureIds ids;
|
| - ids.push_back(texture_ids[i]);
|
| - std::vector<gpu::Mailbox> mailboxes;
|
| - mailboxes.push_back(texture_mailboxes[i]);
|
| -
|
| - picture_buffers.push_back(
|
| - media::PictureBuffer(next_picture_buffer_id_++, size, ids, mailboxes));
|
| - bool inserted = assigned_picture_buffers_.insert(std::make_pair(
|
| - picture_buffers.back().id(), picture_buffers.back())).second;
|
| - DCHECK(inserted);
|
| - }
|
| - vda_->AssignPictureBuffers(picture_buffers);
|
| -}
|
| -
|
| -void RTCVideoDecoder::DismissPictureBuffer(int32_t id) {
|
| - DVLOG(3) << "DismissPictureBuffer. id=" << id;
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| -
|
| - std::map<int32_t, media::PictureBuffer>::iterator it =
|
| - assigned_picture_buffers_.find(id);
|
| - if (it == assigned_picture_buffers_.end()) {
|
| - NOTREACHED() << "Missing picture buffer: " << id;
|
| - return;
|
| - }
|
| -
|
| - media::PictureBuffer buffer_to_dismiss = it->second;
|
| - assigned_picture_buffers_.erase(it);
|
| -
|
| - if (!picture_buffers_at_display_.count(id)) {
|
| - // We can delete the texture immediately as it's not being displayed.
|
| - factories_->DeleteTexture(buffer_to_dismiss.texture_ids()[0]);
|
| - return;
|
| - }
|
| - // Not destroying a texture in display in |picture_buffers_at_display_|.
|
| - // Postpone deletion until after it's returned to us.
|
| -}
|
| -
|
| -void RTCVideoDecoder::PictureReady(const media::Picture& picture) {
|
| - DVLOG(3) << "PictureReady";
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| -
|
| - std::map<int32_t, media::PictureBuffer>::iterator it =
|
| - assigned_picture_buffers_.find(picture.picture_buffer_id());
|
| - if (it == assigned_picture_buffers_.end()) {
|
| - NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id();
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| -
|
| - uint32_t timestamp = 0;
|
| - gfx::Rect visible_rect;
|
| - GetBufferData(picture.bitstream_buffer_id(), ×tamp, &visible_rect);
|
| - if (!picture.visible_rect().IsEmpty())
|
| - visible_rect = picture.visible_rect();
|
| -
|
| - const media::PictureBuffer& pb = it->second;
|
| - if (visible_rect.IsEmpty() || !gfx::Rect(pb.size()).Contains(visible_rect)) {
|
| - LOG(ERROR) << "Invalid picture size: " << visible_rect.ToString()
|
| - << " should fit in " << pb.size().ToString();
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| -
|
| - scoped_refptr<media::VideoFrame> frame =
|
| - CreateVideoFrame(picture, pb, timestamp, visible_rect, pixel_format_);
|
| - if (!frame) {
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| - bool inserted = picture_buffers_at_display_
|
| - .insert(std::make_pair(picture.picture_buffer_id(),
|
| - pb.texture_ids()[0]))
|
| - .second;
|
| - DCHECK(inserted);
|
| -
|
| - // Create a WebRTC video frame.
|
| - webrtc::VideoFrame decoded_image(
|
| - new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(frame), timestamp, 0,
|
| - webrtc::kVideoRotation_0);
|
| -
|
| - // Invoke decode callback. WebRTC expects no callback after Release.
|
| +void RTCVideoDecoder::OnResetDone() {
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
| {
|
| base::AutoLock auto_lock(lock_);
|
| - DCHECK(decode_complete_callback_);
|
| - if (IsBufferAfterReset(picture.bitstream_buffer_id(),
|
| - reset_bitstream_buffer_id_)) {
|
| - decode_complete_callback_->Decoded(decoded_image);
|
| - }
|
| - // Reset error counter as we successfully decoded a frame.
|
| - vda_error_counter_ = 0;
|
| - }
|
| -}
|
| -
|
| -scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame(
|
| - const media::Picture& picture,
|
| - const media::PictureBuffer& pb,
|
| - uint32_t timestamp,
|
| - const gfx::Rect& visible_rect,
|
| - media::VideoPixelFormat pixel_format) {
|
| - DCHECK(decoder_texture_target_);
|
| - // Convert timestamp from 90KHz to ms.
|
| - base::TimeDelta timestamp_ms = base::TimeDelta::FromInternalValue(
|
| - base::checked_cast<uint64_t>(timestamp) * 1000 / 90);
|
| - // TODO(mcasas): The incoming data may actually be in a YUV format, but may be
|
| - // labelled as ARGB. This may or may not be reported by VDA, depending on
|
| - // whether it provides an implementation of VDA::GetOutputFormat().
|
| - // This prevents the compositor from messing with it, since the underlying
|
| - // platform can handle the former format natively. Make sure the
|
| - // correct format is used and everyone down the line understands it.
|
| - gpu::MailboxHolder holders[media::VideoFrame::kMaxPlanes] = {
|
| - gpu::MailboxHolder(pb.texture_mailbox(0), gpu::SyncToken(),
|
| - decoder_texture_target_)};
|
| - scoped_refptr<media::VideoFrame> frame =
|
| - media::VideoFrame::WrapNativeTextures(
|
| - pixel_format, holders,
|
| - media::BindToCurrentLoop(base::Bind(
|
| - &RTCVideoDecoder::ReleaseMailbox, weak_factory_.GetWeakPtr(),
|
| - factories_, picture.picture_buffer_id(), pb.texture_ids()[0])),
|
| - pb.size(), visible_rect, visible_rect.size(), timestamp_ms);
|
| - if (frame && picture.allow_overlay()) {
|
| - frame->metadata()->SetBoolean(media::VideoFrameMetadata::ALLOW_OVERLAY,
|
| - true);
|
| + state_ = INITIALIZED;
|
| }
|
| - return frame;
|
| + // Send the pending buffers for decoding.
|
| + RequestBufferDecode();
|
| }
|
|
|
| -void RTCVideoDecoder::NotifyEndOfBitstreamBuffer(int32_t id) {
|
| - DVLOG(3) << "NotifyEndOfBitstreamBuffer. id=" << id;
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| -
|
| - auto it = bitstream_buffers_in_decoder_.find(id);
|
| - if (it == bitstream_buffers_in_decoder_.end()) {
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - NOTREACHED() << "Missing bitstream buffer: " << id;
|
| +// DEBUG_NOTE(slan): This function captures the WebRTC-specific state of
|
| +// NotifyError() and DestroyVDA() from the old implementation. All VDA/buffer
|
| +// management is handled by GpuVideoDecoder.
|
| +void RTCVideoDecoder::OnBufferDecoded(int32_t buffer_decode_id,
|
| + bool is_eos,
|
| + media::DecodeStatus status) {
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
| +
|
| + // This buffer is no longer in flight. Remove it from the in-flight buffers.
|
| + bool erased = buffers_in_decoder_.erase(buffer_decode_id);
|
| + DCHECK(erased);
|
| +
|
| + // DEBUG_NOTE(slan): Not sure if we need to do any special handling here or
|
| + // not. It should be sufficient to simply kick off another Decode, to emulate
|
| + // functionality from NotifyEndOfStream.
|
| + if (is_eos) {
|
| + // TODO(slan): Anything to do here?
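|
| + // (RequestBufferDecode() currently always passes false for |is_eos|, so
|
| + // this branch is not exercised yet.)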
|
| + }
|
| +
|
| + // This is only called when |decoder_| is torn down while buffers are in
|
| + // flight. This probably indicates that something has gone wrong, so let's
|
| + // do NOTREACHED() now and handle this later.
|
| + if (status == media::DecodeStatus::ABORTED) {
|
| + NOTREACHED();
|
| + } else if (status == media::DecodeStatus::OK) {
|
| + RequestBufferDecode();
|
| return;
|
| }
|
|
|
| - {
|
| - base::AutoLock auto_lock(lock_);
|
| - PutSHM_Locked(std::move(it->second));
|
| - }
|
| - bitstream_buffers_in_decoder_.erase(it);
|
| -
|
| - RequestBufferDecode();
|
| -}
|
| + // If we hit here, |decoder_| is bubbling up an error from the remote decoder.
|
| + DCHECK(status == media::DecodeStatus::DECODE_ERROR);
|
|
|
| -void RTCVideoDecoder::NotifyFlushDone() {
|
| - DVLOG(3) << "NotifyFlushDone";
|
| - NOTREACHED() << "Unexpected flush done notification.";
|
| -}
|
| -
|
| -void RTCVideoDecoder::NotifyResetDone() {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - DVLOG(3) << "NotifyResetDone";
|
| + // DEBUG_NOTE(slan): Functionality from old NotifyError().
|
| + // This could be problematic as WebRTC relies on a custom UMA stat. For now,
|
| + // record a placeholder error value.
|
| + UMA_HISTOGRAM_ENUMERATION("Media.RTCVideoDecoderError", -1,
|
| + media::VideoDecodeAccelerator::ERROR_MAX + 1);
|
|
|
| - if (!vda_)
|
| - return;
|
| + // DEBUG_NOTE(slan): Emulating functionality from old DestroyVDA().
|
| + // Re-enqueue the buffers that were in flight so we can attempt to decode
|
| + // them again. Insert them back into the queue in their original order.
|
| + std::map<int32_t, scoped_refptr<media::DecoderBuffer>> sorted(
|
| + buffers_in_decoder_.begin(), buffers_in_decoder_.end());
|
| + for (auto rit = sorted.rbegin(); rit != sorted.rend(); ++rit)
|
| + pending_buffers_.push_front(std::make_pair(rit->first, rit->second));
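|
| + // NOTE: The re-enqueued entries are still present in |buffers_in_decoder_|;
|
| + // decoding them again would trip the DCHECK in RequestBufferDecode(), so
|
| + // the map likely needs to be cleared here.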
|
|
|
| - input_buffer_data_.clear();
|
| + // DEBUG_NOTE(slan): Functionality from old NotifyError().
|
| {
|
| base::AutoLock auto_lock(lock_);
|
| - state_ = INITIALIZED;
|
| + state_ = DECODE_ERROR;
|
| + ++decoder_error_counter_;
|
| }
|
| - // Send the pending buffers for decoding.
|
| - RequestBufferDecode();
|
| -}
|
| -
|
| -void RTCVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - if (!vda_)
|
| - return;
|
| -
|
| - LOG(ERROR) << "VDA Error:" << error;
|
| - UMA_HISTOGRAM_ENUMERATION("Media.RTCVideoDecoderError", error,
|
| - media::VideoDecodeAccelerator::ERROR_MAX + 1);
|
| - DestroyVDA();
|
| -
|
| - base::AutoLock auto_lock(lock_);
|
| - state_ = DECODE_ERROR;
|
| - ++vda_error_counter_;
|
| }
|
|
|
| void RTCVideoDecoder::RequestBufferDecode() {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - if (!vda_)
|
| - return;
|
| -
|
| - MovePendingBuffersToDecodeBuffers();
|
| -
|
| - while (CanMoreDecodeWorkBeDone()) {
|
| - // Get a buffer and data from the queue.
|
| - std::unique_ptr<base::SharedMemory> shm_buffer;
|
| - BufferData buffer_data;
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
| +
|
| + // If there are buffers pending, and less than the maximum possible number of
|
| + // decode requests are in flight, push more into the decoder.
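|
| + // NOTE: |pending_buffers_| is read and popped here without holding |lock_|,
|
| + // while Decode() appends to it under the lock.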
|
| + while (!pending_buffers_.empty() && CanMoreDecodeWorkBeDone()) {
|
| + int32_t decoder_buffer_id = pending_buffers_.front().first;
|
| + scoped_refptr<media::DecoderBuffer> decoder_buffer =
|
| + pending_buffers_.front().second;
|
| {
|
| + // Do not request decode if decoder_ is resetting.
|
| base::AutoLock auto_lock(lock_);
|
| - // Do not request decode if VDA is resetting.
|
| - if (decode_buffers_.empty() || state_ == RESETTING)
|
| + if (state_ == RESETTING)
|
| return;
|
| - shm_buffer = std::move(decode_buffers_.front().first);
|
| - buffer_data = decode_buffers_.front().second;
|
| - decode_buffers_.pop_front();
|
| - // Drop the buffers before Release is called.
|
| - if (!IsBufferAfterReset(buffer_data.bitstream_buffer_id,
|
| - reset_bitstream_buffer_id_)) {
|
| - PutSHM_Locked(std::move(shm_buffer));
|
| - continue;
|
| - }
|
| }
|
|
|
| - // Create a BitstreamBuffer and send to VDA to decode.
|
| - media::BitstreamBuffer bitstream_buffer(
|
| - buffer_data.bitstream_buffer_id, shm_buffer->handle(), buffer_data.size,
|
| - 0, base::TimeDelta::FromInternalValue(buffer_data.timestamp));
|
| - const bool inserted = bitstream_buffers_in_decoder_
|
| - .insert(std::make_pair(bitstream_buffer.id(),
|
| - std::move(shm_buffer)))
|
| - .second;
|
| - DCHECK(inserted) << "bitstream_buffer_id " << bitstream_buffer.id()
|
| - << " existed already in bitstream_buffers_in_decoder_";
|
| - RecordBufferData(buffer_data);
|
| - vda_->Decode(bitstream_buffer);
|
| + // Remove the buffer from the queue.
|
| + pending_buffers_.pop_front();
|
| +
|
| + // If the buffer is from before the last call to Release(), drop it on
|
| + // the floor and keep going.
|
| + if (!IsBufferAfterReset(decoder_buffer_id, reset_decoder_buffer_id_)) {
|
| + continue;
|
| + }
|
| +
|
| + // Push the buffer to the decoder.
|
| + DCHECK(!base::ContainsKey(buffers_in_decoder_, decoder_buffer_id));
|
| + buffers_in_decoder_[decoder_buffer_id] = decoder_buffer;
|
| + decoder_->Decode(
|
| + decoder_buffer,
|
| + base::Bind(&RTCVideoDecoder::OnBufferDecoded, base::Unretained(this),
|
| + decoder_buffer_id, false /* is_eos */));
|
| }
|
| }
|
|
|
| bool RTCVideoDecoder::CanMoreDecodeWorkBeDone() {
|
| - return bitstream_buffers_in_decoder_.size() < kMaxInFlightDecodes;
|
| + VLOG(0) << __func__;
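|
| + // The in-flight limit now comes from the decoder via GetMaxDecodeRequests()
|
| + // instead of the old kMaxInFlightDecodes constant.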
|
| + return (static_cast<int>(buffers_in_decoder_.size()) <
|
| + decoder_->GetMaxDecodeRequests());
|
| }
|
|
|
| bool RTCVideoDecoder::IsBufferAfterReset(int32_t id_buffer, int32_t id_reset) {
|
| + VLOG(0) << __func__;
|
| if (id_reset == ID_INVALID)
|
| return true;
|
| int32_t diff = id_buffer - id_reset;
|
| @@ -592,285 +432,154 @@ bool RTCVideoDecoder::IsBufferAfterReset(int32_t id_buffer, int32_t id_reset) {
|
|
|
| bool RTCVideoDecoder::IsFirstBufferAfterReset(int32_t id_buffer,
|
| int32_t id_reset) {
|
| + VLOG(0) << __func__;
|
| if (id_reset == ID_INVALID)
|
| return id_buffer == 0;
|
| return id_buffer == ((id_reset + 1) & ID_LAST);
|
| }
|
|
|
| -void RTCVideoDecoder::SaveToDecodeBuffers_Locked(
|
| - const webrtc::EncodedImage& input_image,
|
| - std::unique_ptr<base::SharedMemory> shm_buffer,
|
| - const BufferData& buffer_data) {
|
| - memcpy(shm_buffer->memory(), input_image._buffer, input_image._length);
|
| -
|
| - // Store the buffer and the metadata to the queue.
|
| - decode_buffers_.emplace_back(std::move(shm_buffer), buffer_data);
|
| -}
|
| -
|
| bool RTCVideoDecoder::SaveToPendingBuffers_Locked(
|
| - const webrtc::EncodedImage& input_image,
|
| - const BufferData& buffer_data) {
|
| - DVLOG(2) << "SaveToPendingBuffers_Locked"
|
| - << ". pending_buffers size=" << pending_buffers_.size()
|
| - << ". decode_buffers_ size=" << decode_buffers_.size()
|
| - << ". available_shm size=" << available_shm_segments_.size();
|
| + int32_t decoder_buffer_id,
|
| + const scoped_refptr<media::DecoderBuffer>& decoder_buffer) {
|
| + VLOG(0) << "SaveToPendingBuffers_Locked"
|
| + << ". pending_buffers size=" << pending_buffers_.size();
|
| + lock_.AssertAcquired();
|
| +
|
| // Queued too many buffers. Something goes wrong.
|
| if (pending_buffers_.size() >= kMaxNumOfPendingBuffers) {
|
| LOG(WARNING) << "Too many pending buffers!";
|
| return false;
|
| }
|
|
|
| - // Clone the input image and save it to the queue.
|
| - uint8_t* buffer = new uint8_t[input_image._length];
|
| - // TODO(wuchengli): avoid memcpy. Extend webrtc::VideoDecoder::Decode()
|
| - // interface to take a non-const ptr to the frame and add a method to the
|
| - // frame that will swap buffers with another.
|
| - memcpy(buffer, input_image._buffer, input_image._length);
|
| - webrtc::EncodedImage encoded_image(
|
| - buffer, input_image._length, input_image._length);
|
| - std::pair<webrtc::EncodedImage, BufferData> buffer_pair =
|
| - std::make_pair(encoded_image, buffer_data);
|
| -
|
| - pending_buffers_.push_back(buffer_pair);
|
| + // Enqueue the buffer, so that it may be consumed by the decoder.
|
| + // TODO(slan): Use std::move all the way down to cut down on atomic ops.
|
| + pending_buffers_.push_back(std::make_pair(decoder_buffer_id, decoder_buffer));
|
| return true;
|
| }
|
|
|
| -void RTCVideoDecoder::MovePendingBuffersToDecodeBuffers() {
|
| +void RTCVideoDecoder::ResetOnDecoderThread() {
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
| + DCHECK(decoder_);
|
| + // The decoder is resetting.
|
| base::AutoLock auto_lock(lock_);
|
| - while (pending_buffers_.size() > 0) {
|
| - // Get a pending buffer from the queue.
|
| - const webrtc::EncodedImage& input_image = pending_buffers_.front().first;
|
| - const BufferData& buffer_data = pending_buffers_.front().second;
|
| -
|
| - // Drop the frame if it comes before Release.
|
| - if (!IsBufferAfterReset(buffer_data.bitstream_buffer_id,
|
| - reset_bitstream_buffer_id_)) {
|
| - delete[] input_image._buffer;
|
| - pending_buffers_.pop_front();
|
| - continue;
|
| - }
|
| - // Get shared memory and save it to decode buffers.
|
| - std::unique_ptr<base::SharedMemory> shm_buffer =
|
| - GetSHM_Locked(input_image._length);
|
| - if (!shm_buffer)
|
| - return;
|
| - SaveToDecodeBuffers_Locked(input_image, std::move(shm_buffer), buffer_data);
|
| - delete[] input_image._buffer;
|
| - pending_buffers_.pop_front();
|
| - }
|
| -}
|
| -
|
| -void RTCVideoDecoder::ResetInternal() {
|
| - DVLOG(2) << __func__;
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| -
|
| - if (vda_) {
|
| - vda_->Reset();
|
| - } else {
|
| - CreateVDA(vda_codec_profile_, nullptr);
|
| - if (vda_) {
|
| - base::AutoLock auto_lock(lock_);
|
| - state_ = INITIALIZED;
|
| - }
|
| - }
|
| -}
|
| -
|
| -// static
|
| -void RTCVideoDecoder::ReleaseMailbox(
|
| - base::WeakPtr<RTCVideoDecoder> decoder,
|
| - media::GpuVideoAcceleratorFactories* factories,
|
| - int64_t picture_buffer_id,
|
| - uint32_t texture_id,
|
| - const gpu::SyncToken& release_sync_token) {
|
| - DCHECK(factories->GetTaskRunner()->BelongsToCurrentThread());
|
| - factories->WaitSyncToken(release_sync_token);
|
| -
|
| - if (decoder) {
|
| - decoder->ReusePictureBuffer(picture_buffer_id);
|
| - return;
|
| - }
|
| - // It's the last chance to delete the texture after display,
|
| - // because RTCVideoDecoder was destructed.
|
| - factories->DeleteTexture(texture_id);
|
| -}
|
| -
|
| -void RTCVideoDecoder::ReusePictureBuffer(int64_t picture_buffer_id) {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - DVLOG(3) << "ReusePictureBuffer. id=" << picture_buffer_id;
|
| -
|
| - DCHECK(!picture_buffers_at_display_.empty());
|
| - PictureBufferTextureMap::iterator display_iterator =
|
| - picture_buffers_at_display_.find(picture_buffer_id);
|
| - uint32_t texture_id = display_iterator->second;
|
| - DCHECK(display_iterator != picture_buffers_at_display_.end());
|
| - picture_buffers_at_display_.erase(display_iterator);
|
| -
|
| - if (!assigned_picture_buffers_.count(picture_buffer_id)) {
|
| - // This picture was dismissed while in display, so we postponed deletion.
|
| - factories_->DeleteTexture(texture_id);
|
| - return;
|
| - }
|
| -
|
| - // DestroyVDA() might already have been called.
|
| - if (vda_)
|
| - vda_->ReusePictureBuffer(picture_buffer_id);
|
| + state_ = RESETTING;
|
| + decoder_->Reset(
|
| + base::Bind(&RTCVideoDecoder::OnResetDone, base::Unretained(this)));
|
| }
|
|
|
| bool RTCVideoDecoder::IsProfileSupported(media::VideoCodecProfile profile) {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - media::VideoDecodeAccelerator::Capabilities capabilities =
|
| - factories_->GetVideoDecodeAcceleratorCapabilities();
|
| -
|
| - for (const auto& supported_profile : capabilities.supported_profiles) {
|
| - if (profile == supported_profile.profile) {
|
| - min_resolution_ = supported_profile.min_resolution;
|
| - max_resolution_ = supported_profile.max_resolution;
|
| - return true;
|
| - }
|
| - }
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
|
|
| - return false;
|
| + // TODO(slan): Figure out how to do capabilities.
|
| + min_resolution_ = gfx::Size(0, 0);
|
| + max_resolution_ = gfx::Size(4000, 4000);
|
| + return true;
|
| }
|
|
|
| -void RTCVideoDecoder::CreateVDA(media::VideoCodecProfile profile,
|
| - base::WaitableEvent* waiter) {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| +void RTCVideoDecoder::InitializeDecoder(media::VideoCodecProfile profile,
|
| + media::VideoCodec codec,
|
| + base::WaitableEvent* waiter) {
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
| +
|
| + decoder_ = create_video_decoder_cb_.Run();
|
|
|
| if (!IsProfileSupported(profile)) {
|
| DVLOG(1) << "Unsupported profile " << profile;
|
| } else {
|
| - vda_ = factories_->CreateVideoDecodeAccelerator();
|
| -
|
| - media::VideoDecodeAccelerator::Config config(profile);
|
| - if (vda_ && !vda_->Initialize(config, this))
|
| - vda_.release()->Destroy();
|
| + // TODO(slan): Pass in a media::VideoDecoderConfig to this class. The only
|
| + // things that seem to matter here are populated.
|
| + media::VideoDecoderConfig config(
|
| + codec, profile, media::PIXEL_FORMAT_ARGB, /* Not used */
|
| + media::COLOR_SPACE_UNSPECIFIED, /* not used */
|
| + gfx::Size(320, 240), /* coded_size - default value in VDA::Client */
|
| + gfx::Rect(0, 0, 320, 240), /* visible_rect - Not used */
|
| + gfx::Size(320, 240), /* natural_size - figure this out. */
|
| + std::vector<uint8_t>(), /* extra_data - not used */
|
| + media::EncryptionScheme());
|
| + DCHECK(config.IsValidConfig()) << config.AsHumanReadableString();
|
| + decoder_->Initialize(
|
| + config, true /* low_delay */, nullptr /* cdm_context */,
|
| + base::Bind(&RTCVideoDecoder::OnDecoderInitialized,
|
| + base::Unretained(this), waiter),
|
| + base::Bind(&RTCVideoDecoder::OnFrameReady, base::Unretained(this)));
|
| vda_codec_profile_ = profile;
|
| }
|
| -
|
| - if (waiter)
|
| - waiter->Signal();
|
| -}
|
| -
|
| -void RTCVideoDecoder::DestroyTextures() {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| -
|
| - // Not destroying PictureBuffers in |picture_buffers_at_display_| yet, since
|
| - // their textures may still be in use by the user of this RTCVideoDecoder.
|
| - for (const auto& picture_buffer_at_display : picture_buffers_at_display_)
|
| - assigned_picture_buffers_.erase(picture_buffer_at_display.first);
|
| -
|
| - for (const auto& assigned_picture_buffer : assigned_picture_buffers_)
|
| - factories_->DeleteTexture(assigned_picture_buffer.second.texture_ids()[0]);
|
| -
|
| - assigned_picture_buffers_.clear();
|
| }
|
|
|
| -void RTCVideoDecoder::DestroyVDA() {
|
| - DVLOG(2) << "DestroyVDA";
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - if (vda_)
|
| - vda_.release()->Destroy();
|
| - DestroyTextures();
|
| -
|
| - base::AutoLock auto_lock(lock_);
|
| -
|
| - // Put the buffers back in case we restart the decoder.
|
| - for (auto& buffer : bitstream_buffers_in_decoder_)
|
| - PutSHM_Locked(std::move(buffer.second));
|
| - bitstream_buffers_in_decoder_.clear();
|
| -
|
| - state_ = UNINITIALIZED;
|
| -}
|
| +void RTCVideoDecoder::OnDecoderInitialized(base::WaitableEvent* waiter,
|
| + bool success) {
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
|
|
| -std::unique_ptr<base::SharedMemory> RTCVideoDecoder::GetSHM_Locked(
|
| - size_t min_size) {
|
| - // Reuse a SHM if possible.
|
| - if (!available_shm_segments_.empty() &&
|
| - available_shm_segments_.back()->mapped_size() >= min_size) {
|
| - std::unique_ptr<base::SharedMemory> buffer =
|
| - std::move(available_shm_segments_.back());
|
| - available_shm_segments_.pop_back();
|
| - return buffer;
|
| + // TODO(slan): Figure out how to handle this case better. For now, let's get
|
| + // it working.
|
| + if (!success) {
|
| + base::AutoLock lock(lock_);
|
| + state_ = DECODE_ERROR;
|
| }
|
|
|
| - if (available_shm_segments_.size() != num_shm_buffers_) {
|
| - // Either available_shm_segments_ is empty (and we already have some SHM
|
| - // buffers allocated), or the size of available segments is not large
|
| - // enough. In the former case we need to wait for buffers to be returned,
|
| - // in the latter we need to wait for all buffers to be returned to drop
|
| - // them and reallocate with a new size.
|
| - return NULL;
|
| - }
|
| + // // TODO(slan): Figure out how to handle this case better. For now, let's
|
| + // // get it working.
|
| + // CHECK(success);
|
|
|
| - if (num_shm_buffers_ != 0) {
|
| - available_shm_segments_.clear();
|
| - num_shm_buffers_ = 0;
|
| - }
|
| + // // Update the internal state of the decoder.
|
| + // {
|
| + // base::AutoLock lock(lock_);
|
| + // state_ = INITIALIZED;
|
| + // }
|
|
|
| - // Create twice as large buffers as required, to avoid frequent reallocation.
|
| - factories_->GetTaskRunner()->PostTask(
|
| - FROM_HERE,
|
| - base::Bind(&RTCVideoDecoder::CreateSHM, weak_factory_.GetWeakPtr(),
|
| - kNumSharedMemorySegments, min_size * 2));
|
| -
|
| - // We'll be called again after the shared memory is created.
|
| - return NULL;
|
| + // // Release the WebRTC thread, indicating we are ready to start decoding.
|
| + // if (waiter)
|
| + // waiter->Signal();
|
| }
|
|
|
| -void RTCVideoDecoder::PutSHM_Locked(
|
| - std::unique_ptr<base::SharedMemory> shm_buffer) {
|
| - lock_.AssertAcquired();
|
| - available_shm_segments_.push_back(std::move(shm_buffer));
|
| -}
|
| +void RTCVideoDecoder::OnFrameReady(
|
| + const scoped_refptr<media::VideoFrame>& frame) {
|
| + VLOG(0) << __func__;
|
| + DCheckDecoderTaskRunnerIsCurrent();
|
|
|
| -void RTCVideoDecoder::CreateSHM(size_t count, size_t size) {
|
| - DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
|
| - DVLOG(2) << "CreateSHM. count=" << count << ", size=" << size;
|
| -
|
| - for (size_t i = 0; i < count; i++) {
|
| - std::unique_ptr<base::SharedMemory> shm =
|
| - factories_->CreateSharedMemory(size);
|
| - if (!shm) {
|
| - LOG(ERROR) << "Failed allocating shared memory of size=" << size;
|
| - NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| + // DEBUG - Comment this in to make the stream all-black.
|
| + // frame = media::VideoFrame::CreateBlackFrame(frame->natural_size());
|
|
|
| + // Create a WebRTC video frame.
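|
| + // |frame->timestamp()| is expected to carry the raw 90 kHz RTP timestamp set
|
| + // in CreateDecoderBuffer(), assuming the decoder copies the buffer timestamp
|
| + // onto its output frames.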
|
| + webrtc::VideoFrame decoded_image(
|
| + new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(frame),
|
| + frame->timestamp().ToInternalValue(), 0, webrtc::kVideoRotation_0);
|
| +
|
| + // DEBUG_NOTE(slan): In the original code from PictureReady(), the code inside
|
| + // the lock looks something like this:
|
| + //
|
| + // // Invoke decode callback. WebRTC expects no callback after Release.
|
| + // if (IsBufferAfterReset(picture.bitstream_buffer_id(),
|
| + // reset_decoder_buffer_id_)) {
|
| + // decode_complete_callback_->Decoded(decoded_image);
|
| + // }
|
| + // decoder_error_counter_ = 0;
|
| + //
|
| + // This code makes sure that all frames that had been sent to the decoder
|
| + // service, but had not returned yet, are not called back to WebRTC. This
|
| + // *should* be handled by GpuVideoDecoder::DeliverFrame, which drops any
|
| + // frames that are returned while a VDA::Reset() call is pending. The VDA
|
| + // should also flush its pending frames when it receives the Reset()
|
| + // command.
|
| +
|
| + // Lock and pass the frame up to the WebRTC client class.
|
| + {
|
| base::AutoLock auto_lock(lock_);
|
| - PutSHM_Locked(std::move(shm));
|
| - ++num_shm_buffers_;
|
| - }
|
| -
|
| - // Kick off the decoding.
|
| - RequestBufferDecode();
|
| -}
|
| -
|
| -void RTCVideoDecoder::RecordBufferData(const BufferData& buffer_data) {
|
| - input_buffer_data_.push_front(buffer_data);
|
| - // Why this value? Because why not. avformat.h:MAX_REORDER_DELAY is 16, but
|
| - // that's too small for some pathological B-frame test videos. The cost of
|
| - // using too-high a value is low (192 bits per extra slot).
|
| - static const size_t kMaxInputBufferDataSize = 128;
|
| - // Pop from the back of the list, because that's the oldest and least likely
|
| - // to be useful in the future data.
|
| - if (input_buffer_data_.size() > kMaxInputBufferDataSize)
|
| - input_buffer_data_.pop_back();
|
| -}
|
| + DCHECK(decode_complete_callback_);
|
| + decode_complete_callback_->Decoded(decoded_image);
|
|
|
| -void RTCVideoDecoder::GetBufferData(int32_t bitstream_buffer_id,
|
| - uint32_t* timestamp,
|
| - gfx::Rect* visible_rect) {
|
| - for (const auto& buffer_data : input_buffer_data_) {
|
| - if (buffer_data.bitstream_buffer_id != bitstream_buffer_id)
|
| - continue;
|
| - *timestamp = buffer_data.timestamp;
|
| - *visible_rect = buffer_data.visible_rect;
|
| - return;
|
| + // Reset error counter as we successfully decoded a frame.
|
| + decoder_error_counter_ = 0;
|
| }
|
| - NOTREACHED() << "Missing bitstream buffer id: " << bitstream_buffer_id;
|
| }
|
|
|
| int32_t RTCVideoDecoder::RecordInitDecodeUMA(int32_t status) {
|
| + VLOG(0) << __func__;
|
| // Logging boolean is enough to know if HW decoding has been used. Also,
|
| // InitDecode is less likely to return an error so enum is not used here.
|
| bool sample = (status == WEBRTC_VIDEO_CODEC_OK) ? true : false;
|
| @@ -878,15 +587,13 @@ int32_t RTCVideoDecoder::RecordInitDecodeUMA(int32_t status) {
|
| return status;
|
| }
|
|
|
| -void RTCVideoDecoder::DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent()
|
| - const {
|
| - DCHECK(factories_->GetTaskRunner()->BelongsToCurrentThread());
|
| +void RTCVideoDecoder::DCheckDecoderTaskRunnerIsCurrent() const {
|
| + DCHECK(decoder_task_runner_->BelongsToCurrentThread());
|
| }
|
|
|
| -void RTCVideoDecoder::ClearPendingBuffers() {
|
| - // Delete WebRTC input buffers.
|
| - for (const auto& pending_buffer : pending_buffers_)
|
| - delete[] pending_buffer.first._buffer;
|
| +void RTCVideoDecoder::ClearPendingBuffers_Locked() {
|
| + VLOG(0) << __func__;
|
| + lock_.AssertAcquired();
|
| pending_buffers_.clear();
|
| }
|
|
|
|
|