OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/rtc_video_decoder.h" | 5 #include "content/renderer/media/rtc_video_decoder.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/memory/ref_counted.h" | 9 #include "base/memory/ref_counted.h" |
10 #include "base/message_loop/message_loop_proxy.h" | 10 #include "base/message_loop/message_loop_proxy.h" |
(...skipping 43 matching lines...) | |
54 const size_t size; | 54 const size_t size; |
55 }; | 55 }; |
56 | 56 |
57 RTCVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* shm, size_t size) | 57 RTCVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* shm, size_t size) |
58 : shm(shm), size(size) {} | 58 : shm(shm), size(size) {} |
59 | 59 |
60 RTCVideoDecoder::SHMBuffer::~SHMBuffer() { shm->Close(); } | 60 RTCVideoDecoder::SHMBuffer::~SHMBuffer() { shm->Close(); } |
61 | 61 |
62 RTCVideoDecoder::BufferData::BufferData(int32 bitstream_buffer_id, | 62 RTCVideoDecoder::BufferData::BufferData(int32 bitstream_buffer_id, |
63 uint32_t timestamp, | 63 uint32_t timestamp, |
64 int width, | |
65 int height, | |
66 size_t size) | 64 size_t size) |
67 : bitstream_buffer_id(bitstream_buffer_id), | 65 : bitstream_buffer_id(bitstream_buffer_id), |
68 timestamp(timestamp), | 66 timestamp(timestamp), |
69 width(width), | |
70 height(height), | |
71 size(size) {} | 67 size(size) {} |
72 | 68 |
73 RTCVideoDecoder::BufferData::BufferData() {} | 69 RTCVideoDecoder::BufferData::BufferData() {} |
74 | 70 |
75 RTCVideoDecoder::BufferData::~BufferData() {} | 71 RTCVideoDecoder::BufferData::~BufferData() {} |
76 | 72 |
77 RTCVideoDecoder::RTCVideoDecoder( | 73 RTCVideoDecoder::RTCVideoDecoder( |
78 const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories) | 74 const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories) |
79 : factories_(factories), | 75 : factories_(factories), |
80 decoder_texture_target_(0), | 76 decoder_texture_target_(0), |
(...skipping 112 matching lines...) | |
193 DLOG(ERROR) << "Missing or incomplete frames."; | 189 DLOG(ERROR) << "Missing or incomplete frames."; |
194 // Unlike the SW decoder in libvpx, hw decoder cannot handle broken frames. | 190 // Unlike the SW decoder in libvpx, hw decoder cannot handle broken frames. |
195 // Return an error to request a key frame. | 191 // Return an error to request a key frame. |
196 return WEBRTC_VIDEO_CODEC_ERROR; | 192 return WEBRTC_VIDEO_CODEC_ERROR; |
197 } | 193 } |
198 | 194 |
199 // Most platforms' VDA implementations support mid-stream resolution change | 195 // Most platforms' VDA implementations support mid-stream resolution change |
200 // internally. Platforms whose VDAs fail to support mid-stream resolution | 196 // internally. Platforms whose VDAs fail to support mid-stream resolution |
201 // change gracefully need to have their clients cover for them, and we do that | 197 // change gracefully need to have their clients cover for them, and we do that |
202 // here. | 198 // here. |
199 // Note this may not work because encoded size is not always available. | |
Pawel Osciak (2014/08/12 08:50:40): Please actually explain why and what the consequen…
kcwu (2014/08/13 14:27:19): I don't know what to say here. Could you advise?
Pawel Osciak (2014/08/14 07:15:36): Please say when encoded size is not available and…
kcwu (2014/08/14 12:31:01): per offline chat, the original code already silent…
|
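For context on the thread above, here is a minimal standalone sketch of the guard this patch introduces around the key-frame size check. The types and function below are hypothetical stand-ins, not the patch itself: when a key frame arrives without an encoded size, the tracked size is left unchanged and the midstream-resize reset is skipped, which matters on platforms where kVDACanHandleMidstreamResize is false (Android in the code below).

```cpp
// Sketch only: illustrates the key-frame size guard, with plain structs in
// place of the real webrtc/gfx types.
#include <cstdint>
#include <iostream>

struct Size {
  int w, h;
  bool Empty() const { return w == 0 || h == 0; }
};

bool NeedsResetForResize(bool vda_can_resize, Size* tracked,
                         uint32_t encoded_w, uint32_t encoded_h) {
  if (encoded_w == 0 || encoded_h == 0)
    return false;  // Encoded size missing: keep the previous size, no reset.
  Size prev = *tracked;
  tracked->w = static_cast<int>(encoded_w);
  tracked->h = static_cast<int>(encoded_h);
  // Reset only when the VDA cannot resize itself and the size really changed.
  return !vda_can_resize && !prev.Empty() &&
         (prev.w != tracked->w || prev.h != tracked->h);
}

int main() {
  Size s = {0, 0};
  std::cout << NeedsResetForResize(false, &s, 640, 480) << "\n";   // 0: first size seen
  std::cout << NeedsResetForResize(false, &s, 0, 0) << "\n";       // 0: size unavailable, 640x480 kept
  std::cout << NeedsResetForResize(false, &s, 1280, 720) << "\n";  // 1: midstream resize detected
  return 0;
}
```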
203 #ifdef ANDROID | 200 #ifdef ANDROID |
204 const bool kVDACanHandleMidstreamResize = false; | 201 const bool kVDACanHandleMidstreamResize = false; |
205 #else | 202 #else |
206 const bool kVDACanHandleMidstreamResize = true; | 203 const bool kVDACanHandleMidstreamResize = true; |
207 #endif | 204 #endif |
208 | 205 |
209 bool need_to_reset_for_midstream_resize = false; | 206 bool need_to_reset_for_midstream_resize = false; |
210 if (inputImage._frameType == webrtc::kKeyFrame) { | 207 if (inputImage._frameType == webrtc::kKeyFrame) { |
211 DVLOG(2) << "Got key frame. size=" << inputImage._encodedWidth << "x" | 208 if (inputImage._encodedWidth && inputImage._encodedHeight) { |
212 << inputImage._encodedHeight; | 209 DVLOG(2) << "Got key frame. size=" << inputImage._encodedWidth << "x" |
213 gfx::Size prev_frame_size = frame_size_; | 210 << inputImage._encodedHeight; |
214 frame_size_.SetSize(inputImage._encodedWidth, inputImage._encodedHeight); | 211 gfx::Size prev_frame_size = frame_size_; |
215 if (!kVDACanHandleMidstreamResize && !prev_frame_size.IsEmpty() && | 212 frame_size_.SetSize(inputImage._encodedWidth, inputImage._encodedHeight); |
216 prev_frame_size != frame_size_) { | 213 if (!kVDACanHandleMidstreamResize && !prev_frame_size.IsEmpty() && |
217 need_to_reset_for_midstream_resize = true; | 214 prev_frame_size != frame_size_) { |
215 need_to_reset_for_midstream_resize = true; | |
216 } | |
218 } | 217 } |
219 } else if (IsFirstBufferAfterReset(next_bitstream_buffer_id_, | 218 } else if (IsFirstBufferAfterReset(next_bitstream_buffer_id_, |
220 reset_bitstream_buffer_id_)) { | 219 reset_bitstream_buffer_id_)) { |
221 // TODO(wuchengli): VDA should handle it. Remove this when | 220 // TODO(wuchengli): VDA should handle it. Remove this when |
222 // http://crosbug.com/p/21913 is fixed. | 221 // http://crosbug.com/p/21913 is fixed. |
223 DVLOG(1) << "The first frame should be a key frame. Drop this."; | 222 DVLOG(1) << "The first frame should be a key frame. Drop this."; |
224 return WEBRTC_VIDEO_CODEC_ERROR; | 223 return WEBRTC_VIDEO_CODEC_ERROR; |
225 } | 224 } |
226 | 225 |
227 // Create buffer metadata. | 226 // Create buffer metadata. |
228 BufferData buffer_data(next_bitstream_buffer_id_, | 227 BufferData buffer_data(next_bitstream_buffer_id_, |
229 inputImage._timeStamp, | 228 inputImage._timeStamp, |
230 frame_size_.width(), | |
231 frame_size_.height(), | |
232 inputImage._length); | 229 inputImage._length); |
233 // Mask against 30 bits, to avoid (undefined) wraparound on signed integer. | 230 // Mask against 30 bits, to avoid (undefined) wraparound on signed integer. |
234 next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & ID_LAST; | 231 next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & ID_LAST; |
235 | 232 |
236 // If a shared memory segment is available, there are no pending buffers, and | 233 // If a shared memory segment is available, there are no pending buffers, and |
237 // this isn't a mid-stream resolution change, then send the buffer for decode | 234 // this isn't a mid-stream resolution change, then send the buffer for decode |
238 // immediately. Otherwise, save the buffer in the queue for later decode. | 235 // immediately. Otherwise, save the buffer in the queue for later decode. |
239 scoped_ptr<SHMBuffer> shm_buffer; | 236 scoped_ptr<SHMBuffer> shm_buffer; |
240 if (!need_to_reset_for_midstream_resize && pending_buffers_.size() == 0) | 237 if (!need_to_reset_for_midstream_resize && pending_buffers_.size() == 0) |
241 shm_buffer = GetSHM_Locked(inputImage._length); | 238 shm_buffer = GetSHM_Locked(inputImage._length); |
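As an aside on the 30-bit mask comment above, a small sketch of the wraparound behavior. kIdLast is a stand-in, assuming ID_LAST is the 30-bit mask the comment describes; the real constant is defined in rtc_video_decoder.h.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // Assumed stand-in for ID_LAST: all 30 low bits set.
  const int32_t kIdLast = 0x3FFFFFFF;
  int32_t id = kIdLast;       // Largest id before wrapping.
  id = (id + 1) & kIdLast;    // Wraps to 0; the intermediate value still fits in int32.
  std::cout << id << "\n";    // Prints 0.
  // Without the mask, a plain increment would eventually pass INT32_MAX,
  // which is undefined behavior for a signed counter.
  return 0;
}
```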
(...skipping 116 matching lines...) | |
358 std::map<int32, media::PictureBuffer>::iterator it = | 355 std::map<int32, media::PictureBuffer>::iterator it = |
359 assigned_picture_buffers_.find(picture.picture_buffer_id()); | 356 assigned_picture_buffers_.find(picture.picture_buffer_id()); |
360 if (it == assigned_picture_buffers_.end()) { | 357 if (it == assigned_picture_buffers_.end()) { |
361 NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id(); | 358 NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id(); |
362 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); | 359 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); |
363 return; | 360 return; |
364 } | 361 } |
365 const media::PictureBuffer& pb = it->second; | 362 const media::PictureBuffer& pb = it->second; |
366 | 363 |
367 // Create a media::VideoFrame. | 364 // Create a media::VideoFrame. |
368 uint32_t timestamp = 0, width = 0, height = 0; | 365 uint32_t timestamp = 0; |
369 size_t size = 0; | 366 size_t size = 0; |
370 GetBufferData( | 367 GetBufferData(picture.bitstream_buffer_id(), ×tamp); |
371 picture.bitstream_buffer_id(), ×tamp, &width, &height, &size); | |
372 scoped_refptr<media::VideoFrame> frame = | 368 scoped_refptr<media::VideoFrame> frame = |
373 CreateVideoFrame(picture, pb, timestamp, width, height, size); | 369 CreateVideoFrame(picture, pb, timestamp, size); |
Pawel Osciak (2014/08/12 08:50:40): size is always zero... Looks like this variable wa…
kcwu (2014/08/13 14:27:19): Done.
|
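Related to the timestamp fetched above: the raw 90 kHz RTP timestamp is handed to WebRTC unchanged, while CreateVideoFrame (further down) converts it to a base::TimeDelta. A standalone sketch of that arithmetic, assuming the usual 90 kHz RTP clock; the real code uses base::checked_cast and TimeDelta rather than the bare integer math shown here.

```cpp
#include <cstdint>
#include <iostream>

int64_t RtpTicksToMicroseconds(uint32_t rtp_ticks) {
  // 90 kHz clock: 90 ticks per millisecond. Widen before multiplying so
  // values like 90000 * 1000 cannot overflow 32 bits.
  return static_cast<int64_t>(rtp_ticks) * 1000 / 90;
}

int main() {
  std::cout << RtpTicksToMicroseconds(90000) << "\n";  // 1000000 us == 1 s
  std::cout << RtpTicksToMicroseconds(3000) << "\n";   // 33333 us ~= one 30 fps frame
  return 0;
}
```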
374 bool inserted = | 370 bool inserted = |
375 picture_buffers_at_display_.insert(std::make_pair( | 371 picture_buffers_at_display_.insert(std::make_pair( |
376 picture.picture_buffer_id(), | 372 picture.picture_buffer_id(), |
377 pb.texture_id())).second; | 373 pb.texture_id())).second; |
378 DCHECK(inserted); | 374 DCHECK(inserted); |
379 | 375 |
380 // Create a WebRTC video frame. | 376 // Create a WebRTC video frame. |
381 webrtc::RefCountImpl<NativeHandleImpl>* handle = | 377 webrtc::RefCountImpl<NativeHandleImpl>* handle = |
382 new webrtc::RefCountImpl<NativeHandleImpl>(frame); | 378 new webrtc::RefCountImpl<NativeHandleImpl>(frame); |
383 webrtc::TextureVideoFrame decoded_image(handle, width, height, timestamp, 0); | 379 webrtc::TextureVideoFrame decoded_image( |
380 handle, picture.size().width(), picture.size().height(), timestamp, 0); | |
384 | 381 |
385 // Invoke decode callback. WebRTC expects no callback after Reset or Release. | 382 // Invoke decode callback. WebRTC expects no callback after Reset or Release. |
386 { | 383 { |
387 base::AutoLock auto_lock(lock_); | 384 base::AutoLock auto_lock(lock_); |
388 DCHECK(decode_complete_callback_ != NULL); | 385 DCHECK(decode_complete_callback_ != NULL); |
389 if (IsBufferAfterReset(picture.bitstream_buffer_id(), | 386 if (IsBufferAfterReset(picture.bitstream_buffer_id(), |
390 reset_bitstream_buffer_id_)) { | 387 reset_bitstream_buffer_id_)) { |
391 decode_complete_callback_->Decoded(decoded_image); | 388 decode_complete_callback_->Decoded(decoded_image); |
392 } | 389 } |
393 } | 390 } |
(...skipping 23 matching lines...) | |
417 pixels, | 414 pixels, |
418 &event))) | 415 &event))) |
419 return; | 416 return; |
420 event.Wait(); | 417 event.Wait(); |
421 } | 418 } |
422 | 419 |
423 scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame( | 420 scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame( |
424 const media::Picture& picture, | 421 const media::Picture& picture, |
425 const media::PictureBuffer& pb, | 422 const media::PictureBuffer& pb, |
426 uint32_t timestamp, | 423 uint32_t timestamp, |
427 uint32_t width, | |
428 uint32_t height, | |
429 size_t size) { | 424 size_t size) { |
430 gfx::Rect visible_rect(width, height); | 425 gfx::Rect visible_rect(picture.size()); |
431 DCHECK(decoder_texture_target_); | 426 DCHECK(decoder_texture_target_); |
432 // Convert timestamp from 90KHz to ms. | 427 // Convert timestamp from 90KHz to ms. |
433 base::TimeDelta timestamp_ms = base::TimeDelta::FromInternalValue( | 428 base::TimeDelta timestamp_ms = base::TimeDelta::FromInternalValue( |
434 base::checked_cast<uint64_t>(timestamp) * 1000 / 90); | 429 base::checked_cast<uint64_t>(timestamp) * 1000 / 90); |
435 return media::VideoFrame::WrapNativeTexture( | 430 return media::VideoFrame::WrapNativeTexture( |
436 make_scoped_ptr(new gpu::MailboxHolder( | 431 make_scoped_ptr(new gpu::MailboxHolder( |
437 pb.texture_mailbox(), decoder_texture_target_, 0)), | 432 pb.texture_mailbox(), decoder_texture_target_, 0)), |
438 media::BindToCurrentLoop(base::Bind(&RTCVideoDecoder::ReleaseMailbox, | 433 media::BindToCurrentLoop(base::Bind(&RTCVideoDecoder::ReleaseMailbox, |
439 weak_factory_.GetWeakPtr(), | 434 weak_factory_.GetWeakPtr(), |
440 factories_, | 435 factories_, |
(...skipping 329 matching lines...) | |
770 // that's too small for some pathological B-frame test videos. The cost of | 765 // that's too small for some pathological B-frame test videos. The cost of |
771 // using too-high a value is low (192 bits per extra slot). | 766 // using too-high a value is low (192 bits per extra slot). |
772 static const size_t kMaxInputBufferDataSize = 128; | 767 static const size_t kMaxInputBufferDataSize = 128; |
773 // Pop from the back of the list, because that's the oldest and least likely | 768 // Pop from the back of the list, because that's the oldest and least likely |
774 // to be useful in the future data. | 769 // to be useful in the future data. |
775 if (input_buffer_data_.size() > kMaxInputBufferDataSize) | 770 if (input_buffer_data_.size() > kMaxInputBufferDataSize) |
776 input_buffer_data_.pop_back(); | 771 input_buffer_data_.pop_back(); |
777 } | 772 } |
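A minimal sketch of the bounded metadata list the comment above describes, with a hypothetical Meta struct standing in for RTCVideoDecoder::BufferData: new entries are pushed to the front and the back (oldest) entry is dropped once the 128-entry cap is exceeded, mirroring the push/pop pattern in the real code.

```cpp
#include <cstdint>
#include <iostream>
#include <list>

struct Meta {
  int32_t id;
  uint32_t timestamp;
};

void Record(std::list<Meta>& buf, Meta m, size_t cap = 128) {
  buf.push_front(m);          // Newest metadata goes to the front.
  if (buf.size() > cap)
    buf.pop_back();           // Oldest entry is the least likely to still be needed.
}

int main() {
  std::list<Meta> buf;
  for (int32_t i = 0; i < 200; ++i)
    Record(buf, {i, static_cast<uint32_t>(i) * 3000});
  // Prints: 128 entries, newest id 199
  std::cout << buf.size() << " entries, newest id " << buf.front().id << "\n";
  return 0;
}
```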
778 | 773 |
779 void RTCVideoDecoder::GetBufferData(int32 bitstream_buffer_id, | 774 void RTCVideoDecoder::GetBufferData(int32 bitstream_buffer_id, |
780 uint32_t* timestamp, | 775 uint32_t* timestamp) { |
781 uint32_t* width, | |
782 uint32_t* height, | |
783 size_t* size) { | |
784 for (std::list<BufferData>::iterator it = input_buffer_data_.begin(); | 776 for (std::list<BufferData>::iterator it = input_buffer_data_.begin(); |
785 it != input_buffer_data_.end(); | 777 it != input_buffer_data_.end(); |
786 ++it) { | 778 ++it) { |
787 if (it->bitstream_buffer_id != bitstream_buffer_id) | 779 if (it->bitstream_buffer_id != bitstream_buffer_id) |
788 continue; | 780 continue; |
789 *timestamp = it->timestamp; | 781 *timestamp = it->timestamp; |
790 *width = it->width; | |
791 *height = it->height; | |
792 return; | 782 return; |
793 } | 783 } |
794 NOTREACHED() << "Missing bitstream buffer id: " << bitstream_buffer_id; | 784 NOTREACHED() << "Missing bitstream buffer id: " << bitstream_buffer_id; |
795 } | 785 } |
796 | 786 |
797 int32_t RTCVideoDecoder::RecordInitDecodeUMA(int32_t status) { | 787 int32_t RTCVideoDecoder::RecordInitDecodeUMA(int32_t status) { |
798 // Logging boolean is enough to know if HW decoding has been used. Also, | 788 // Logging boolean is enough to know if HW decoding has been used. Also, |
799 // InitDecode is less likely to return an error so enum is not used here. | 789 // InitDecode is less likely to return an error so enum is not used here. |
800 bool sample = (status == WEBRTC_VIDEO_CODEC_OK) ? true : false; | 790 bool sample = (status == WEBRTC_VIDEO_CODEC_OK) ? true : false; |
801 UMA_HISTOGRAM_BOOLEAN("Media.RTCVideoDecoderInitDecodeSuccess", sample); | 791 UMA_HISTOGRAM_BOOLEAN("Media.RTCVideoDecoderInitDecodeSuccess", sample); |
802 return status; | 792 return status; |
803 } | 793 } |
804 | 794 |
805 void RTCVideoDecoder::DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent() | 795 void RTCVideoDecoder::DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent() |
806 const { | 796 const { |
807 DCHECK(factories_->GetTaskRunner()->BelongsToCurrentThread()); | 797 DCHECK(factories_->GetTaskRunner()->BelongsToCurrentThread()); |
808 } | 798 } |
809 | 799 |
810 } // namespace content | 800 } // namespace content |