1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/renderer/media/rtc_video_decoder.h" | |
6 | |
7 #include "base/bind.h" | |
8 #include "base/callback_helpers.h" | |
9 #include "base/logging.h" | |
10 #include "base/memory/ref_counted.h" | |
11 #include "base/message_loop_proxy.h" | |
12 #include "base/task_runner_util.h" | |
13 #include "content/renderer/media/native_handle_impl.h" | |
14 #include "media/base/audio_decoder_config.h" | |
15 #include "media/base/bind_to_loop.h" | |
16 #include "media/base/decoder_buffer.h" | |
17 #include "media/base/decoder_buffer_queue.h" | |
18 #include "media/base/video_decoder_config.h" | |
19 #include "third_party/webrtc/system_wrappers/interface/ref_count.h" | |
20 | |
21 namespace content { | |
22 | |
23 RTCVideoDecoder::RTCVideoDecoder( | |
24 const scoped_refptr<media::GpuVideoDecoder::Factories>& factories) | |
25 : decode_complete_callback_(NULL), | |
26 decoder_waiter_(false, false), | |
27 frame_width_(0), | |
28 frame_height_(0), | |
29 state_(kUninitialized), | |
30 weak_factory_(this), | |
31 factories_(factories), | |
32 vda_loop_proxy_(factories->GetMessageLoop()), | |
33 decoder_texture_target_(0), | |
34 next_picture_buffer_id_(0), | |
35 next_bitstream_buffer_id_(0) { | |
36 DCHECK(!vda_loop_proxy_->BelongsToCurrentThread()); | |
37 vda_loop_proxy_->PostTask( | |
38 FROM_HERE, | |
39 base::Bind(&RTCVideoDecoder::InitWeakPtr, base::Unretained(this))); | |
40 decoder_waiter_.Wait(); | |
41 } | |
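Note: the constructor uses a post-task-and-wait handshake so that the weak pointer is bound on the VDA message loop before any other method can use it. A minimal standalone sketch of that pattern (illustrative only; the class and member names below are hypothetical, assuming Chromium's base::WaitableEvent and base::MessageLoopProxy):

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop_proxy.h"
#include "base/synchronization/waitable_event.h"

class LoopBoundObject {
 public:
  explicit LoopBoundObject(const scoped_refptr<base::MessageLoopProxy>& loop)
      : waiter_(false, false) {  // Auto-reset, initially unsignaled.
    loop->PostTask(FROM_HERE,
                   base::Bind(&LoopBoundObject::InitOnLoop,
                              base::Unretained(this)));
    waiter_.Wait();  // Blocks until InitOnLoop() has run on |loop|.
  }

 private:
  void InitOnLoop() {
    // Bind weak pointers or other loop-affine state here, then unblock the
    // constructor.
    waiter_.Signal();
  }

  base::WaitableEvent waiter_;
};

base::Unretained(this) is safe here only because the constructor blocks until the posted task has finished.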
42 | |
43 void RTCVideoDecoder::InitWeakPtr() { | |
44 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
45 weak_this_ = weak_factory_.GetWeakPtr(); | |
46 decoder_waiter_.Signal(); | |
Ami GONE FROM CHROMIUM (2013/05/29 21:11:46):
Just in case you missed it before: What happens i…
wuchengli (2013/06/10 12:33:42):
I added DestructionObserver in this class.
47 } | |
48 | |
49 RTCVideoDecoder::~RTCVideoDecoder() { | |
50 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
51   DCHECK(!vda_.get());  // Release() should already have been called. | |
52 DVLOG(2) << "~RTCVideoDecoder"; | |
53 // Delete all shared memories. | |
54 for (size_t i = 0; i < available_shm_segments_.size(); ++i) { | |
55 available_shm_segments_[i]->shm->Close(); | |
56 delete available_shm_segments_[i]; | |
57 } | |
58 available_shm_segments_.clear(); | |
59 for (std::map<int32, SHMBuffer*>::iterator it = | |
60 bitstream_buffers_in_decoder_.begin(); | |
61 it != bitstream_buffers_in_decoder_.end(); ++it) { | |
62 it->second->shm->Close(); | |
63 } | |
64 bitstream_buffers_in_decoder_.clear(); | |
65 for (std::deque<std::pair<SHMBuffer*, BufferData> >::iterator it = | |
66 buffers_to_be_decoded.begin(); | |
67 it != buffers_to_be_decoded.end(); ++it) { | |
68 it->first->shm->Close(); | |
69 } | |
70 buffers_to_be_decoded.clear(); | |
71 | |
72 DestroyTextures(); | |
73 } | |
74 | |
75 bool RTCVideoDecoder::Initialize(webrtc::VideoCodecType type) { | |
76 DCHECK(!vda_loop_proxy_->BelongsToCurrentThread()); | |
77 // Convert WebRTC codec type to media codec profile. | |
78 media::VideoCodecProfile profile; | |
79 switch (type) { | |
80 case webrtc::kVideoCodecVP8: | |
81 profile = media::VP8PROFILE_MAIN; | |
82 break; | |
83 default: | |
84       DVLOG(2) << "Video codec not supported: " << type; | |
85 return false; | |
86 } | |
87 vda_loop_proxy_->PostTask( | |
88 FROM_HERE, | |
89 base::Bind(&RTCVideoDecoder::CreateVideoDecodeAccelerator, | |
90 base::Unretained(this), profile)); | |
91 decoder_waiter_.Wait(); | |
92 return (vda_ != NULL); | |
93 } | |
94 | |
95 void RTCVideoDecoder::CreateVideoDecodeAccelerator( | |
96 media::VideoCodecProfile profile) { | |
97 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
98 DVLOG(3) << "CreateVideoDecodeAccelerator"; | |
99 media::VideoDecodeAccelerator* vda = | |
100 factories_->CreateVideoDecodeAccelerator(profile, this); | |
101 // vda can be NULL if the codec type is not supported. | |
102 vda_.reset(vda); | |
103 | |
104 base::AutoLock auto_lock(lock_); | |
105 state_ = kInitialized; | |
106 decoder_waiter_.Signal(); | |
107 } | |
108 | |
109 int32_t RTCVideoDecoder::InitDecode( | |
110 const webrtc::VideoCodec* codecSettings, | |
111 int32_t /*numberOfCores*/) { | |
112 DVLOG(2) << "InitDecode"; | |
113 DCHECK_EQ(codecSettings->codecType, webrtc::kVideoCodecVP8); | |
114 if (codecSettings->codecSpecific.VP8.feedbackModeOn) { | |
115 LOG(ERROR) << "Feedback mode not supported"; | |
116 return WEBRTC_VIDEO_CODEC_ERROR; | |
117 } | |
118 return WEBRTC_VIDEO_CODEC_OK; | |
119 } | |
120 | |
121 int32_t RTCVideoDecoder::Decode( | |
122 const webrtc::EncodedImage& inputImage, | |
123 bool missingFrames, | |
124 const webrtc::RTPFragmentationHeader* /*fragmentation*/, | |
125 const webrtc::CodecSpecificInfo* /*codecSpecificInfo*/, | |
126 int64_t /*renderTimeMs*/) { | |
127 DVLOG(3) << "Decode"; | |
128 | |
129 { | |
130 base::AutoLock auto_lock(lock_); | |
131 if (state_ == kUninitialized || decode_complete_callback_ == NULL) { | |
132       LOG(ERROR) << "The decoder has not been initialized."; | |
133 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
134 } | |
135 if (state_ == kDecodeError) { | |
136 LOG(ERROR) << "Decoding error occurred."; | |
137 return WEBRTC_VIDEO_CODEC_ERROR; | |
138 } | |
139 } | |
140 if (missingFrames || !inputImage._completeFrame) { | |
141 DLOG(ERROR) << "Missing or incomplete frames."; | |
142     // Unlike libvpx's SW decoder, the HW decoder cannot handle broken frames. | |
143 // Return an error to request a key frame. | |
144 return WEBRTC_VIDEO_CODEC_ERROR; | |
145 } | |
146 | |
147 if (inputImage._frameType == webrtc::kKeyFrame) { | |
148 frame_width_ = inputImage._encodedWidth; | |
149 frame_height_ = inputImage._encodedHeight; | |
150 } | |
151 | |
152 // Copy WebRTC buffer to SHM buffer and create buffer data. | |
153 SHMBuffer* shm_buffer = GetSHM(inputImage._length); | |
154 if (!shm_buffer) | |
155 return WEBRTC_VIDEO_CODEC_ERROR; | |
156 memcpy(shm_buffer->shm->memory(), inputImage._buffer, inputImage._length); | |
157 BufferData buffer_data(next_bitstream_buffer_id_, inputImage._timeStamp, | |
158 frame_width_, frame_height_, inputImage._length); | |
159 // Mask against 30 bits, to avoid (undefined) wraparound on signed integer. | |
160 next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF; | |
161 std::pair<SHMBuffer*, BufferData> buffer_pair | |
162 = std::make_pair(shm_buffer, buffer_data); | |
163 | |
164 // Store the buffer and the data to the queue. | |
165 { | |
166 base::AutoLock auto_lock(lock_); | |
167 buffers_to_be_decoded.push_back(buffer_pair); | |
168 } | |
169 | |
170 vda_loop_proxy_->PostTask(FROM_HERE, base::Bind( | |
171 &RTCVideoDecoder::RequestBufferDecode, weak_this_)); | |
172 | |
173 return WEBRTC_VIDEO_CODEC_OK; | |
174 } | |
175 | |
176 // Maximum number of concurrent VDA::Decode() operations we will maintain. | |
177 // Higher values allow better pipelining in the GPU, but also require more | |
178 // resources. | |
179 enum { kMaxInFlightDecodes = 4 }; | |
180 | |
181 bool RTCVideoDecoder::CanMoreDecodeWorkBeDone() { | |
182 return bitstream_buffers_in_decoder_.size() < kMaxInFlightDecodes; | |
183 } | |
184 | |
185 void RTCVideoDecoder::RequestBufferDecode() { | |
186   if (!CanMoreDecodeWorkBeDone()) | |
187 return; | |
188 | |
189   // Copy a buffer and its data out of the queue before popping the element. | |
190   SHMBuffer* shm_buffer = NULL; | |
191   BufferData buffer_data(0, 0, 0, 0, 0); | |
192   { | |
193     base::AutoLock auto_lock(lock_); | |
194     if (buffers_to_be_decoded.empty()) | |
195       return; | |
196     shm_buffer = buffers_to_be_decoded.front().first; | |
197     buffer_data = buffers_to_be_decoded.front().second; | |
198     buffers_to_be_decoded.pop_front(); | |
199   } | |
200 | |
201 // Create a BitstreamBuffer and send to VDA to decode. | |
202 media::BitstreamBuffer bitstream_buffer( | |
203 buffer_data.bitstream_buffer_id, shm_buffer->shm->handle(), | |
204 buffer_data.size); | |
205 bool inserted = bitstream_buffers_in_decoder_.insert(std::make_pair( | |
206 bitstream_buffer.id(), shm_buffer)).second; | |
207 DCHECK(inserted); | |
208 RecordBufferData(buffer_data); | |
209 vda_->Decode(bitstream_buffer); | |
210 } | |
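Note: decode pipelining is driven from two places. Decode() posts RequestBufferDecode() for every queued buffer, and NotifyEndOfBitstreamBuffer() calls it again each time a buffer retires, so up to kMaxInFlightDecodes buffers stay outstanding in the VDA. A self-contained illustration of this bounded in-flight pattern (names are hypothetical and not part of this CL):

#include <deque>
#include <set>

class InFlightThrottle {
 public:
  explicit InFlightThrottle(size_t max_in_flight)
      : max_in_flight_(max_in_flight), next_id_(0) {}

  // Producer side: queue work and try to submit.
  void Enqueue(int payload) {
    pending_.push_back(payload);
    Pump();
  }

  // Completion side: retire one item and try to submit the next.
  void OnCompleted(int id) {
    in_flight_.erase(id);
    Pump();
  }

 private:
  void Pump() {
    // Submit only while fewer than |max_in_flight_| items are outstanding.
    while (in_flight_.size() < max_in_flight_ && !pending_.empty()) {
      pending_.pop_front();
      in_flight_.insert(next_id_++);  // Hand the payload to the decoder here.
    }
  }

  const size_t max_in_flight_;
  int next_id_;
  std::deque<int> pending_;
  std::set<int> in_flight_;
};

Unlike the sketch, RequestBufferDecode() above submits at most one buffer per invocation and relies on being re-posted for each queued buffer and each completion, which amounts to the same bound.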
211 | |
212 // Size of shared-memory segments we allocate. Since we reuse them we let them | |
213 // be on the beefy side. | |
214 static const size_t kSharedMemorySegmentBytes = 100 << 10; | |
215 | |
216 RTCVideoDecoder::SHMBuffer* RTCVideoDecoder::GetSHM(size_t min_size) { | |
217 { | |
218 // Reuse a SHM if possible. | |
219 base::AutoLock auto_lock(lock_); | |
220 if (!available_shm_segments_.empty() && | |
221 available_shm_segments_.back()->size >= min_size) { | |
222 SHMBuffer* ret = available_shm_segments_.back(); | |
223 available_shm_segments_.pop_back(); | |
224 return ret; | |
225 } | |
226 } | |
227   // Create a new shared memory segment. This is done on the main thread. | |
228   size_t size_to_allocate = std::max(min_size, kSharedMemorySegmentBytes); | |
229   base::SharedMemory* shm = factories_->CreateSharedMemory(size_to_allocate); | |
wuchengli (2013/05/28 15:01:00):
I cannot write more tests without adding a mock fo…
230 if (!shm) | |
231 return NULL; | |
232 return new SHMBuffer(shm, size_to_allocate); | |
233 } | |
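Note: GetSHM()/PutSHM() behave as a simple last-in-first-out free list, and kSharedMemorySegmentBytes keeps new segments large enough to be reusable for later encoded frames. A stripped-down sketch of that policy (hypothetical names, no actual shared memory or locking):

#include <algorithm>
#include <cstddef>
#include <vector>

// Reuse the most recently returned segment if it is big enough, otherwise
// allocate at least |min_segment_bytes| so the new segment is likely to be
// reusable.
struct Segment {
  explicit Segment(size_t s) : size(s) {}
  size_t size;
  // A real pool would also own the shared memory handle here.
};

class SegmentPool {
 public:
  explicit SegmentPool(size_t min_segment_bytes)
      : min_segment_bytes_(min_segment_bytes) {}

  Segment* Get(size_t min_size) {
    if (!free_.empty() && free_.back()->size >= min_size) {
      Segment* segment = free_.back();
      free_.pop_back();
      return segment;
    }
    return new Segment(std::max(min_size, min_segment_bytes_));
  }

  void Put(Segment* segment) { free_.push_back(segment); }

 private:
  const size_t min_segment_bytes_;
  std::vector<Segment*> free_;
};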
234 | |
235 void RTCVideoDecoder::PutSHM(SHMBuffer* shm_buffer) { | |
236 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
237 base::AutoLock auto_lock(lock_); | |
238 available_shm_segments_.push_back(shm_buffer); | |
239 } | |
240 | |
241 int32_t RTCVideoDecoder::RegisterDecodeCompleteCallback( | |
242 webrtc::DecodedImageCallback* callback) { | |
243 base::AutoLock auto_lock(lock_); | |
244 decode_complete_callback_ = callback; | |
245 return WEBRTC_VIDEO_CODEC_OK; | |
246 } | |
247 | |
248 int32_t RTCVideoDecoder::Release() { | |
249 DVLOG(2) << "Release"; | |
250 vda_loop_proxy_->PostTask( | |
251 FROM_HERE, | |
252 base::Bind(&RTCVideoDecoder::Destroy, weak_this_)); | |
253 decoder_waiter_.Wait(); | |
254 return WEBRTC_VIDEO_CODEC_OK; | |
255 } | |
256 | |
257 int32_t RTCVideoDecoder::Reset() { | |
258 DVLOG(2) << "Reset"; | |
259 { | |
260 base::AutoLock auto_lock(lock_); | |
261 if (state_ == kUninitialized) { | |
262 LOG(ERROR) << "Decoder not initialized."; | |
263 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
264 } | |
265 } | |
266 vda_loop_proxy_->PostTask(FROM_HERE, base::Bind( | |
267 &RTCVideoDecoder::ResetInternal, weak_this_)); | |
268 decoder_waiter_.Wait(); | |
269 return WEBRTC_VIDEO_CODEC_OK; | |
270 } | |
271 | |
272 void RTCVideoDecoder::Destroy() { | |
273 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
274 DVLOG(2) << "Destroy"; | |
275 | |
276 if (vda_) | |
277 DestroyVDA(); | |
278 | |
279 base::AutoLock auto_lock(lock_); | |
280 state_ = kUninitialized; | |
281 decoder_waiter_.Signal(); | |
282 } | |
283 | |
284 void RTCVideoDecoder::ResetInternal() { | |
285 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
286 vda_->Reset(); | |
287 } | |
288 | |
289 void RTCVideoDecoder::NotifyInitializeDone() { | |
290 DVLOG(2) << "NotifyInitializeDone"; | |
291 NOTREACHED(); | |
292 } | |
293 | |
294 void RTCVideoDecoder::ProvidePictureBuffers(uint32 count, | |
295 const gfx::Size& size, | |
296 uint32 texture_target) { | |
297 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
298 DVLOG(3) << "ProvidePictureBuffers. texture_target=" << texture_target; | |
299 std::vector<uint32> texture_ids; | |
300 decoder_texture_target_ = texture_target; | |
301   if (!vda_) { | |
302     LOG(ERROR) << "vda is NULL"; | |
303     return; | |
304   } | |
305  | |
306   if (!factories_->CreateTextures( | |
307           count, size, &texture_ids, decoder_texture_target_)) { | |
308     NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); | |
309     return; | |
310   } | |
311 | |
312 std::vector<media::PictureBuffer> picture_buffers; | |
313 for (size_t i = 0; i < texture_ids.size(); ++i) { | |
314 picture_buffers.push_back(media::PictureBuffer( | |
315 next_picture_buffer_id_++, size, texture_ids[i])); | |
316 bool inserted = picture_buffers_in_decoder_.insert(std::make_pair( | |
317 picture_buffers.back().id(), picture_buffers.back())).second; | |
318 DCHECK(inserted); | |
319 } | |
320 vda_->AssignPictureBuffers(picture_buffers); | |
321 } | |
322 | |
323 void RTCVideoDecoder::DismissPictureBuffer(int32 id) { | |
324 DVLOG(3) << "DismissPictureBuffer. id=" << id; | |
325 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
326 | |
327 std::map<int32, media::PictureBuffer>::iterator it = | |
328 picture_buffers_in_decoder_.find(id); | |
329 if (it == picture_buffers_in_decoder_.end()) { | |
330 NOTREACHED() << "Missing picture buffer: " << id; | |
331 return; | |
332 } | |
333 factories_->DeleteTexture(it->second.texture_id()); | |
334 picture_buffers_in_decoder_.erase(it); | |
335 } | |
336 | |
337 void RTCVideoDecoder::PictureReady(const media::Picture& picture) { | |
338 DVLOG(3) << "PictureReady"; | |
339 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
340 | |
341 std::map<int32, media::PictureBuffer>::iterator it = | |
342 picture_buffers_in_decoder_.find(picture.picture_buffer_id()); | |
343 if (it == picture_buffers_in_decoder_.end()) { | |
344 NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id(); | |
345 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); | |
346 return; | |
347 } | |
348 const media::PictureBuffer& pb = it->second; | |
349 | |
350 // Create a media::VideoFrame. | |
351 uint32_t timestamp = 0; | |
352 uint32_t width = 0, height = 0; | |
353 size_t size = 0; | |
354   GetBufferData(picture.bitstream_buffer_id(), &timestamp, &width, &height, | |
355 &size); | |
356 gfx::Rect visible_rect(width, height); | |
357 gfx::Size natural_size(width, height); | |
358 DCHECK(decoder_texture_target_); | |
359   base::TimeDelta timestamp_us = base::TimeDelta::FromInternalValue( | |
360       static_cast<uint64_t>(timestamp) * 1000 / 90);  // 90 kHz -> us. | |
361 scoped_refptr<media::VideoFrame> frame( | |
362 media::VideoFrame::WrapNativeTexture( | |
363 pb.texture_id(), decoder_texture_target_, pb.size(), visible_rect, | |
364           natural_size, timestamp_us, | |
365 base::Bind(&media::GpuVideoDecoder::Factories::ReadPixels, factories_, | |
366 pb.texture_id(), decoder_texture_target_, | |
367 gfx::Size(visible_rect.width(), visible_rect.height())), | |
368 media::BindToCurrentLoop(base::Bind( | |
369 &RTCVideoDecoder::ReusePictureBuffer, weak_this_, | |
370 picture.picture_buffer_id())))); | |
371 | |
372 // Create a webrtc::I420VideoFrame. | |
373 gfx::Rect rect = frame->visible_rect(); | |
374 webrtc::I420VideoFrame decoded_image; | |
375 decoded_image.CreateEmptyFrame( | |
376 rect.width(), rect.height(), | |
377 rect.width(), rect.width() / 2, rect.width() / 2); | |
378 webrtc::RefCountImpl<NativeHandleImpl>* handle = | |
379 new webrtc::RefCountImpl<NativeHandleImpl>(); | |
380 handle->SetHandle(frame.get()); | |
381 decoded_image.set_native_handle(handle); | |
382 decoded_image.set_timestamp(timestamp); | |
383 | |
384 // Send to decode callback. | |
385   webrtc::DecodedImageCallback* callback; | |
386 { | |
387 base::AutoLock auto_lock(lock_); | |
388 callback = decode_complete_callback_; | |
389 } | |
390 callback->Decoded(decoded_image); | |
391 } | |
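Note on the timestamp math in PictureReady(): the RTP timestamp is a 90 kHz clock, so ticks * 1000 / 90 yields microseconds (e.g. 90000 ticks = 1 second = 1,000,000 us). A tiny self-contained check of that conversion (illustrative only; the helper name is made up):

#include <cassert>
#include <stdint.h>

// Convert a 90 kHz RTP timestamp to microseconds.
uint64_t RtpTicksToMicroseconds(uint32_t rtp_ticks) {
  return static_cast<uint64_t>(rtp_ticks) * 1000 / 90;
}

int main() {
  assert(RtpTicksToMicroseconds(90000) == 1000000);  // One second.
  assert(RtpTicksToMicroseconds(3000) == 33333);     // One frame at 30 fps.
  return 0;
}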
392 | |
393 void RTCVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id) { | |
394 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
395 DVLOG(3) << "ReusePictureBuffer. id=" << picture_buffer_id; | |
396 | |
397 if (!vda_) | |
398 return; | |
399 vda_->ReusePictureBuffer(picture_buffer_id); | |
400 } | |
401 | |
402 void RTCVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) { | |
403 DVLOG(3) << "NotifyEndOfBitstreamBuffer. id=" << id; | |
404 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
405 | |
406 std::map<int32, SHMBuffer*>::iterator it = | |
407 bitstream_buffers_in_decoder_.find(id); | |
408 if (it == bitstream_buffers_in_decoder_.end()) { | |
409 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE); | |
410 NOTREACHED() << "Missing bitstream buffer: " << id; | |
411 return; | |
412 } | |
413 | |
414 PutSHM(it->second); | |
415 bitstream_buffers_in_decoder_.erase(it); | |
416 | |
417 RequestBufferDecode(); | |
418 } | |
419 | |
420 void RTCVideoDecoder::NotifyFlushDone() { | |
421 DVLOG(3) << "NotifyFlushDone"; | |
422 } | |
423 | |
424 void RTCVideoDecoder::NotifyResetDone() { | |
425 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
426 DVLOG(3) << "NotifyResetDone"; | |
427 | |
428 base::AutoLock auto_lock(lock_); | |
429 state_ = kInitialized; | |
430 decoder_waiter_.Signal(); | |
431 } | |
432 | |
433 void RTCVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) { | |
434 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
435 if (!vda_) | |
436 return; | |
437 | |
438   DLOG(ERROR) << "VDA Error: " << error; | |
439 DestroyVDA(); | |
440 | |
441 base::AutoLock auto_lock(lock_); | |
442 state_ = kDecodeError; | |
443 } | |
444 | |
445 void RTCVideoDecoder::DestroyTextures() { | |
446 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
447 for (std::map<int32, media::PictureBuffer>::iterator it = | |
448 picture_buffers_in_decoder_.begin(); | |
449 it != picture_buffers_in_decoder_.end(); ++it) { | |
450 factories_->DeleteTexture(it->second.texture_id()); | |
451 } | |
452 picture_buffers_in_decoder_.clear(); | |
453 } | |
454 | |
455 void RTCVideoDecoder::DestroyVDA() { | |
456 DVLOG(2) << "DestroyVDA"; | |
457 DCHECK(vda_loop_proxy_->BelongsToCurrentThread()); | |
458 if (vda_) | |
459 vda_.release()->Destroy(); | |
460 DestroyTextures(); | |
461 } | |
462 | |
463 void RTCVideoDecoder::RecordBufferData(const BufferData& buffer_data) { | |
464 input_buffer_data_.push_front(buffer_data); | |
465 // Why this value? Because why not. avformat.h:MAX_REORDER_DELAY is 16, but | |
466 // that's too small for some pathological B-frame test videos. The cost of | |
467 // using too-high a value is low (192 bits per extra slot). | |
468 static const size_t kMaxInputBufferDataSize = 128; | |
469   // Pop from the back of the list, because that's the oldest entry and the | |
470   // least likely to be useful for future data. | |
471 if (input_buffer_data_.size() > kMaxInputBufferDataSize) | |
472 input_buffer_data_.pop_back(); | |
473 } | |
474 | |
475 void RTCVideoDecoder::GetBufferData( | |
476 int32 id, uint32_t* timestamp, uint32_t* width, uint32_t* height, | |
477     size_t* size) { | |
478   for (std::list<BufferData>::const_iterator it = input_buffer_data_.begin(); | |
479        it != input_buffer_data_.end(); ++it) { | |
480     if (it->bitstream_buffer_id != id) | |
481       continue; | |
482     *timestamp = it->timestamp; | |
483     *width = it->width; | |
484     *height = it->height; | |
485     *size = it->size; | |
486     return; | |
487   } | |
488   NOTREACHED() << "Missing bitstream buffer id: " << id; | |
489 } | |
490 | |
491 RTCVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* m, size_t s) | |
492 : shm(m), size(s) { | |
493 } | |
494 | |
495 RTCVideoDecoder::SHMBuffer::~SHMBuffer() {} | |
496 | |
497 RTCVideoDecoder::BufferData::BufferData( | |
498 int32 bbid, uint32_t ts, int w, int h, size_t s) | |
499 : bitstream_buffer_id(bbid), timestamp(ts), width(w), | |
500 height(h), size(s) { | |
501 } | |
502 | |
503 RTCVideoDecoder::BufferData::~BufferData() {} | |
504 | |
505 } // namespace content | |