OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/opus_audio_decoder.h" | 5 #include "media/filters/opus_audio_decoder.h" |
6 | 6 |
7 #include <cmath> | 7 #include <cmath> |
8 | 8 |
9 #include "base/single_thread_task_runner.h" | 9 #include "base/single_thread_task_runner.h" |
10 #include "base/sys_byteorder.h" | 10 #include "base/sys_byteorder.h" |
11 #include "media/base/audio_buffer.h" | 11 #include "media/base/audio_buffer.h" |
12 #include "media/base/audio_decoder_config.h" | 12 #include "media/base/audio_decoder_config.h" |
13 #include "media/base/audio_timestamp_helper.h" | 13 #include "media/base/audio_discard_helper.h" |
14 #include "media/base/bind_to_current_loop.h" | 14 #include "media/base/bind_to_current_loop.h" |
15 #include "media/base/buffers.h" | 15 #include "media/base/buffers.h" |
16 #include "media/base/decoder_buffer.h" | 16 #include "media/base/decoder_buffer.h" |
17 #include "third_party/opus/src/include/opus.h" | 17 #include "third_party/opus/src/include/opus.h" |
18 #include "third_party/opus/src/include/opus_multistream.h" | 18 #include "third_party/opus/src/include/opus_multistream.h" |
19 | 19 |
20 namespace media { | 20 namespace media { |
21 | 21 |
22 static uint16 ReadLE16(const uint8* data, size_t data_size, int read_offset) { | 22 static uint16 ReadLE16(const uint8* data, size_t data_size, int read_offset) { |
23 uint16 value = 0; | 23 uint16 value = 0; |
24 DCHECK_LE(read_offset + sizeof(value), data_size); | 24 DCHECK_LE(read_offset + sizeof(value), data_size); |
25 memcpy(&value, data + read_offset, sizeof(value)); | 25 memcpy(&value, data + read_offset, sizeof(value)); |
26 return base::ByteSwapToLE16(value); | 26 return base::ByteSwapToLE16(value); |
27 } | 27 } |
28 | 28 |
29 static int TimeDeltaToAudioFrames(base::TimeDelta time_delta, | |
30 int frame_rate) { | |
31 return std::ceil(time_delta.InSecondsF() * frame_rate); | |
32 } | |
33 | |
34 // The Opus specification is part of IETF RFC 6716: | 29 // The Opus specification is part of IETF RFC 6716: |
35 // http://tools.ietf.org/html/rfc6716 | 30 // http://tools.ietf.org/html/rfc6716 |
36 | 31 |
37 // Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies | 32 // Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies |
38 // mappings for up to 8 channels. This information is part of the Vorbis I | 33 // mappings for up to 8 channels. This information is part of the Vorbis I |
39 // Specification: | 34 // Specification: |
40 // http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html | 35 // http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html |
41 static const int kMaxVorbisChannels = 8; | 36 static const int kMaxVorbisChannels = 8; |
42 | 37 |
43 // Maximum packet size used in Xiph's opusdec and FFmpeg's libopusdec. | 38 // Maximum packet size used in Xiph's opusdec and FFmpeg's libopusdec. |
(...skipping 200 matching lines...) | |
244 | 239 |
245 for (int i = 0; i < extra_data->channels; ++i) | 240 for (int i = 0; i < extra_data->channels; ++i) |
246 extra_data->stream_map[i] = *(data + kOpusExtraDataStreamMapOffset + i); | 241 extra_data->stream_map[i] = *(data + kOpusExtraDataStreamMapOffset + i); |
247 return true; | 242 return true; |
248 } | 243 } |
249 | 244 |
250 OpusAudioDecoder::OpusAudioDecoder( | 245 OpusAudioDecoder::OpusAudioDecoder( |
251 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) | 246 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) |
252 : task_runner_(task_runner), | 247 : task_runner_(task_runner), |
253 opus_decoder_(NULL), | 248 opus_decoder_(NULL), |
254 last_input_timestamp_(kNoTimestamp()), | |
255 frames_to_discard_(0), | |
256 start_input_timestamp_(kNoTimestamp()) {} | 249 start_input_timestamp_(kNoTimestamp()) {} |
257 | 250 |
258 void OpusAudioDecoder::Initialize(const AudioDecoderConfig& config, | 251 void OpusAudioDecoder::Initialize(const AudioDecoderConfig& config, |
259 const PipelineStatusCB& status_cb) { | 252 const PipelineStatusCB& status_cb) { |
260 DCHECK(task_runner_->BelongsToCurrentThread()); | 253 DCHECK(task_runner_->BelongsToCurrentThread()); |
261 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb); | 254 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb); |
262 | 255 |
263 config_ = config; | 256 config_ = config; |
264 | 257 |
265 if (!ConfigureDecoder()) { | 258 if (!ConfigureDecoder()) { |
(...skipping 40 matching lines...) | |
306 | 299 |
307 // Libopus does not buffer output. Decoding is complete when an end of stream | 300 // Libopus does not buffer output. Decoding is complete when an end of stream |
308 // input buffer is received. | 301 // input buffer is received. |
309 if (input->end_of_stream()) { | 302 if (input->end_of_stream()) { |
310 decode_cb.Run(kOk, AudioBuffer::CreateEOSBuffer()); | 303 decode_cb.Run(kOk, AudioBuffer::CreateEOSBuffer()); |
311 return; | 304 return; |
312 } | 305 } |
313 | 306 |
314 // Make sure we are notified if http://crbug.com/49709 returns. Issue also | 307 // Make sure we are notified if http://crbug.com/49709 returns. Issue also |
315 // occurs with some damaged files. | 308 // occurs with some damaged files. |
316 if (input->timestamp() == kNoTimestamp() && | 309 if (input->timestamp() == kNoTimestamp()) { |
wolenetz 2014/04/28 21:52:07: Ditto of similar question in ffmpeg_audio_decoder.
DaleCurtis 2014/04/28 22:03:51: Ditto. This was copy pasted from the other decoder
| |
317 output_timestamp_helper_->base_timestamp() == kNoTimestamp()) { | |
318 DLOG(ERROR) << "Received a buffer without timestamps!"; | 310 DLOG(ERROR) << "Received a buffer without timestamps!"; |
319 decode_cb.Run(kDecodeError, NULL); | 311 decode_cb.Run(kDecodeError, NULL); |
320 return; | 312 return; |
321 } | 313 } |
322 | 314 |
323 if (last_input_timestamp_ != kNoTimestamp() && | |
324 input->timestamp() != kNoTimestamp() && | |
325 input->timestamp() < last_input_timestamp_) { | |
326 base::TimeDelta diff = input->timestamp() - last_input_timestamp_; | |
327 DLOG(ERROR) << "Input timestamps are not monotonically increasing! " | |
328 << " ts " << input->timestamp().InMicroseconds() << " us" | |
329 << " diff " << diff.InMicroseconds() << " us"; | |
330 decode_cb.Run(kDecodeError, NULL); | |
331 return; | |
332 } | |
333 | |
334 // Apply the necessary codec delay. | 315 // Apply the necessary codec delay. |
wolenetz 2014/04/28 21:52:07: Versus ffmpeg audio decoder, why do we wait until
DaleCurtis 2014/04/28 22:03:51: Because it should only be applied when we see star
| |
335 if (start_input_timestamp_ == kNoTimestamp()) | 316 if (start_input_timestamp_ == kNoTimestamp()) |
336 start_input_timestamp_ = input->timestamp(); | 317 start_input_timestamp_ = input->timestamp(); |
337 if (last_input_timestamp_ == kNoTimestamp() && | 318 if (!discard_helper_->initialized() && |
338 input->timestamp() == start_input_timestamp_) { | 319 input->timestamp() == start_input_timestamp_) { |
339 frames_to_discard_ = config_.codec_delay(); | 320 discard_helper_->Reset(config_.codec_delay()); |
wolenetz 2014/04/28 21:52:07: Is this correctly ignoring possible config_.seek_p
DaleCurtis 2014/04/28 22:03:51: Yes it is ignoring that value when we are at the s
| |
340 } | 321 } |
341 | 322 |
342 last_input_timestamp_ = input->timestamp(); | |
343 | |
344 scoped_refptr<AudioBuffer> output_buffer; | 323 scoped_refptr<AudioBuffer> output_buffer; |
345 | 324 |
346 if (!Decode(input, &output_buffer)) { | 325 if (!Decode(input, &output_buffer)) { |
347 decode_cb.Run(kDecodeError, NULL); | 326 decode_cb.Run(kDecodeError, NULL); |
348 return; | 327 return; |
349 } | 328 } |
350 | 329 |
351 if (output_buffer.get()) { | 330 if (output_buffer.get()) { |
352 // Execute callback to return the decoded audio. | 331 // Execute callback to return the decoded audio. |
353 decode_cb.Run(kOk, output_buffer); | 332 decode_cb.Run(kOk, output_buffer); |
(...skipping 74 matching lines...) | |
428 } | 407 } |
429 | 408 |
430 status = opus_multistream_decoder_ctl( | 409 status = opus_multistream_decoder_ctl( |
431 opus_decoder_, OPUS_SET_GAIN(opus_extra_data.gain_db)); | 410 opus_decoder_, OPUS_SET_GAIN(opus_extra_data.gain_db)); |
432 if (status != OPUS_OK) { | 411 if (status != OPUS_OK) { |
433 DLOG(ERROR) << "Failed to set OPUS header gain; status=" | 412 DLOG(ERROR) << "Failed to set OPUS header gain; status=" |
434 << opus_strerror(status); | 413 << opus_strerror(status); |
435 return false; | 414 return false; |
436 } | 415 } |
437 | 416 |
438 output_timestamp_helper_.reset( | 417 discard_helper_.reset( |
439 new AudioTimestampHelper(config_.samples_per_second())); | 418 new AudioDiscardHelper(config_.samples_per_second())); |
440 start_input_timestamp_ = kNoTimestamp(); | 419 start_input_timestamp_ = kNoTimestamp(); |
441 return true; | 420 return true; |
442 } | 421 } |
443 | 422 |
444 void OpusAudioDecoder::CloseDecoder() { | 423 void OpusAudioDecoder::CloseDecoder() { |
445 if (opus_decoder_) { | 424 if (opus_decoder_) { |
446 opus_multistream_decoder_destroy(opus_decoder_); | 425 opus_multistream_decoder_destroy(opus_decoder_); |
447 opus_decoder_ = NULL; | 426 opus_decoder_ = NULL; |
448 } | 427 } |
449 } | 428 } |
450 | 429 |
451 void OpusAudioDecoder::ResetTimestampState() { | 430 void OpusAudioDecoder::ResetTimestampState() { |
452 output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp()); | 431 discard_helper_->Reset( |
453 last_input_timestamp_ = kNoTimestamp(); | 432 discard_helper_->TimeDeltaToFrames(config_.seek_preroll())); |
454 frames_to_discard_ = TimeDeltaToAudioFrames(config_.seek_preroll(), | |
455 config_.samples_per_second()); | |
456 } | 433 } |
457 | 434 |
458 bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input, | 435 bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input, |
459 scoped_refptr<AudioBuffer>* output_buffer) { | 436 scoped_refptr<AudioBuffer>* output_buffer) { |
460 // Allocate a buffer for the output samples. | 437 // Allocate a buffer for the output samples. |
461 *output_buffer = AudioBuffer::CreateBuffer( | 438 *output_buffer = AudioBuffer::CreateBuffer( |
462 config_.sample_format(), | 439 config_.sample_format(), |
463 config_.channel_layout(), | 440 config_.channel_layout(), |
464 ChannelLayoutToChannelCount(config_.channel_layout()), | 441 ChannelLayoutToChannelCount(config_.channel_layout()), |
465 config_.samples_per_second(), | 442 config_.samples_per_second(), |
(...skipping 15 matching lines...) | |
481 | 458 |
482 if (frames_decoded < 0) { | 459 if (frames_decoded < 0) { |
483 DLOG(ERROR) << "opus_multistream_decode failed for" | 460 DLOG(ERROR) << "opus_multistream_decode failed for" |
484 << " timestamp: " << input->timestamp().InMicroseconds() | 461 << " timestamp: " << input->timestamp().InMicroseconds() |
485 << " us, duration: " << input->duration().InMicroseconds() | 462 << " us, duration: " << input->duration().InMicroseconds() |
486 << " us, packet size: " << input->data_size() << " bytes with" | 463 << " us, packet size: " << input->data_size() << " bytes with" |
487 << " status: " << opus_strerror(frames_decoded); | 464 << " status: " << opus_strerror(frames_decoded); |
488 return false; | 465 return false; |
489 } | 466 } |
490 | 467 |
491 if (output_timestamp_helper_->base_timestamp() == kNoTimestamp() && | |
492 !input->end_of_stream()) { | |
493 DCHECK(input->timestamp() != kNoTimestamp()); | |
494 output_timestamp_helper_->SetBaseTimestamp(input->timestamp()); | |
495 } | |
496 | |
497 // Trim off any extraneous allocation. | 468 // Trim off any extraneous allocation. |
498 DCHECK_LE(frames_decoded, output_buffer->get()->frame_count()); | 469 DCHECK_LE(frames_decoded, output_buffer->get()->frame_count()); |
499 const int trim_frames = output_buffer->get()->frame_count() - frames_decoded; | 470 const int trim_frames = output_buffer->get()->frame_count() - frames_decoded; |
500 if (trim_frames > 0) | 471 if (trim_frames > 0) |
501 output_buffer->get()->TrimEnd(trim_frames); | 472 output_buffer->get()->TrimEnd(trim_frames); |
502 | 473 |
503 // Handle frame discard and trimming. | 474 // Handles discards and timestamping. Discard the buffer if more data needed. |
504 int frames_to_output = frames_decoded; | 475 if (!discard_helper_->ProcessBuffers(input, *output_buffer)) |
505 if (frames_decoded > frames_to_discard_) { | |
506 if (frames_to_discard_ > 0) { | |
507 output_buffer->get()->TrimStart(frames_to_discard_); | |
508 frames_to_output -= frames_to_discard_; | |
509 frames_to_discard_ = 0; | |
510 } | |
511 if (input->discard_padding().InMicroseconds() > 0) { | |
512 int discard_padding = TimeDeltaToAudioFrames( | |
513 input->discard_padding(), config_.samples_per_second()); | |
514 if (discard_padding < 0 || discard_padding > frames_to_output) { | |
515 DVLOG(1) << "Invalid file. Incorrect discard padding value."; | |
516 return false; | |
517 } | |
518 output_buffer->get()->TrimEnd(discard_padding); | |
519 frames_to_output -= discard_padding; | |
520 } | |
521 } else { | |
522 frames_to_discard_ -= frames_to_output; | |
523 frames_to_output = 0; | |
524 } | |
525 | |
526 // Assign timestamp and duration to the buffer. | |
527 output_buffer->get()->set_timestamp(output_timestamp_helper_->GetTimestamp()); | |
528 output_buffer->get()->set_duration( | |
529 output_timestamp_helper_->GetFrameDuration(frames_to_output)); | |
530 output_timestamp_helper_->AddFrames(frames_decoded); | |
531 | |
532 // Discard the buffer to indicate we need more data. | |
533 if (!frames_to_output) | |
534 *output_buffer = NULL; | 476 *output_buffer = NULL; |
535 | 477 |
536 return true; | 478 return true; |
537 } | 479 } |
538 | 480 |
539 } // namespace media | 481 } // namespace media |
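Note for reviewers skimming the interdiff: the patch replaces the hand-rolled output_timestamp_helper_ / last_input_timestamp_ / frames_to_discard_ bookkeeping with media::AudioDiscardHelper. Below is a minimal sketch of the resulting flow. It only restates the calls visible in this CL (constructor taking the sample rate, initialized(), Reset(), TimeDeltaToFrames(), ProcessBuffers()); the exact AudioDiscardHelper signatures in media/base may differ, so treat this as an assumption-laden summary rather than the helper's real API.

```cpp
// Sketch only -- quotes the calls used in this CL; assumes the Chromium
// media/ types (AudioDecoderConfig, DecoderBuffer, AudioBuffer).

// ConfigureDecoder(): one helper per stream, sized by the sample rate so it
// can convert TimeDelta values (seek preroll, discard padding) into frames.
//   discard_helper_.reset(
//       new AudioDiscardHelper(config_.samples_per_second()));

// DecodeBuffer(): on the first packet of the stream (timestamp equals the
// recorded start_input_timestamp_), arm the helper with the codec delay so
// the Opus pre-skip frames are dropped exactly once.
//   if (!discard_helper_->initialized() &&
//       input->timestamp() == start_input_timestamp_)
//     discard_helper_->Reset(config_.codec_delay());

// ResetTimestampState() (after a seek): arm it with seek preroll instead.
//   discard_helper_->Reset(
//       discard_helper_->TimeDeltaToFrames(config_.seek_preroll()));

// Decode(): after trimming the libopus output to frames_decoded, the helper
// applies front/end discards and stamps timestamp and duration; a false
// return means the whole buffer was consumed by discards, so the decoder
// outputs nothing for this input.
//   if (!discard_helper_->ProcessBuffers(input, *output_buffer))
//     *output_buffer = NULL;
```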