Index: media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc |
diff --git a/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc b/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc |
index 1e66736c2b85c54383edf6758dc21dd9502ef81b..c32f8d4e578ec75ab8df823c95968c00940dfb16 100644 |
--- a/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc |
+++ b/media/cdm/ppapi/ffmpeg_cdm_audio_decoder.cc |
@@ -80,10 +80,30 @@ static void CdmAudioDecoderConfigToAVCodecContext( |
} |
} |
+cdm::AudioFormat AVSampleFormatToCdmAudioFormat( |
+ AVSampleFormat sample_format) { |
+ switch (sample_format) { |
+ case AV_SAMPLE_FMT_U8: |
+ return cdm::kAudioFormatU8; |
+ case AV_SAMPLE_FMT_S16: |
+ return cdm::kAudioFormatS16; |
+ case AV_SAMPLE_FMT_S32: |
+ return cdm::kAudioFormatS32; |
+ case AV_SAMPLE_FMT_FLT: |
+ return cdm::kAudioFormatF32; |
+ case AV_SAMPLE_FMT_S16P: |
+ return cdm::kAudioFormatPlanarS16; |
+ case AV_SAMPLE_FMT_FLTP: |
+ return cdm::kAudioFormatPlanarF32; |
+ default: |
+ DVLOG(1) << "Unknown AVSampleFormat: " << sample_format; |
+ } |
+ return cdm::kUnknownAudioFormat; |
+} |
+ |
FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Host* host) |
: is_initialized_(false), |
host_(host), |
- bits_per_channel_(0), |
samples_per_second_(0), |
channels_(0), |
av_sample_format_(0), |
@@ -98,7 +118,6 @@ FFmpegCdmAudioDecoder::~FFmpegCdmAudioDecoder() { |
bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) { |
DVLOG(1) << "Initialize()"; |
- |
if (!IsValidConfig(config)) { |
LOG(ERROR) << "Initialize(): invalid audio decoder configuration."; |
return false; |
@@ -131,24 +150,10 @@ bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) { |
return false; |
} |
- // Some codecs will only output float data, so we need to convert to integer |
- // before returning the decoded buffer. |
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP || |
- codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { |
- // Preallocate the AudioBus for float conversions. We can treat interleaved |
- // float data as a single planar channel since our output is expected in an |
- // interleaved format anyways. |
- int channels = codec_context_->channels; |
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) |
- channels = 1; |
- converter_bus_ = AudioBus::CreateWrapper(channels); |
- } |
- |
// Success! |
av_frame_.reset(avcodec_alloc_frame()); |
- bits_per_channel_ = config.bits_per_channel; |
samples_per_second_ = config.samples_per_second; |
- bytes_per_frame_ = codec_context_->channels * bits_per_channel_ / 8; |
+ bytes_per_frame_ = codec_context_->channels * config.bits_per_channel / 8; |
output_timestamp_helper_.reset( |
new AudioTimestampHelper(config.samples_per_second)); |
serialized_audio_frames_.reserve(bytes_per_frame_ * samples_per_second_); |
DaleCurtis
2013/10/14 19:04:45
Removed since this allocates 172kb and most output […comment truncated in capture]
|
@@ -226,6 +231,12 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer( |
packet.data = const_cast<uint8_t*>(compressed_buffer); |
packet.size = compressed_buffer_size; |
+ // Tell the CDM what AudioFormat we're using. |
+ const cdm::AudioFormat cdm_format = AVSampleFormatToCdmAudioFormat( |
+ static_cast<AVSampleFormat>(av_sample_format_)); |
+ DCHECK_NE(cdm_format, cdm::kUnknownAudioFormat); |
+ decoded_frames->SetFormat(cdm_format); |
+ |
// Each audio packet may contain several frames, so we must call the decoder |
// until we've exhausted the packet. Regardless of the packet size we always |
// want to hand it to the decoder at least once, otherwise we would end up |
@@ -289,13 +300,6 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer( |
decoded_audio_size = av_samples_get_buffer_size( |
NULL, codec_context_->channels, av_frame_->nb_samples, |
codec_context_->sample_fmt, 1); |
- // If we're decoding into float, adjust audio size. |
- if (converter_bus_ && bits_per_channel_ / 8 != sizeof(float)) { |
- DCHECK(codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT || |
- codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP); |
- decoded_audio_size *= |
- static_cast<float>(bits_per_channel_ / 8) / sizeof(float); |
- } |
} |
int start_sample = 0; |
@@ -309,59 +313,75 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer( |
output_bytes_to_drop_ -= dropped_size; |
} |
- scoped_refptr<DataBuffer> output; |
if (decoded_audio_size > 0) { |
DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) |
<< "Decoder didn't output full frames"; |
- // Convert float data using an AudioBus. |
- if (converter_bus_) { |
- // Setup the AudioBus as a wrapper of the AVFrame data and then use |
- // AudioBus::ToInterleaved() to convert the data as necessary. |
- int skip_frames = start_sample; |
- int total_frames = av_frame_->nb_samples; |
- int frames_to_interleave = decoded_audio_size / bytes_per_frame_; |
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { |
- DCHECK_EQ(converter_bus_->channels(), 1); |
- total_frames *= codec_context_->channels; |
- skip_frames *= codec_context_->channels; |
- frames_to_interleave *= codec_context_->channels; |
- } |
+ base::TimeDelta output_timestamp = |
+ output_timestamp_helper_->GetTimestamp(); |
+ output_timestamp_helper_->AddFrames(decoded_audio_size / |
+ bytes_per_frame_); |
- converter_bus_->set_frames(total_frames); |
- for (int i = 0; i < converter_bus_->channels(); ++i) { |
- converter_bus_->SetChannelData(i, reinterpret_cast<float*>( |
- av_frame_->extended_data[i])); |
+ // If we've exhausted the packet in the first decode we can write directly |
+ // into the frame buffer instead of a multistep serialization approach. |
xhwang
2013/10/11 22:45:16
Hmm, how much improvement does this approach buy u[…comment truncated in capture]
DaleCurtis
2013/10/12 01:51:05
Hard to tell since we don't have pipeline tests fo[…comment truncated in capture]
DaleCurtis
2013/10/14 19:04:45
Actually this ended up more complicated since we n[…comment truncated in capture]
|
+ uint8_t* output_buffer = NULL; |
+ if (serialized_audio_frames_.empty() && !packet.size) { |
+ const uint32_t buffer_size = decoded_audio_size + sizeof(int64) * 2; |
+ decoded_frames->SetFrameBuffer(host_->Allocate(buffer_size)); |
+ if (!decoded_frames->FrameBuffer()) { |
+ LOG(ERROR) << "DecodeBuffer() cdm::Host::Allocate failed."; |
+ return cdm::kDecodeError; |
} |
+ decoded_frames->FrameBuffer()->SetSize(buffer_size); |
+ output_buffer = decoded_frames->FrameBuffer()->Data(); |
- output = new DataBuffer(decoded_audio_size); |
- output->set_data_size(decoded_audio_size); |
+ const int64 timestamp = output_timestamp.InMicroseconds(); |
+ memcpy(output_buffer, &timestamp, sizeof(timestamp)); |
+ output_buffer += sizeof(timestamp); |
- DCHECK_EQ(frames_to_interleave, converter_bus_->frames() - skip_frames); |
- converter_bus_->ToInterleavedPartial( |
- skip_frames, frames_to_interleave, bits_per_channel_ / 8, |
- output->writable_data()); |
+ const int64 output_size = decoded_audio_size; |
+ memcpy(output_buffer, &output_size, sizeof(output_size)); |
+ output_buffer += sizeof(output_size); |
} else { |
- output = DataBuffer::CopyFrom( |
- av_frame_->extended_data[0] + start_sample * bytes_per_frame_, |
- decoded_audio_size); |
- } |
+ // Serialize the audio samples into |serialized_audio_frames_|. |
+ SerializeInt64(output_timestamp.InMicroseconds()); |
+ SerializeInt64(decoded_audio_size); |
- base::TimeDelta output_timestamp = |
- output_timestamp_helper_->GetTimestamp(); |
- output_timestamp_helper_->AddFrames(decoded_audio_size / |
- bytes_per_frame_); |
+ const size_t previous_size = serialized_audio_frames_.size(); |
+ serialized_audio_frames_.resize(previous_size + decoded_audio_size); |
+ output_buffer = &serialized_audio_frames_[0] + previous_size; |
+ } |
- // Serialize the audio samples into |serialized_audio_frames_|. |
- SerializeInt64(output_timestamp.InMicroseconds()); |
- SerializeInt64(output->data_size()); |
- serialized_audio_frames_.insert( |
- serialized_audio_frames_.end(), |
- output->data(), |
- output->data() + output->data_size()); |
+ switch (cdm_format) { |
+ case cdm::kAudioFormatU8: |
+ case cdm::kAudioFormatS16: |
+ case cdm::kAudioFormatS32: |
+ case cdm::kAudioFormatF32: |
+ memcpy(output_buffer, av_frame_->data[0], decoded_audio_size); |
+ break; |
+ case cdm::kAudioFormatPlanarS16: |
+ case cdm::kAudioFormatPlanarF32: { |
+ const int decoded_size_per_channel = |
+ decoded_audio_size / av_frame_->channels; |
xhwang
2013/10/11 22:45:16
shall we CHECK(decoded_audio_size % av_frame_->cha[…comment truncated in capture]
DaleCurtis
2013/10/12 01:51:05
1. That wouldn't overflow since we'd always be rou[…comment truncated in capture]
|
+ for (int i = 0; i < av_frame_->channels; ++i) { |
+ memcpy(output_buffer, |
+ av_frame_->extended_data[i], |
+ decoded_size_per_channel); |
+ output_buffer += decoded_size_per_channel; |
+ } |
+ break; |
+ } |
+ default: |
+ NOTREACHED() << "Unsupported CDM Audio Format!"; |
+ memset(output_buffer, 0, decoded_audio_size); |
+ } |
} |
} while (packet.size > 0); |
+ // If the decode loop already wrote out the data, we're done. |
+ if (decoded_frames->FrameBuffer()) |
+ return cdm::kSuccess; |
+ |
if (!serialized_audio_frames_.empty()) { |
decoded_frames->SetFrameBuffer( |
host_->Allocate(serialized_audio_frames_.size())); |