OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h" | 5 #include "media/cdm/ppapi/ffmpeg_cdm_audio_decoder.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "media/base/audio_bus.h" | 10 #include "media/base/audio_bus.h" |
(...skipping 62 matching lines...) | |
73 memcpy(codec_context->extradata, config.extra_data, | 73 memcpy(codec_context->extradata, config.extra_data, |
74 config.extra_data_size); | 74 config.extra_data_size); |
75 memset(codec_context->extradata + config.extra_data_size, '\0', | 75 memset(codec_context->extradata + config.extra_data_size, '\0', |
76 FF_INPUT_BUFFER_PADDING_SIZE); | 76 FF_INPUT_BUFFER_PADDING_SIZE); |
77 } else { | 77 } else { |
78 codec_context->extradata = NULL; | 78 codec_context->extradata = NULL; |
79 codec_context->extradata_size = 0; | 79 codec_context->extradata_size = 0; |
80 } | 80 } |
81 } | 81 } |
82 | 82 |
83 static cdm::AudioFormat AVSampleFormatToCdmAudioFormat( | |
84 AVSampleFormat sample_format) { | |
85 switch (sample_format) { | |
86 case AV_SAMPLE_FMT_U8: | |
87 return cdm::kAudioFormatU8; | |
88 case AV_SAMPLE_FMT_S16: | |
89 return cdm::kAudioFormatS16; | |
90 case AV_SAMPLE_FMT_S32: | |
91 return cdm::kAudioFormatS32; | |
92 case AV_SAMPLE_FMT_FLT: | |
93 return cdm::kAudioFormatF32; | |
94 case AV_SAMPLE_FMT_S16P: | |
95 return cdm::kAudioFormatPlanarS16; | |
96 case AV_SAMPLE_FMT_FLTP: | |
97 return cdm::kAudioFormatPlanarF32; | |
98 default: | |
99 DVLOG(1) << "Unknown AVSampleFormat: " << sample_format; | |
100 } | |
101 return cdm::kUnknownAudioFormat; | |
102 } | |
103 | |
104 static void CopySamples(cdm::AudioFormat cdm_format, | |
105 int decoded_audio_size, | |
106 const AVFrame* av_frame, | |
xhwang 2013/10/14 20:06:42: use const-ref for input?
DaleCurtis 2013/10/14 20:14:14: Done.
| |
107 uint8_t* output_buffer) { | |
108 switch (cdm_format) { | |
109 case cdm::kAudioFormatU8: | |
110 case cdm::kAudioFormatS16: | |
111 case cdm::kAudioFormatS32: | |
112 case cdm::kAudioFormatF32: | |
113 memcpy(output_buffer, av_frame->data[0], decoded_audio_size); | |
114 break; | |
115 case cdm::kAudioFormatPlanarS16: | |
116 case cdm::kAudioFormatPlanarF32: { | |
117 const int decoded_size_per_channel = | |
118 decoded_audio_size / av_frame->channels; | |
119 for (int i = 0; i < av_frame->channels; ++i) { | |
120 memcpy(output_buffer, | |
121 av_frame->extended_data[i], | |
122 decoded_size_per_channel); | |
123 output_buffer += decoded_size_per_channel; | |
124 } | |
125 break; | |
126 } | |
127 default: | |
128 NOTREACHED() << "Unsupported CDM Audio Format!"; | |
129 memset(output_buffer, 0, decoded_audio_size); | |
130 } | |
131 } | |
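Note that for the planar formats, CopySamples() above writes each channel's samples as one contiguous block (channel 0 first, then channel 1, and so on) rather than interleaving them. A minimal consumer-side sketch of indexing such a buffer, assuming kAudioFormatPlanarS16; the helper name and parameters are illustrative and not part of this CL:

#include <stdint.h>

// Reads sample |frame| of channel |ch| from a buffer that CopySamples()
// filled with a planar S16 format, where each channel's
// |frames_per_channel| samples are laid out back to back.
static int16_t ReadPlanarS16Sample(const uint8_t* buffer,
                                   int frames_per_channel,
                                   int ch,
                                   int frame) {
  const int16_t* samples = reinterpret_cast<const int16_t*>(buffer);
  return samples[ch * frames_per_channel + frame];
}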
132 | |
83 FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Host* host) | 133 FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Host* host) |
84 : is_initialized_(false), | 134 : is_initialized_(false), |
85 host_(host), | 135 host_(host), |
86 bits_per_channel_(0), | |
87 samples_per_second_(0), | 136 samples_per_second_(0), |
88 channels_(0), | 137 channels_(0), |
89 av_sample_format_(0), | 138 av_sample_format_(0), |
90 bytes_per_frame_(0), | 139 bytes_per_frame_(0), |
91 last_input_timestamp_(kNoTimestamp()), | 140 last_input_timestamp_(kNoTimestamp()), |
92 output_bytes_to_drop_(0) { | 141 output_bytes_to_drop_(0) { |
93 } | 142 } |
94 | 143 |
95 FFmpegCdmAudioDecoder::~FFmpegCdmAudioDecoder() { | 144 FFmpegCdmAudioDecoder::~FFmpegCdmAudioDecoder() { |
96 ReleaseFFmpegResources(); | 145 ReleaseFFmpegResources(); |
97 } | 146 } |
98 | 147 |
99 bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) { | 148 bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) { |
100 DVLOG(1) << "Initialize()"; | 149 DVLOG(1) << "Initialize()"; |
101 | |
102 if (!IsValidConfig(config)) { | 150 if (!IsValidConfig(config)) { |
103 LOG(ERROR) << "Initialize(): invalid audio decoder configuration."; | 151 LOG(ERROR) << "Initialize(): invalid audio decoder configuration."; |
104 return false; | 152 return false; |
105 } | 153 } |
106 | 154 |
107 if (is_initialized_) { | 155 if (is_initialized_) { |
108 LOG(ERROR) << "Initialize(): Already initialized."; | 156 LOG(ERROR) << "Initialize(): Already initialized."; |
109 return false; | 157 return false; |
110 } | 158 } |
111 | 159 |
(...skipping 12 matching lines...) | |
124 return false; | 172 return false; |
125 } | 173 } |
126 | 174 |
127 // Ensure avcodec_open2() respected our format request. | 175 // Ensure avcodec_open2() respected our format request. |
128 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P) { | 176 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P) { |
129 DLOG(ERROR) << "Unable to configure a supported sample format: " | 177 DLOG(ERROR) << "Unable to configure a supported sample format: " |
130 << codec_context_->sample_fmt; | 178 << codec_context_->sample_fmt; |
131 return false; | 179 return false; |
132 } | 180 } |
133 | 181 |
134 // Some codecs will only output float data, so we need to convert to integer | |
135 // before returning the decoded buffer. | |
136 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP || | |
137 codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { | |
138 // Preallocate the AudioBus for float conversions. We can treat interleaved | |
139 // float data as a single planar channel since our output is expected in an | |
140 // interleaved format anyways. | |
141 int channels = codec_context_->channels; | |
142 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) | |
143 channels = 1; | |
144 converter_bus_ = AudioBus::CreateWrapper(channels); | |
145 } | |
146 | |
147 // Success! | 182 // Success! |
148 av_frame_.reset(avcodec_alloc_frame()); | 183 av_frame_.reset(avcodec_alloc_frame()); |
149 bits_per_channel_ = config.bits_per_channel; | |
150 samples_per_second_ = config.samples_per_second; | 184 samples_per_second_ = config.samples_per_second; |
151 bytes_per_frame_ = codec_context_->channels * bits_per_channel_ / 8; | 185 bytes_per_frame_ = codec_context_->channels * config.bits_per_channel / 8; |
152 output_timestamp_helper_.reset( | 186 output_timestamp_helper_.reset( |
153 new AudioTimestampHelper(config.samples_per_second)); | 187 new AudioTimestampHelper(config.samples_per_second)); |
154 serialized_audio_frames_.reserve(bytes_per_frame_ * samples_per_second_); | |
155 is_initialized_ = true; | 188 is_initialized_ = true; |
156 | 189 |
157 // Store initial values to guard against midstream configuration changes. | 190 // Store initial values to guard against midstream configuration changes. |
158 channels_ = codec_context_->channels; | 191 channels_ = codec_context_->channels; |
159 av_sample_format_ = codec_context_->sample_fmt; | 192 av_sample_format_ = codec_context_->sample_fmt; |
160 | 193 |
161 return true; | 194 return true; |
162 } | 195 } |
163 | 196 |
164 void FFmpegCdmAudioDecoder::Deinitialize() { | 197 void FFmpegCdmAudioDecoder::Deinitialize() { |
(...skipping 54 matching lines...) | |
219 | 252 |
220 last_input_timestamp_ = timestamp; | 253 last_input_timestamp_ = timestamp; |
221 } | 254 } |
222 } | 255 } |
223 | 256 |
224 AVPacket packet; | 257 AVPacket packet; |
225 av_init_packet(&packet); | 258 av_init_packet(&packet); |
226 packet.data = const_cast<uint8_t*>(compressed_buffer); | 259 packet.data = const_cast<uint8_t*>(compressed_buffer); |
227 packet.size = compressed_buffer_size; | 260 packet.size = compressed_buffer_size; |
228 | 261 |
262 // Tell the CDM what AudioFormat we're using. | |
263 const cdm::AudioFormat cdm_format = AVSampleFormatToCdmAudioFormat( | |
264 static_cast<AVSampleFormat>(av_sample_format_)); | |
265 DCHECK_NE(cdm_format, cdm::kUnknownAudioFormat); | |
266 decoded_frames->SetFormat(cdm_format); | |
267 | |
229 // Each audio packet may contain several frames, so we must call the decoder | 268 // Each audio packet may contain several frames, so we must call the decoder |
230 // until we've exhausted the packet. Regardless of the packet size we always | 269 // until we've exhausted the packet. Regardless of the packet size we always |
231 // want to hand it to the decoder at least once, otherwise we would end up | 270 // want to hand it to the decoder at least once, otherwise we would end up |
232 // skipping end of stream packets since they have a size of zero. | 271 // skipping end of stream packets since they have a size of zero. |
233 do { | 272 do { |
234 // Reset frame to default values. | 273 // Reset frame to default values. |
235 avcodec_get_frame_defaults(av_frame_.get()); | 274 avcodec_get_frame_defaults(av_frame_.get()); |
236 | 275 |
237 int frame_decoded = 0; | 276 int frame_decoded = 0; |
238 int result = avcodec_decode_audio4( | 277 int result = avcodec_decode_audio4( |
(...skipping 43 matching lines...) | |
282 << ", Channels: " << av_frame_->channels << " vs " | 321 << ", Channels: " << av_frame_->channels << " vs " |
283 << channels_ | 322 << channels_ |
284 << ", Sample Format: " << av_frame_->format << " vs " | 323 << ", Sample Format: " << av_frame_->format << " vs " |
285 << av_sample_format_; | 324 << av_sample_format_; |
286 return cdm::kDecodeError; | 325 return cdm::kDecodeError; |
287 } | 326 } |
288 | 327 |
289 decoded_audio_size = av_samples_get_buffer_size( | 328 decoded_audio_size = av_samples_get_buffer_size( |
290 NULL, codec_context_->channels, av_frame_->nb_samples, | 329 NULL, codec_context_->channels, av_frame_->nb_samples, |
291 codec_context_->sample_fmt, 1); | 330 codec_context_->sample_fmt, 1); |
292 // If we're decoding into float, adjust audio size. | |
293 if (converter_bus_ && bits_per_channel_ / 8 != sizeof(float)) { | |
294 DCHECK(codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT || | |
295 codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP); | |
296 decoded_audio_size *= | |
297 static_cast<float>(bits_per_channel_ / 8) / sizeof(float); | |
298 } | |
299 } | 331 } |
300 | 332 |
301 int start_sample = 0; | 333 int start_sample = 0; |
302 if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) { | 334 if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) { |
303 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) | 335 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) |
304 << "Decoder didn't output full frames"; | 336 << "Decoder didn't output full frames"; |
305 | 337 |
306 int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_); | 338 int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_); |
307 start_sample = dropped_size / bytes_per_frame_; | 339 start_sample = dropped_size / bytes_per_frame_; |
308 decoded_audio_size -= dropped_size; | 340 decoded_audio_size -= dropped_size; |
309 output_bytes_to_drop_ -= dropped_size; | 341 output_bytes_to_drop_ -= dropped_size; |
310 } | 342 } |
311 | 343 |
312 scoped_refptr<DataBuffer> output; | |
313 if (decoded_audio_size > 0) { | 344 if (decoded_audio_size > 0) { |
314 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) | 345 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) |
315 << "Decoder didn't output full frames"; | 346 << "Decoder didn't output full frames"; |
316 | 347 |
317 // Convert float data using an AudioBus. | |
318 if (converter_bus_) { | |
319 // Setup the AudioBus as a wrapper of the AVFrame data and then use | |
320 // AudioBus::ToInterleaved() to convert the data as necessary. | |
321 int skip_frames = start_sample; | |
322 int total_frames = av_frame_->nb_samples; | |
323 int frames_to_interleave = decoded_audio_size / bytes_per_frame_; | |
324 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { | |
325 DCHECK_EQ(converter_bus_->channels(), 1); | |
326 total_frames *= codec_context_->channels; | |
327 skip_frames *= codec_context_->channels; | |
328 frames_to_interleave *= codec_context_->channels; | |
329 } | |
330 | |
331 converter_bus_->set_frames(total_frames); | |
332 for (int i = 0; i < converter_bus_->channels(); ++i) { | |
333 converter_bus_->SetChannelData(i, reinterpret_cast<float*>( | |
334 av_frame_->extended_data[i])); | |
335 } | |
336 | |
337 output = new DataBuffer(decoded_audio_size); | |
338 output->set_data_size(decoded_audio_size); | |
339 | |
340 DCHECK_EQ(frames_to_interleave, converter_bus_->frames() - skip_frames); | |
341 converter_bus_->ToInterleavedPartial( | |
342 skip_frames, frames_to_interleave, bits_per_channel_ / 8, | |
343 output->writable_data()); | |
344 } else { | |
345 output = DataBuffer::CopyFrom( | |
346 av_frame_->extended_data[0] + start_sample * bytes_per_frame_, | |
347 decoded_audio_size); | |
348 } | |
349 | |
350 base::TimeDelta output_timestamp = | 348 base::TimeDelta output_timestamp = |
351 output_timestamp_helper_->GetTimestamp(); | 349 output_timestamp_helper_->GetTimestamp(); |
352 output_timestamp_helper_->AddFrames(decoded_audio_size / | 350 output_timestamp_helper_->AddFrames(decoded_audio_size / |
353 bytes_per_frame_); | 351 bytes_per_frame_); |
354 | 352 |
355 // Serialize the audio samples into |serialized_audio_frames_|. | 353 // If we've exhausted the packet in the first decode we can write directly |
354 // into the frame buffer instead of a multistep serialization approach. | |
355 if (serialized_audio_frames_.empty() && !packet.size) { | |
356 const uint32_t buffer_size = decoded_audio_size + sizeof(int64) * 2; | |
357 decoded_frames->SetFrameBuffer(host_->Allocate(buffer_size)); | |
358 if (!decoded_frames->FrameBuffer()) { | |
359 LOG(ERROR) << "DecodeBuffer() cdm::Host::Allocate failed."; | |
360 return cdm::kDecodeError; | |
361 } | |
362 decoded_frames->FrameBuffer()->SetSize(buffer_size); | |
363 uint8_t* output_buffer = decoded_frames->FrameBuffer()->Data(); | |
364 | |
365 const int64 timestamp = output_timestamp.InMicroseconds(); | |
366 memcpy(output_buffer, ×tamp, sizeof(timestamp)); | |
367 output_buffer += sizeof(timestamp); | |
368 | |
369 const int64 output_size = decoded_audio_size; | |
370 memcpy(output_buffer, &output_size, sizeof(output_size)); | |
371 output_buffer += sizeof(output_size); | |
372 | |
373 // Copy the samples and return success. | |
374 CopySamples( | |
375 cdm_format, decoded_audio_size, av_frame_.get(), output_buffer); | |
376 return cdm::kSuccess; | |
377 } | |
378 | |
379 // There are still more frames to decode, so we need to serialize them in | |
380 // a secondary buffer since we don't know their sizes ahead of time (which | |
381 // is required to allocate the FrameBuffer object). | |
356 SerializeInt64(output_timestamp.InMicroseconds()); | 382 SerializeInt64(output_timestamp.InMicroseconds()); |
357 SerializeInt64(output->data_size()); | 383 SerializeInt64(decoded_audio_size); |
358 serialized_audio_frames_.insert( | 384 |
359 serialized_audio_frames_.end(), | 385 const size_t previous_size = serialized_audio_frames_.size(); |
360 output->data(), | 386 serialized_audio_frames_.resize(previous_size + decoded_audio_size); |
361 output->data() + output->data_size()); | 387 uint8_t* output_buffer = &serialized_audio_frames_[0] + previous_size; |
388 CopySamples( | |
389 cdm_format, decoded_audio_size, av_frame_.get(), output_buffer); | |
362 } | 390 } |
363 } while (packet.size > 0); | 391 } while (packet.size > 0); |
364 | 392 |
365 if (!serialized_audio_frames_.empty()) { | 393 if (!serialized_audio_frames_.empty()) { |
366 decoded_frames->SetFrameBuffer( | 394 decoded_frames->SetFrameBuffer( |
367 host_->Allocate(serialized_audio_frames_.size())); | 395 host_->Allocate(serialized_audio_frames_.size())); |
368 if (!decoded_frames->FrameBuffer()) { | 396 if (!decoded_frames->FrameBuffer()) { |
369 LOG(ERROR) << "DecodeBuffer() cdm::Host::Allocate failed."; | 397 LOG(ERROR) << "DecodeBuffer() cdm::Host::Allocate failed."; |
370 return cdm::kDecodeError; | 398 return cdm::kDecodeError; |
371 } | 399 } |
(...skipping 16 matching lines...) | |
388 } | 416 } |
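Both paths above (the direct-write fast path and the serialized fallback) produce a frame buffer holding one or more records of the form [int64 timestamp in microseconds][int64 size][size bytes of samples]. An illustrative sketch of walking that layout on the consumer side; the function name is hypothetical and not part of this CL:

#include <stdint.h>
#include <string.h>

// Walks a buffer of serialized audio frames as written by DecodeBuffer():
// each record is an int64 timestamp (microseconds), an int64 payload size,
// then that many bytes of samples in the cdm::AudioFormat set via
// SetFormat() earlier.
static void WalkSerializedAudioFrames(const uint8_t* data, size_t size) {
  const uint8_t* cur = data;
  const uint8_t* end = data + size;
  while (cur + 2 * sizeof(int64_t) <= end) {
    int64_t timestamp_us, payload_size;
    memcpy(&timestamp_us, cur, sizeof(timestamp_us));
    cur += sizeof(timestamp_us);
    memcpy(&payload_size, cur, sizeof(payload_size));
    cur += sizeof(payload_size);
    const uint8_t* samples = cur;  // |payload_size| bytes of decoded audio.
    cur += payload_size;
    // ... hand |samples| and |timestamp_us| to the consumer ...
    (void)samples;
  }
}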
389 | 417 |
390 void FFmpegCdmAudioDecoder::ReleaseFFmpegResources() { | 418 void FFmpegCdmAudioDecoder::ReleaseFFmpegResources() { |
391 DVLOG(1) << "ReleaseFFmpegResources()"; | 419 DVLOG(1) << "ReleaseFFmpegResources()"; |
392 | 420 |
393 codec_context_.reset(); | 421 codec_context_.reset(); |
394 av_frame_.reset(); | 422 av_frame_.reset(); |
395 } | 423 } |
396 | 424 |
397 void FFmpegCdmAudioDecoder::SerializeInt64(int64 value) { | 425 void FFmpegCdmAudioDecoder::SerializeInt64(int64 value) { |
398 int previous_size = serialized_audio_frames_.size(); | 426 const size_t previous_size = serialized_audio_frames_.size(); |
399 serialized_audio_frames_.resize(previous_size + sizeof(value)); | 427 serialized_audio_frames_.resize(previous_size + sizeof(value)); |
400 memcpy(&serialized_audio_frames_[0] + previous_size, &value, sizeof(value)); | 428 memcpy(&serialized_audio_frames_[0] + previous_size, &value, sizeof(value)); |
401 } | 429 } |
402 | 430 |
403 } // namespace media | 431 } // namespace media |