OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "webkit/media/crypto/ppapi/ffmpeg_cdm_audio_decoder.h" | 5 #include "webkit/media/crypto/ppapi/ffmpeg_cdm_audio_decoder.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "media/base/audio_bus.h" | |
11 #include "media/base/audio_timestamp_helper.h" | |
10 #include "media/base/buffers.h" | 12 #include "media/base/buffers.h" |
13 #include "media/base/data_buffer.h" | |
11 #include "media/base/limits.h" | 14 #include "media/base/limits.h" |
12 #include "webkit/media/crypto/ppapi/cdm/content_decryption_module.h" | 15 #include "webkit/media/crypto/ppapi/cdm/content_decryption_module.h" |
13 | 16 |
14 // Include FFmpeg header files. | 17 // Include FFmpeg header files. |
15 extern "C" { | 18 extern "C" { |
16 // Temporarily disable possible loss of data warning. | 19 // Temporarily disable possible loss of data warning. |
17 MSVC_PUSH_DISABLE_WARNING(4244); | 20 MSVC_PUSH_DISABLE_WARNING(4244); |
18 #include <libavcodec/avcodec.h> | 21 #include <libavcodec/avcodec.h> |
19 MSVC_POP_WARNING(); | 22 MSVC_POP_WARNING(); |
20 } // extern "C" | 23 } // extern "C" |
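The FFmpeg headers are plain C, so they are included inside an extern "C" block, and MSVC warning 4244 ("possible loss of data" on implicit narrowing conversions) is suppressed just for the include. As a rough sketch of the pattern only (the real macros live in Chromium's base/compiler_specific.h and may differ; the non-MSVC branch here is an assumed no-op):

```cpp
// Hedged sketch of typical push/disable/pop warning macros for MSVC; the
// exact Chromium definitions are not reproduced here.
#if defined(_MSC_VER)
#define MSVC_PUSH_DISABLE_WARNING(n) \
  __pragma(warning(push)) __pragma(warning(disable : n))
#define MSVC_POP_WARNING() __pragma(warning(pop))
#else
#define MSVC_PUSH_DISABLE_WARNING(n)
#define MSVC_POP_WARNING()
#endif
```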
(...skipping 16 matching lines...) | |
37 return CODEC_ID_NONE; | 40 return CODEC_ID_NONE; |
38 } | 41 } |
39 } | 42 } |
40 | 43 |
41 static void CdmAudioDecoderConfigToAVCodecContext( | 44 static void CdmAudioDecoderConfigToAVCodecContext( |
42 const cdm::AudioDecoderConfig& config, | 45 const cdm::AudioDecoderConfig& config, |
43 AVCodecContext* codec_context) { | 46 AVCodecContext* codec_context) { |
44 codec_context->codec_type = AVMEDIA_TYPE_AUDIO; | 47 codec_context->codec_type = AVMEDIA_TYPE_AUDIO; |
45 codec_context->codec_id = CdmAudioCodecToCodecID(config.codec); | 48 codec_context->codec_id = CdmAudioCodecToCodecID(config.codec); |
46 | 49 |
50 LOG(ERROR) << "ClearKey CDM bpc: " << config.bits_per_channel; | |
  DaleCurtis 2013/01/10 01:24:35: Remove?
  xhwang 2013/01/10 18:17:23: Done.
51 | |
47 switch (config.bits_per_channel) { | 52 switch (config.bits_per_channel) { |
48 case 8: | 53 case 8: |
49 codec_context->sample_fmt = AV_SAMPLE_FMT_U8; | 54 codec_context->sample_fmt = AV_SAMPLE_FMT_U8; |
50 break; | 55 break; |
51 case 16: | 56 case 16: |
52 codec_context->sample_fmt = AV_SAMPLE_FMT_S16; | 57 codec_context->sample_fmt = AV_SAMPLE_FMT_S16; |
53 break; | 58 break; |
54 case 32: | 59 case 32: |
55 codec_context->sample_fmt = AV_SAMPLE_FMT_S32; | 60 codec_context->sample_fmt = AV_SAMPLE_FMT_S32; |
56 break; | 61 break; |
(...skipping 21 matching lines...) | |
78 } | 83 } |
79 | 84 |
80 FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Allocator* allocator) | 85 FFmpegCdmAudioDecoder::FFmpegCdmAudioDecoder(cdm::Allocator* allocator) |
81 : is_initialized_(false), | 86 : is_initialized_(false), |
82 allocator_(allocator), | 87 allocator_(allocator), |
83 codec_context_(NULL), | 88 codec_context_(NULL), |
84 av_frame_(NULL), | 89 av_frame_(NULL), |
85 bits_per_channel_(0), | 90 bits_per_channel_(0), |
86 samples_per_second_(0), | 91 samples_per_second_(0), |
87 bytes_per_frame_(0), | 92 bytes_per_frame_(0), |
88 output_timestamp_base_(media::kNoTimestamp()), | |
89 total_frames_decoded_(0), | |
90 last_input_timestamp_(media::kNoTimestamp()), | 93 last_input_timestamp_(media::kNoTimestamp()), |
91 output_bytes_to_drop_(0) { | 94 output_bytes_to_drop_(0) { |
92 } | 95 } |
93 | 96 |
94 FFmpegCdmAudioDecoder::~FFmpegCdmAudioDecoder() { | 97 FFmpegCdmAudioDecoder::~FFmpegCdmAudioDecoder() { |
95 ReleaseFFmpegResources(); | 98 ReleaseFFmpegResources(); |
96 } | 99 } |
97 | 100 |
98 bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) { | 101 bool FFmpegCdmAudioDecoder::Initialize(const cdm::AudioDecoderConfig& config) { |
99 DVLOG(1) << "Initialize()"; | 102 DVLOG(1) << "Initialize()"; |
100 | 103 |
101 if (!IsValidConfig(config)) { | 104 if (!IsValidConfig(config)) { |
102 LOG(ERROR) << "Initialize(): invalid audio decoder configuration."; | 105 LOG(ERROR) << "Initialize(): invalid audio decoder configuration."; |
103 return false; | 106 return false; |
104 } | 107 } |
105 | 108 |
106 if (is_initialized_) { | 109 if (is_initialized_) { |
107 LOG(ERROR) << "Initialize(): Already initialized."; | 110 LOG(ERROR) << "Initialize(): Already initialized."; |
108 return false; | 111 return false; |
109 } | 112 } |
110 | 113 |
111 // Initialize AVCodecContext structure. | 114 // Initialize AVCodecContext structure. |
112 codec_context_ = avcodec_alloc_context3(NULL); | 115 codec_context_ = avcodec_alloc_context3(NULL); |
113 CdmAudioDecoderConfigToAVCodecContext(config, codec_context_); | 116 CdmAudioDecoderConfigToAVCodecContext(config, codec_context_); |
114 | 117 |
118 // MP3 decodes to S16P which we don't support, tell it to use S16 instead. | |
119 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P) | |
120 codec_context_->request_sample_fmt = AV_SAMPLE_FMT_S16; | |
121 | |
115 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 122 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
116 if (!codec) { | 123 if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) { |
117 LOG(ERROR) << "Initialize(): avcodec_find_decoder failed."; | 124 DLOG(ERROR) << "Could not initialize audio decoder: " |
125 << codec_context_->codec_id; | |
118 return false; | 126 return false; |
119 } | 127 } |
120 | 128 |
121 int status; | 129 // Ensure avcodec_open2() respected our format request. |
122 if ((status = avcodec_open2(codec_context_, codec, NULL)) < 0) { | 130 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P) { |
123 LOG(ERROR) << "Initialize(): avcodec_open2 failed: " << status; | 131 DLOG(ERROR) << "Unable to configure a supported sample format: " |
132 << codec_context_->sample_fmt; | |
124 return false; | 133 return false; |
125 } | 134 } |
126 | 135 |
136 // Some codecs will only output float data, so we need to convert to integer | |
137 // before returning the decoded buffer. | |
138 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP || | |
139 codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { | |
140 // Preallocate the AudioBus for float conversions. We can treat interleaved | |
141 // float data as a single planar channel since our output is expected in an | |
142 // interleaved format anyways. | |
143 int channels = codec_context_->channels; | |
144 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) | |
145 channels = 1; | |
146 converter_bus_ = media::AudioBus::CreateWrapper(channels); | |
147 } | |
148 | |
149 // Success! | |
127 av_frame_ = avcodec_alloc_frame(); | 150 av_frame_ = avcodec_alloc_frame(); |
128 bits_per_channel_ = config.bits_per_channel; | 151 bits_per_channel_ = config.bits_per_channel; |
129 samples_per_second_ = config.samples_per_second; | 152 samples_per_second_ = config.samples_per_second; |
130 bytes_per_frame_ = codec_context_->channels * bits_per_channel_ / 8; | 153 bytes_per_frame_ = codec_context_->channels * bits_per_channel_ / 8; |
154 output_timestamp_helper_.reset(new media::AudioTimestampHelper( | |
155 bytes_per_frame_, config.samples_per_second)); | |
131 serialized_audio_frames_.reserve(bytes_per_frame_ * samples_per_second_); | 156 serialized_audio_frames_.reserve(bytes_per_frame_ * samples_per_second_); |
132 is_initialized_ = true; | 157 is_initialized_ = true; |
133 | 158 |
134 return true; | 159 return true; |
135 } | 160 } |
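The new initialization path above requests AV_SAMPLE_FMT_S16 when a codec would otherwise decode to S16P, and pre-allocates |converter_bus_| whenever the codec can only produce float output (FLT or FLTP) so decoded samples can later be converted to the interleaved integer layout the CDM interface expects; interleaved float is treated as a single planar channel so both cases share one path. The following standalone sketch (it does not use Chromium's media::AudioBus; the function name and scaling are illustrative) shows the kind of planar-float to interleaved-S16 conversion that AudioBus::ToInterleaved performs for the 16-bit case:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Convert planar float channels into one interleaved int16 stream.
std::vector<int16_t> PlanarFloatToInterleavedS16(
    const std::vector<const float*>& channels, int frames) {
  const std::size_t channel_count = channels.size();
  std::vector<int16_t> interleaved(channel_count * frames);
  for (std::size_t ch = 0; ch < channel_count; ++ch) {
    for (int i = 0; i < frames; ++i) {
      // Clamp to [-1, 1] and scale to the int16 range.
      const float sample = std::max(-1.0f, std::min(1.0f, channels[ch][i]));
      interleaved[i * channel_count + ch] =
          static_cast<int16_t>(sample * 32767.0f);
    }
  }
  return interleaved;
}
```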
136 | 161 |
137 void FFmpegCdmAudioDecoder::Deinitialize() { | 162 void FFmpegCdmAudioDecoder::Deinitialize() { |
138 DVLOG(1) << "Deinitialize()"; | 163 DVLOG(1) << "Deinitialize()"; |
139 ReleaseFFmpegResources(); | 164 ReleaseFFmpegResources(); |
140 is_initialized_ = false; | 165 is_initialized_ = false; |
141 ResetAudioTimingData(); | 166 ResetTimestampState(); |
142 } | 167 } |
143 | 168 |
144 void FFmpegCdmAudioDecoder::Reset() { | 169 void FFmpegCdmAudioDecoder::Reset() { |
145 DVLOG(1) << "Reset()"; | 170 DVLOG(1) << "Reset()"; |
146 avcodec_flush_buffers(codec_context_); | 171 avcodec_flush_buffers(codec_context_); |
147 ResetAudioTimingData(); | 172 ResetTimestampState(); |
148 } | 173 } |
149 | 174 |
150 // static | 175 // static |
151 bool FFmpegCdmAudioDecoder::IsValidConfig( | 176 bool FFmpegCdmAudioDecoder::IsValidConfig( |
152 const cdm::AudioDecoderConfig& config) { | 177 const cdm::AudioDecoderConfig& config) { |
153 return config.codec != cdm::AudioDecoderConfig::kUnknownAudioCodec && | 178 return config.codec != cdm::AudioDecoderConfig::kUnknownAudioCodec && |
154 config.channel_count > 0 && | 179 config.channel_count > 0 && |
155 config.channel_count <= kMaxChannels && | 180 config.channel_count <= kMaxChannels && |
156 config.bits_per_channel > 0 && | 181 config.bits_per_channel > 0 && |
157 config.bits_per_channel <= media::limits::kMaxBitsPerSample && | 182 config.bits_per_channel <= media::limits::kMaxBitsPerSample && |
158 config.samples_per_second > 0 && | 183 config.samples_per_second > 0 && |
159 config.samples_per_second <= media::limits::kMaxSampleRate; | 184 config.samples_per_second <= media::limits::kMaxSampleRate; |
160 } | 185 } |
161 | 186 |
162 cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer( | 187 cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer( |
163 const uint8_t* compressed_buffer, | 188 const uint8_t* compressed_buffer, |
164 int32_t compressed_buffer_size, | 189 int32_t compressed_buffer_size, |
165 int64_t input_timestamp, | 190 int64_t input_timestamp, |
166 cdm::AudioFrames* decoded_frames) { | 191 cdm::AudioFrames* decoded_frames) { |
167 DVLOG(1) << "DecodeBuffer()"; | 192 DVLOG(1) << "DecodeBuffer()"; |
168 const bool is_end_of_stream = compressed_buffer_size == 0; | 193 const bool is_end_of_stream = compressed_buffer_size == 0; |
169 base::TimeDelta timestamp = | 194 base::TimeDelta timestamp = |
170 base::TimeDelta::FromMicroseconds(input_timestamp); | 195 base::TimeDelta::FromMicroseconds(input_timestamp); |
196 | |
197 bool is_vorbis = codec_context_->codec_id == CODEC_ID_VORBIS; | |
171 if (!is_end_of_stream) { | 198 if (!is_end_of_stream) { |
172 if (last_input_timestamp_ == media::kNoTimestamp()) { | 199 if (last_input_timestamp_ == media::kNoTimestamp()) { |
173 if (codec_context_->codec_id == CODEC_ID_VORBIS && | 200 if (is_vorbis && timestamp < base::TimeDelta()) { |
174 timestamp < base::TimeDelta()) { | |
175 // Dropping frames for negative timestamps as outlined in section A.2 | 201 // Dropping frames for negative timestamps as outlined in section A.2 |
176 // in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html | 202 // in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html |
177 int frames_to_drop = floor( | 203 int frames_to_drop = floor( |
178 0.5 + -timestamp.InSecondsF() * samples_per_second_); | 204 0.5 + -timestamp.InSecondsF() * samples_per_second_); |
179 output_bytes_to_drop_ = bytes_per_frame_ * frames_to_drop; | 205 output_bytes_to_drop_ = bytes_per_frame_ * frames_to_drop; |
180 } else { | 206 } else { |
181 last_input_timestamp_ = timestamp; | 207 last_input_timestamp_ = timestamp; |
182 } | 208 } |
183 } else if (timestamp != media::kNoTimestamp()) { | 209 } else if (timestamp != media::kNoTimestamp()) { |
184 if (timestamp < last_input_timestamp_) { | 210 if (timestamp < last_input_timestamp_) { |
(...skipping 38 matching lines...) | |
223 << compressed_buffer_size << " bytes"; | 249 << compressed_buffer_size << " bytes"; |
224 | 250 |
225 return cdm::kDecodeError; | 251 return cdm::kDecodeError; |
226 } | 252 } |
227 | 253 |
228 // Update packet size and data pointer in case we need to call the decoder | 254 // Update packet size and data pointer in case we need to call the decoder |
229 // with the remaining bytes from this packet. | 255 // with the remaining bytes from this packet. |
230 packet.size -= result; | 256 packet.size -= result; |
231 packet.data += result; | 257 packet.data += result; |
232 | 258 |
233 if (output_timestamp_base_ == media::kNoTimestamp() && !is_end_of_stream) { | 259 if (output_timestamp_helper_->base_timestamp() == media::kNoTimestamp() && |
260 !is_end_of_stream) { | |
234 DCHECK(timestamp != media::kNoTimestamp()); | 261 DCHECK(timestamp != media::kNoTimestamp()); |
235 if (output_bytes_to_drop_ > 0) { | 262 if (output_bytes_to_drop_ > 0) { |
263 // Currently Vorbis is the only codec that causes us to drop samples. | |
236 // If we have to drop samples it always means the timeline starts at 0. | 264 // If we have to drop samples it always means the timeline starts at 0. |
237 output_timestamp_base_ = base::TimeDelta(); | 265 DCHECK_EQ(codec_context_->codec_id, CODEC_ID_VORBIS); |
266 output_timestamp_helper_->SetBaseTimestamp(base::TimeDelta()); | |
238 } else { | 267 } else { |
239 output_timestamp_base_ = timestamp; | 268 output_timestamp_helper_->SetBaseTimestamp(timestamp); |
240 } | 269 } |
241 } | 270 } |
242 | 271 |
243 const uint8_t* decoded_audio_data = NULL; | |
244 int decoded_audio_size = 0; | 272 int decoded_audio_size = 0; |
245 if (frame_decoded) { | 273 if (frame_decoded) { |
246 int output_sample_rate = av_frame_->sample_rate; | 274 int output_sample_rate = av_frame_->sample_rate; |
247 if (output_sample_rate != samples_per_second_) { | 275 if (output_sample_rate != samples_per_second_) { |
248 DLOG(ERROR) << "Output sample rate (" << output_sample_rate | 276 DLOG(ERROR) << "Output sample rate (" << output_sample_rate |
249 << ") doesn't match expected rate " << samples_per_second_; | 277 << ") doesn't match expected rate " << samples_per_second_; |
250 return cdm::kDecodeError; | 278 return cdm::kDecodeError; |
251 } | 279 } |
252 | 280 |
253 decoded_audio_data = av_frame_->data[0]; | 281 decoded_audio_size = av_samples_get_buffer_size( |
254 decoded_audio_size = | 282 NULL, codec_context_->channels, av_frame_->nb_samples, |
255 av_samples_get_buffer_size(NULL, | 283 codec_context_->sample_fmt, 1); |
256 codec_context_->channels, | 284 // If we're decoding into float, adjust audio size. |
257 av_frame_->nb_samples, | 285 if (converter_bus_ && bits_per_channel_ / 8 != sizeof(float)) { |
258 codec_context_->sample_fmt, | 286 DCHECK(codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT || |
259 1); | 287 codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP); |
288 decoded_audio_size *= | |
289 static_cast<float>(bits_per_channel_ / 8) / sizeof(float); | |
290 } | |
260 } | 291 } |
261 | 292 |
293 int start_sample = 0; | |
262 if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) { | 294 if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) { |
295 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) | |
296 << "Decoder didn't output full frames"; | |
297 | |
263 int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_); | 298 int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_); |
264 decoded_audio_data += dropped_size; | 299 start_sample = dropped_size / bytes_per_frame_; |
265 decoded_audio_size -= dropped_size; | 300 decoded_audio_size -= dropped_size; |
266 output_bytes_to_drop_ -= dropped_size; | 301 output_bytes_to_drop_ -= dropped_size; |
267 } | 302 } |
268 | 303 |
304 scoped_refptr<media::DataBuffer> output; | |
269 if (decoded_audio_size > 0) { | 305 if (decoded_audio_size > 0) { |
270 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) | 306 DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) |
271 << "Decoder didn't output full frames"; | 307 << "Decoder didn't output full frames"; |
272 | 308 |
273 base::TimeDelta output_timestamp = GetNextOutputTimestamp(); | 309 // Convert float data using an AudioBus. |
274 total_frames_decoded_ += decoded_audio_size / bytes_per_frame_; | 310 if (converter_bus_) { |
311 // Setup the AudioBus as a wrapper of the AVFrame data and then use | |
312 // AudioBus::ToInterleaved() to convert the data as necessary. | |
313 int skip_frames = start_sample; | |
314 int total_frames = av_frame_->nb_samples - start_sample; | |
315 if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { | |
316 DCHECK_EQ(converter_bus_->channels(), 1); | |
317 total_frames *= codec_context_->channels; | |
318 skip_frames *= codec_context_->channels; | |
319 } | |
320 converter_bus_->set_frames(total_frames); | |
321 DCHECK_EQ(decoded_audio_size, | |
322 converter_bus_->frames() * bytes_per_frame_); | |
323 | |
324 for (int i = 0; i < converter_bus_->channels(); ++i) { | |
325 converter_bus_->SetChannelData(i, reinterpret_cast<float*>( | |
326 av_frame_->extended_data[i]) + skip_frames); | |
327 } | |
328 | |
329 output = new media::DataBuffer(decoded_audio_size); | |
330 output->SetDataSize(decoded_audio_size); | |
331 converter_bus_->ToInterleaved( | |
332 converter_bus_->frames(), bits_per_channel_ / 8, | |
333 output->GetWritableData()); | |
334 } else { | |
335 output = new media::DataBuffer( | |
336 av_frame_->extended_data[0] + start_sample * bytes_per_frame_, | |
337 decoded_audio_size); | |
338 } | |
339 | |
340 base::TimeDelta output_timestamp = | |
341 output_timestamp_helper_->GetTimestamp(); | |
342 output_timestamp_helper_->AddBytes(decoded_audio_size); | |
275 | 343 |
276 // Serialize the audio samples into |serialized_audio_frames_|. | 344 // Serialize the audio samples into |serialized_audio_frames_|. |
277 SerializeInt64(output_timestamp.InMicroseconds()); | 345 SerializeInt64(output_timestamp.InMicroseconds()); |
278 SerializeInt64(decoded_audio_size); | 346 SerializeInt64(output->GetDataSize()); |
279 serialized_audio_frames_.insert(serialized_audio_frames_.end(), | 347 serialized_audio_frames_.insert( |
280 decoded_audio_data, | 348 serialized_audio_frames_.end(), |
281 decoded_audio_data + decoded_audio_size); | 349 output->GetData(), |
350 output->GetData() + output->GetDataSize()); | |
282 } | 351 } |
283 } while (packet.size > 0); | 352 } while (packet.size > 0); |
284 | 353 |
285 if (!serialized_audio_frames_.empty()) { | 354 if (!serialized_audio_frames_.empty()) { |
286 decoded_frames->SetFrameBuffer( | 355 decoded_frames->SetFrameBuffer( |
287 allocator_->Allocate(serialized_audio_frames_.size())); | 356 allocator_->Allocate(serialized_audio_frames_.size())); |
288 if (!decoded_frames->FrameBuffer()) { | 357 if (!decoded_frames->FrameBuffer()) { |
289 LOG(ERROR) << "DecodeBuffer() cdm::Allocator::Allocate failed."; | 358 LOG(ERROR) << "DecodeBuffer() cdm::Allocator::Allocate failed."; |
290 return cdm::kDecodeError; | 359 return cdm::kDecodeError; |
291 } | 360 } |
292 memcpy(decoded_frames->FrameBuffer()->Data(), | 361 memcpy(decoded_frames->FrameBuffer()->Data(), |
293 &serialized_audio_frames_[0], | 362 &serialized_audio_frames_[0], |
294 serialized_audio_frames_.size()); | 363 serialized_audio_frames_.size()); |
295 decoded_frames->FrameBuffer()->SetSize(serialized_audio_frames_.size()); | 364 decoded_frames->FrameBuffer()->SetSize(serialized_audio_frames_.size()); |
296 serialized_audio_frames_.clear(); | 365 serialized_audio_frames_.clear(); |
297 | 366 |
298 return cdm::kSuccess; | 367 return cdm::kSuccess; |
299 } | 368 } |
300 | 369 |
301 return cdm::kNeedMoreData; | 370 return cdm::kNeedMoreData; |
302 } | 371 } |
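For Vorbis, the negative-timestamp handling near the top of DecodeBuffer() follows section A.2 of the Vorbis I spec: the leading portion of the stream is trimmed by rounding the negated timestamp to a whole number of frames and converting that to a byte count, which the per-packet loop then skips via |start_sample| (here assuming the first decoded chunk is large enough to absorb the whole drop). A small self-contained sketch of the arithmetic, with made-up numbers (48 kHz stereo, 16 bits per channel, a -2.5 ms first timestamp):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const int samples_per_second = 48000;
  const int bytes_per_frame = 2 /* channels */ * 16 / 8;  // 4 bytes per frame.
  const double timestamp_seconds = -0.0025;               // -2.5 ms.

  // Round the trimmed duration to the nearest whole frame, as the decoder does.
  const int frames_to_drop = static_cast<int>(
      std::floor(0.5 + -timestamp_seconds * samples_per_second));
  const int output_bytes_to_drop = bytes_per_frame * frames_to_drop;
  const int start_sample = output_bytes_to_drop / bytes_per_frame;

  // Prints: drop 120 frames (480 bytes), start at sample 120.
  std::printf("drop %d frames (%d bytes), start at sample %d\n",
              frames_to_drop, output_bytes_to_drop, start_sample);
  return 0;
}
```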
303 | 372 |
304 void FFmpegCdmAudioDecoder::ResetAudioTimingData() { | 373 void FFmpegCdmAudioDecoder::ResetTimestampState() { |
305 output_timestamp_base_ = media::kNoTimestamp(); | 374 output_timestamp_helper_->SetBaseTimestamp(media::kNoTimestamp()); |
306 total_frames_decoded_ = 0; | |
307 last_input_timestamp_ = media::kNoTimestamp(); | 375 last_input_timestamp_ = media::kNoTimestamp(); |
308 output_bytes_to_drop_ = 0; | 376 output_bytes_to_drop_ = 0; |
309 } | 377 } |
310 | 378 |
311 void FFmpegCdmAudioDecoder::ReleaseFFmpegResources() { | 379 void FFmpegCdmAudioDecoder::ReleaseFFmpegResources() { |
312 DVLOG(1) << "ReleaseFFmpegResources()"; | 380 DVLOG(1) << "ReleaseFFmpegResources()"; |
313 | 381 |
314 if (codec_context_) { | 382 if (codec_context_) { |
315 av_free(codec_context_->extradata); | 383 av_free(codec_context_->extradata); |
316 avcodec_close(codec_context_); | 384 avcodec_close(codec_context_); |
317 av_free(codec_context_); | 385 av_free(codec_context_); |
318 codec_context_ = NULL; | 386 codec_context_ = NULL; |
319 } | 387 } |
320 if (av_frame_) { | 388 if (av_frame_) { |
321 av_free(av_frame_); | 389 av_free(av_frame_); |
322 av_frame_ = NULL; | 390 av_frame_ = NULL; |
323 } | 391 } |
324 } | 392 } |
325 | 393 |
326 base::TimeDelta FFmpegCdmAudioDecoder::GetNextOutputTimestamp() const { | |
327 DCHECK(output_timestamp_base_ != media::kNoTimestamp()); | |
328 const double total_frames_decoded = total_frames_decoded_; | |
329 const double decoded_us = (total_frames_decoded / samples_per_second_) * | |
330 base::Time::kMicrosecondsPerSecond; | |
331 return output_timestamp_base_ + | |
332 base::TimeDelta::FromMicroseconds(decoded_us); | |
333 } | |
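The hand-rolled timestamp math above (output_timestamp_base_ plus total_frames_decoded_ divided by the sample rate) is what the new code delegates to media::AudioTimestampHelper: seed it with a base timestamp, advance it by the bytes emitted, and read back the next output timestamp. An illustrative stand-in that captures the same arithmetic (this is not the real helper's interface; it uses integer microseconds only):

```cpp
#include <cstdint>

// Minimal stand-in for the timestamp bookkeeping: timestamps advance by
// (bytes added / bytes per frame) frames at the configured sample rate.
class SimpleAudioTimestampHelper {
 public:
  SimpleAudioTimestampHelper(int bytes_per_frame, int samples_per_second)
      : bytes_per_frame_(bytes_per_frame),
        samples_per_second_(samples_per_second) {}

  void SetBaseTimestampUs(int64_t base_us) {
    base_us_ = base_us;
    frames_emitted_ = 0;
  }

  void AddBytes(int64_t bytes) { frames_emitted_ += bytes / bytes_per_frame_; }

  // Timestamp (in microseconds) of the next frame that will be emitted.
  int64_t GetTimestampUs() const {
    return base_us_ + frames_emitted_ * 1000000 / samples_per_second_;
  }

 private:
  const int bytes_per_frame_;
  const int samples_per_second_;
  int64_t base_us_ = 0;
  int64_t frames_emitted_ = 0;
};
```

With 4 bytes per frame and 48000 samples per second, AddBytes(480) advances GetTimestampUs() by 2500 microseconds, the same value the removed GetNextOutputTimestamp() would have produced for 120 additional decoded frames.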
334 | |
335 void FFmpegCdmAudioDecoder::SerializeInt64(int64 value) { | 394 void FFmpegCdmAudioDecoder::SerializeInt64(int64 value) { |
336 int previous_size = serialized_audio_frames_.size(); | 395 int previous_size = serialized_audio_frames_.size(); |
337 serialized_audio_frames_.resize(previous_size + sizeof(value)); | 396 serialized_audio_frames_.resize(previous_size + sizeof(value)); |
338 memcpy(&serialized_audio_frames_[0] + previous_size, &value, sizeof(value)); | 397 memcpy(&serialized_audio_frames_[0] + previous_size, &value, sizeof(value)); |
339 } | 398 } |
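Each decoded chunk is appended to |serialized_audio_frames_| as a fixed-layout record: an int64 timestamp in microseconds, an int64 payload size, then the raw interleaved samples; the whole vector is then copied into a single allocator-provided frame buffer. A hypothetical consumer-side walk of that layout (the real reader lives on the other side of the CDM interface and is not part of this file; SerializedFrame and ParseSerializedFrames are illustrative names):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct SerializedFrame {
  int64_t timestamp_us;
  std::vector<uint8_t> samples;
};

// Walk a buffer of repeated (int64 timestamp_us, int64 size, size bytes).
std::vector<SerializedFrame> ParseSerializedFrames(const uint8_t* data,
                                                   std::size_t size) {
  std::vector<SerializedFrame> frames;
  std::size_t offset = 0;
  while (offset + 2 * sizeof(int64_t) <= size) {
    int64_t timestamp_us = 0;
    int64_t frame_size = 0;
    std::memcpy(&timestamp_us, data + offset, sizeof(timestamp_us));
    offset += sizeof(timestamp_us);
    std::memcpy(&frame_size, data + offset, sizeof(frame_size));
    offset += sizeof(frame_size);
    if (offset + static_cast<std::size_t>(frame_size) > size)
      break;  // Truncated buffer; stop parsing.
    frames.push_back({timestamp_us,
                      std::vector<uint8_t>(data + offset,
                                           data + offset + frame_size)});
    offset += frame_size;
  }
  return frames;
}
```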
340 | 399 |
341 } // namespace webkit_media | 400 } // namespace webkit_media |