OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_audio_decoder.h" | 5 #include "media/filters/ffmpeg_audio_decoder.h" |
6 | 6 |
7 #include "base/bind.h" | |
7 #include "media/base/data_buffer.h" | 8 #include "media/base/data_buffer.h" |
9 #include "media/base/demuxer.h" | |
10 #include "media/base/filter_host.h" | |
8 #include "media/base/limits.h" | 11 #include "media/base/limits.h" |
9 #include "media/ffmpeg/ffmpeg_common.h" | 12 #include "media/ffmpeg/ffmpeg_common.h" |
10 #include "media/filters/ffmpeg_demuxer.h" | |
11 | |
12 #if !defined(USE_SSE) | |
13 #if defined(__SSE__) || defined(ARCH_CPU_X86_64) || _M_IX86_FP==1 | |
14 #define USE_SSE 1 | |
15 #else | |
16 #define USE_SSE 0 | |
17 #endif | |
18 #endif | |
19 #if USE_SSE | |
20 #include <xmmintrin.h> | |
21 #endif | |
22 | 13 |
23 namespace media { | 14 namespace media { |
24 | 15 |
25 // Size of the decoded audio buffer. | 16 // Returns true if the decode result was an error. |
26 const size_t FFmpegAudioDecoder::kOutputBufferSize = | 17 static bool IsErrorResult(int result, int decoded_size) { |
27 AVCODEC_MAX_AUDIO_FRAME_SIZE; | 18 return result < 0 || |
19 decoded_size < 0 || | |
20 decoded_size > AVCODEC_MAX_AUDIO_FRAME_SIZE; | |
21 } | |
22 | |
23 // Returns true if the decode result produced audio samples. | |
24 static bool ProducedAudioSamples(int decoded_size) { | |
25 return decoded_size > 0; | |
26 } | |
27 | |
28 // Returns true if the decode result was a timestamp packet and not actual audio | |
29 // data. | |
30 static bool IsTimestampMarkerPacket(int result, Buffer* input) { | |
31 // We can get a positive result but no decoded data. This is ok because | |
32 // this can be a marker packet that only contains timestamp. | |
33 return result > 0 && !input->IsEndOfStream() && | |
34 input->GetTimestamp() != kNoTimestamp && | |
35 input->GetDuration() != kNoTimestamp; | |
36 } | |
37 | |
38 // Returns true if the decode result was end of stream. | |
39 static bool IsEndOfStream(int result, int decoded_size, Buffer* input) { | |
40 // Three conditions to meet to declare end of stream for this decoder: | |
41 // 1. FFmpeg didn't read anything. | |
42 // 2. FFmpeg didn't output anything. | |
43 // 3. An end of stream buffer is received. | |
44 return result == 0 && decoded_size == 0 && input->IsEndOfStream(); | |
45 } | |
46 | |
28 | 47 |
29 FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) | 48 FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) |
30 : DecoderBase<AudioDecoder, Buffer>(message_loop), | 49 : message_loop_(message_loop), |
31 codec_context_(NULL), | 50 codec_context_(NULL), |
32 config_(0, CHANNEL_LAYOUT_NONE, 0), | 51 config_(0, CHANNEL_LAYOUT_NONE, 0), |
33 estimated_next_timestamp_(kNoTimestamp) { | 52 estimated_next_timestamp_(kNoTimestamp), |
53 decoded_audio_size_(AVCODEC_MAX_AUDIO_FRAME_SIZE), | |
54 decoded_audio_(static_cast<uint8*>(av_malloc(decoded_audio_size_))), | |
55 pending_reads_(0) { | |
34 } | 56 } |
35 | 57 |
36 FFmpegAudioDecoder::~FFmpegAudioDecoder() { | 58 FFmpegAudioDecoder::~FFmpegAudioDecoder() { |
59 av_free(decoded_audio_); | |
37 } | 60 } |
38 | 61 |
39 void FFmpegAudioDecoder::DoInitialize(DemuxerStream* demuxer_stream, | 62 void FFmpegAudioDecoder::Flush(FilterCallback* callback) { |
40 bool* success, | 63 message_loop_->PostTask( |
41 Task* done_cb) { | 64 FROM_HERE, |
42 base::ScopedTaskRunner done_runner(done_cb); | 65 NewRunnableMethod(this, &FFmpegAudioDecoder::DoFlush, callback)); |
43 *success = false; | 66 } |
44 | 67 |
45 AVStream* av_stream = demuxer_stream->GetAVStream(); | 68 void FFmpegAudioDecoder::Initialize( |
46 if (!av_stream) { | 69 DemuxerStream* stream, |
47 return; | 70 FilterCallback* callback, |
48 } | 71 StatisticsCallback* stats_callback) { |
72 // TODO(scherkus): change Initialize() signature to pass |stream| as a | |
73 // scoped_refptr<>. | |
acolwell GONE FROM CHROMIUM
2011/09/16 19:04:37
const scoped_refptr<>&
| |
74 scoped_refptr<DemuxerStream> ref_stream(stream); | |
75 message_loop_->PostTask( | |
76 FROM_HERE, | |
77 NewRunnableMethod(this, &FFmpegAudioDecoder::DoInitialize, | |
78 ref_stream, callback, stats_callback)); | |
79 } | |
80 | |
81 AudioDecoderConfig FFmpegAudioDecoder::config() { | |
82 return config_; | |
83 } | |
84 | |
85 void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> buffer) { | |
86 message_loop_->PostTask( | |
87 FROM_HERE, | |
88 NewRunnableMethod(this, &FFmpegAudioDecoder::DoProduceAudioSamples, | |
89 buffer)); | |
90 } | |
91 | |
92 void FFmpegAudioDecoder::DoInitialize( | |
93 scoped_refptr<DemuxerStream> stream, | |
acolwell GONE FROM CHROMIUM
2011/09/16 19:04:37
const&
| |
94 FilterCallback* callback, | |
95 StatisticsCallback* stats_callback) { | |
96 scoped_ptr<FilterCallback> c(callback); | |
97 | |
98 demuxer_stream_ = stream; | |
99 AVStream* av_stream = demuxer_stream_->GetAVStream(); | |
100 CHECK(av_stream); | |
101 | |
102 stats_callback_.reset(stats_callback); | |
49 | 103 |
50 // Grab the AVStream's codec context and make sure we have sensible values. | 104 // Grab the AVStream's codec context and make sure we have sensible values. |
51 codec_context_ = av_stream->codec; | 105 codec_context_ = av_stream->codec; |
52 int bps = av_get_bits_per_sample_fmt(codec_context_->sample_fmt); | 106 int bps = av_get_bits_per_sample_fmt(codec_context_->sample_fmt); |
53 if (codec_context_->channels <= 0 || | 107 if (codec_context_->channels <= 0 || |
54 codec_context_->channels > Limits::kMaxChannels || | 108 codec_context_->channels > Limits::kMaxChannels || |
55 (codec_context_->channel_layout == 0 && codec_context_->channels > 2) || | 109 (codec_context_->channel_layout == 0 && codec_context_->channels > 2) || |
56 bps <= 0 || bps > Limits::kMaxBitsPerSample || | 110 bps <= 0 || bps > Limits::kMaxBitsPerSample || |
57 codec_context_->sample_rate <= 0 || | 111 codec_context_->sample_rate <= 0 || |
58 codec_context_->sample_rate > Limits::kMaxSampleRate) { | 112 codec_context_->sample_rate > Limits::kMaxSampleRate) { |
59 DLOG(WARNING) << "Invalid audio stream -" | 113 DLOG(ERROR) << "Invalid audio stream -" |
60 << " channels: " << codec_context_->channels | 114 << " channels: " << codec_context_->channels |
61 << " channel layout:" << codec_context_->channel_layout | 115 << " channel layout:" << codec_context_->channel_layout |
62 << " bps: " << bps | 116 << " bps: " << bps |
63 << " sample rate: " << codec_context_->sample_rate; | 117 << " sample rate: " << codec_context_->sample_rate; |
118 | |
119 host()->SetError(PIPELINE_ERROR_DECODE); | |
120 callback->Run(); | |
64 return; | 121 return; |
65 } | 122 } |
66 | 123 |
67 // Serialize calls to avcodec_open(). | 124 // Serialize calls to avcodec_open(). |
68 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 125 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
69 if (!codec || avcodec_open(codec_context_, codec) < 0) { | 126 if (!codec || avcodec_open(codec_context_, codec) < 0) { |
127 DLOG(ERROR) << "Could not initialize audio decoder: " | |
128 << codec_context_->codec_id; | |
129 | |
130 host()->SetError(PIPELINE_ERROR_DECODE); | |
131 callback->Run(); | |
70 return; | 132 return; |
71 } | 133 } |
72 | 134 |
135 // Success! | |
73 config_.bits_per_channel = | 136 config_.bits_per_channel = |
74 av_get_bits_per_sample_fmt(codec_context_->sample_fmt); | 137 av_get_bits_per_sample_fmt(codec_context_->sample_fmt); |
75 config_.channel_layout = | 138 config_.channel_layout = |
76 ChannelLayoutToChromeChannelLayout(codec_context_->channel_layout, | 139 ChannelLayoutToChromeChannelLayout(codec_context_->channel_layout, |
77 codec_context_->channels); | 140 codec_context_->channels); |
78 config_.sample_rate = codec_context_->sample_rate; | 141 config_.sample_rate = codec_context_->sample_rate; |
79 | 142 |
80 // Prepare the output buffer. | 143 callback->Run(); |
81 output_buffer_.reset(static_cast<uint8*>(av_malloc(kOutputBufferSize))); | |
82 if (!output_buffer_.get()) { | |
83 host()->SetError(PIPELINE_ERROR_OUT_OF_MEMORY); | |
84 return; | |
85 } | |
86 *success = true; | |
87 } | 144 } |
88 | 145 |
89 AudioDecoderConfig FFmpegAudioDecoder::config() { | 146 void FFmpegAudioDecoder::DoFlush(FilterCallback* callback) { |
90 return config_; | 147 avcodec_flush_buffers(codec_context_); |
148 estimated_next_timestamp_ = kNoTimestamp; | |
149 | |
150 callback->Run(); | |
151 delete callback; | |
91 } | 152 } |
92 | 153 |
93 void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> output) { | 154 void FFmpegAudioDecoder::DoProduceAudioSamples(scoped_refptr<Buffer> output) { |
acolwell GONE FROM CHROMIUM
2011/09/16 19:04:37
const&
scherkus (not reviewing)
2011/09/16 20:39:40
Done.
| |
94 DecoderBase<AudioDecoder, Buffer>::PostReadTaskHack(output); | 155 output_buffers_.push_back(output); |
156 ReadFromDemuxerStream(); | |
95 } | 157 } |
96 | 158 |
97 void FFmpegAudioDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { | 159 void FFmpegAudioDecoder::DoDecodeBuffer(const scoped_refptr<Buffer>& input) { |
98 avcodec_flush_buffers(codec_context_); | 160 DCHECK(!output_buffers_.empty()); |
99 estimated_next_timestamp_ = kNoTimestamp; | 161 DCHECK_GT(pending_reads_, 0); |
100 done_cb->Run(); | 162 pending_reads_--; |
101 delete done_cb; | |
102 } | |
103 | |
104 // ConvertAudioF32ToS32() converts float audio (F32) to int (S32) in place. | |
105 // This is a temporary solution. | |
106 // The purpose of this short term fix is to enable WMApro, which decodes to | |
107 // float. | |
108 // The audio driver has been tested by passing the float audio thru. | |
109 // FFmpeg for ChromeOS only exposes U8, S16 and F32. | |
110 // To properly expose new audio sample types at the audio driver layer, an enum | |
111 // should be created to represent all supported types, including types | |
112 // for Pepper. FFmpeg should be queried for type and passed along. | |
113 | |
114 // TODO(fbarchard): Remove this function. Expose all FFmpeg types to driver. | |
115 // TODO(fbarchard): If this function is kept, move it to audio_util.cc | |
116 | |
117 #if USE_SSE | |
118 const __m128 kFloatScaler = _mm_set1_ps( 2147483648.0f ); | |
119 static void FloatToIntSaturate(float* p) { | |
120 __m128 a = _mm_set1_ps(*p); | |
121 a = _mm_mul_ss(a, kFloatScaler); | |
122 *reinterpret_cast<int32*>(p) = _mm_cvtss_si32(a); | |
123 } | |
124 #else | |
125 const float kFloatScaler = 2147483648.0f; | |
126 const int kMinSample = std::numeric_limits<int32>::min(); | |
127 const int kMaxSample = std::numeric_limits<int32>::max(); | |
128 const float kMinSampleFloat = | |
129 static_cast<float>(std::numeric_limits<int32>::min()); | |
130 const float kMaxSampleFloat = | |
131 static_cast<float>(std::numeric_limits<int32>::max()); | |
132 static void FloatToIntSaturate(float* p) { | |
133 float f = *p * kFloatScaler + 0.5f; | |
134 int sample; | |
135 if (f <= kMinSampleFloat) { | |
136 sample = kMinSample; | |
137 } else if (f >= kMaxSampleFloat) { | |
138 sample = kMaxSample; | |
139 } else { | |
140 sample = static_cast<int32>(f); | |
141 } | |
142 *reinterpret_cast<int32*>(p) = sample; | |
143 } | |
144 #endif | |
145 static void ConvertAudioF32ToS32(void* buffer, int buffer_size) { | |
146 for (int i = 0; i < buffer_size / 4; ++i) { | |
147 FloatToIntSaturate(reinterpret_cast<float*>(buffer) + i); | |
148 } | |
149 } | |
150 | |
151 void FFmpegAudioDecoder::DoDecode(Buffer* input) { | |
152 PipelineStatistics statistics; | |
153 | 163 |
154 // FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us | 164 // FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us |
155 // a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find | 165 // a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find |
156 // something valid. Refer to http://crbug.com/49709 | 166 // something valid. Refer to http://crbug.com/49709 |
157 // TODO(hclam): remove this once fixing the issue in FFmpeg. | |
158 if (input->GetTimestamp() == kNoTimestamp && | 167 if (input->GetTimestamp() == kNoTimestamp && |
159 estimated_next_timestamp_ == kNoTimestamp && | 168 estimated_next_timestamp_ == kNoTimestamp && |
160 !input->IsEndOfStream()) { | 169 !input->IsEndOfStream()) { |
161 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | 170 ReadFromDemuxerStream(); |
162 return; | 171 return; |
163 } | 172 } |
164 | 173 |
165 // Due to FFmpeg API changes we no longer have const read-only pointers. | |
166 AVPacket packet; | 174 AVPacket packet; |
167 av_init_packet(&packet); | 175 av_init_packet(&packet); |
168 packet.data = const_cast<uint8*>(input->GetData()); | 176 if (input->IsEndOfStream()) { |
169 packet.size = input->GetDataSize(); | 177 packet.data = NULL; |
178 packet.size = 0; | |
179 } else { | |
180 packet.data = const_cast<uint8*>(input->GetData()); | |
181 packet.size = input->GetDataSize(); | |
182 } | |
170 | 183 |
184 PipelineStatistics statistics; | |
171 statistics.audio_bytes_decoded = input->GetDataSize(); | 185 statistics.audio_bytes_decoded = input->GetDataSize(); |
172 | 186 |
173 int16_t* output_buffer = reinterpret_cast<int16_t*>(output_buffer_.get()); | 187 int decoded_audio_size = decoded_audio_size_; |
174 int output_buffer_size = kOutputBufferSize; | 188 int result = avcodec_decode_audio3( |
175 int result = avcodec_decode_audio3(codec_context_, | 189 codec_context_, reinterpret_cast<int16_t*>(decoded_audio_), |
176 output_buffer, | 190 &decoded_audio_size, &packet); |
177 &output_buffer_size, | |
178 &packet); | |
179 | 191 |
180 if (codec_context_->sample_fmt == SAMPLE_FMT_FLT) { | 192 if (IsErrorResult(result, decoded_audio_size)) { |
181 ConvertAudioF32ToS32(output_buffer, output_buffer_size); | 193 DCHECK(!input->IsEndOfStream()) |
182 } | 194 << "End of stream buffer produced an error! " |
195 << "This is quite possibly a bug in the audio decoder not handling " | |
196 << "end of stream AVPackets correctly."; | |
183 | 197 |
184 // TODO(ajwong): Consider if kOutputBufferSize should just be an int instead | 198 DLOG(ERROR) << "Error decoding an audio frame with timestamp: " |
185 // of a size_t. | 199 << input->GetTimestamp().InMicroseconds() << " us, duration: " |
186 if (result < 0 || | 200 << input->GetDuration().InMicroseconds() << " us, packet size: " |
187 output_buffer_size < 0 || | 201 << input->GetDataSize() << " bytes"; |
188 static_cast<size_t>(output_buffer_size) > kOutputBufferSize) { | 202 |
189 VLOG(1) << "Error decoding an audio frame with timestamp: " | 203 ReadFromDemuxerStream(); |
190 << input->GetTimestamp().InMicroseconds() << " us, duration: " | |
191 << input->GetDuration().InMicroseconds() << " us, packet size: " | |
192 << input->GetDataSize() << " bytes"; | |
193 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
194 return; | 204 return; |
195 } | 205 } |
196 | 206 |
197 // If we have decoded something, enqueue the result. | 207 scoped_refptr<DataBuffer> output; |
198 if (output_buffer_size) { | |
199 DataBuffer* result_buffer = new DataBuffer(output_buffer_size); | |
200 result_buffer->SetDataSize(output_buffer_size); | |
201 uint8* data = result_buffer->GetWritableData(); | |
202 memcpy(data, output_buffer, output_buffer_size); | |
203 | 208 |
204 // We don't trust the demuxer, so always calculate the duration based on | 209 if (ProducedAudioSamples(decoded_audio_size)) { |
205 // the actual number of samples decoded. | 210 // Copy the audio samples into an output buffer. |
206 base::TimeDelta duration = CalculateDuration(output_buffer_size); | 211 output = new DataBuffer(decoded_audio_size); |
207 result_buffer->SetDuration(duration); | 212 output->SetDataSize(decoded_audio_size); |
213 uint8* data = output->GetWritableData(); | |
214 memcpy(data, decoded_audio_, decoded_audio_size); | |
208 | 215 |
209 // Use an estimated timestamp unless the incoming buffer has a valid one. | 216 UpdateDurationAndTimestamp(input, output); |
210 if (input->GetTimestamp() == kNoTimestamp) { | 217 } else if (IsTimestampMarkerPacket(result, input)) { |
211 result_buffer->SetTimestamp(estimated_next_timestamp_); | 218 // Nothing else to do here but update our estimation. |
219 estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); | |
220 } else if (IsEndOfStream(result, decoded_audio_size, input)) { | |
221 // Create an end of stream output buffer. | |
222 output = new DataBuffer(0); | |
223 output->SetTimestamp(input->GetTimestamp()); | |
224 output->SetDuration(input->GetDuration()); | |
225 } | |
212 | 226 |
213 // Keep the estimated timestamp invalid until we get an incoming buffer | 227 // Decoding finished successfully, update stats and execute callback. |
214 // with a valid timestamp. This can happen during seeks, etc... | 228 stats_callback_->Run(statistics); |
215 if (estimated_next_timestamp_ != kNoTimestamp) { | 229 if (output) { |
216 estimated_next_timestamp_ += duration; | 230 DCHECK_GT(output_buffers_.size(), 0u); |
217 } | 231 output_buffers_.pop_front(); |
218 } else { | |
219 result_buffer->SetTimestamp(input->GetTimestamp()); | |
220 estimated_next_timestamp_ = input->GetTimestamp() + duration; | |
221 } | |
222 | 232 |
223 EnqueueResult(result_buffer); | 233 ConsumeAudioSamples(output); |
224 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | 234 } else { |
235 ReadFromDemuxerStream(); | |
236 } | |
237 } | |
238 | |
239 void FFmpegAudioDecoder::ReadFromDemuxerStream() { | |
240 DCHECK(!output_buffers_.empty()) | |
241 << "Reads should only occur if there are output buffers."; | |
242 | |
243 pending_reads_++; | |
244 demuxer_stream_->Read(base::Bind(&FFmpegAudioDecoder::DecodeBuffer, this)); | |
245 } | |
246 | |
247 void FFmpegAudioDecoder::DecodeBuffer(Buffer* buffer) { | |
248 // TODO(scherkus): change DemuxerStream::Read() to use scoped_refptr<> for | |
249 // callback. | |
250 scoped_refptr<Buffer> ref_buffer(buffer); | |
251 message_loop_->PostTask( | |
252 FROM_HERE, | |
253 NewRunnableMethod(this, &FFmpegAudioDecoder::DoDecodeBuffer, ref_buffer)); | |
254 } | |
255 | |
256 void FFmpegAudioDecoder::UpdateDurationAndTimestamp( | |
257 const Buffer* input, | |
258 DataBuffer* output) { | |
259 // Always calculate duration based on the actual number of samples decoded. | |
260 base::TimeDelta duration = CalculateDuration(output->GetDataSize()); | |
261 output->SetDuration(duration); | |
262 | |
263 // Use the incoming timestamp if it's valid. | |
264 if (input->GetTimestamp() != kNoTimestamp) { | |
265 output->SetTimestamp(input->GetTimestamp()); | |
266 estimated_next_timestamp_ = input->GetTimestamp() + duration; | |
225 return; | 267 return; |
226 } | 268 } |
227 | 269 |
228 // We can get a positive result but no decoded data. This is ok because | 270 // Otherwise use an estimated timestamp and attempt to update the estimation |
229 // this can be a marker packet that only contains timestamp. In this case we | 271 // as long as it's valid. |
230 // save the timestamp for later use. | 272 output->SetTimestamp(estimated_next_timestamp_); |
231 if (result && !input->IsEndOfStream() && | 273 if (estimated_next_timestamp_ != kNoTimestamp) { |
232 input->GetTimestamp() != kNoTimestamp && | 274 estimated_next_timestamp_ += duration; |
233 input->GetDuration() != kNoTimestamp) { | |
234 estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); | |
235 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
236 return; | |
237 } | 275 } |
238 | |
239 // Three conditions to meet to declare end of stream for this decoder: | |
240 // 1. FFmpeg didn't read anything. | |
241 // 2. FFmpeg didn't output anything. | |
242 // 3. An end of stream buffer is received. | |
243 if (result == 0 && output_buffer_size == 0 && input->IsEndOfStream()) { | |
244 DataBuffer* result_buffer = new DataBuffer(0); | |
245 result_buffer->SetTimestamp(input->GetTimestamp()); | |
246 result_buffer->SetDuration(input->GetDuration()); | |
247 EnqueueResult(result_buffer); | |
248 } | |
249 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
250 } | 276 } |
251 | 277 |
252 base::TimeDelta FFmpegAudioDecoder::CalculateDuration(size_t size) { | 278 base::TimeDelta FFmpegAudioDecoder::CalculateDuration(int size) { |
253 int64 denominator = codec_context_->channels * | 279 int64 denominator = ChannelLayoutToChannelCount(config_.channel_layout) * |
254 av_get_bits_per_sample_fmt(codec_context_->sample_fmt) / 8 * | 280 config_.bits_per_channel / 8 * config_.sample_rate; |
255 codec_context_->sample_rate; | |
256 double microseconds = size / | 281 double microseconds = size / |
257 (denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); | 282 (denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); |
258 return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); | 283 return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); |
259 } | 284 } |
260 | 285 |
261 } // namespace | 286 } // namespace media |
OLD | NEW |