Index: media/filters/ffmpeg_audio_decoder.cc |
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc |
index 807f1d16002f43da1b0771c7023dc0f3f8339a89..367ba095f3bf246b8ecd0107642111c9518d8d6b 100644 |
--- a/media/filters/ffmpeg_audio_decoder.cc |
+++ b/media/filters/ffmpeg_audio_decoder.cc |
@@ -4,68 +4,149 @@ |
#include "media/filters/ffmpeg_audio_decoder.h" |
-#include "media/base/callback.h" |
-#include "media/base/data_buffer.h" |
+#include "media/base/filter_host.h" |
#include "media/base/limits.h" |
#include "media/ffmpeg/ffmpeg_common.h" |
-#include "media/filters/ffmpeg_demuxer.h" |
- |
-#if !defined(USE_SSE) |
-#if defined(__SSE__) || defined(ARCH_CPU_X86_64) || _M_IX86_FP==1 |
-#define USE_SSE 1 |
-#else |
-#define USE_SSE 0 |
-#endif |
-#endif |
-#if USE_SSE |
-#include <xmmintrin.h> |
-#endif |
namespace media { |
-// Size of the decoded audio buffer. |
-const size_t FFmpegAudioDecoder::kOutputBufferSize = |
- AVCODEC_MAX_AUDIO_FRAME_SIZE; |
+static bool IsErrorResult(int result, int decoded_size) { |
+ return result < 0 || |
+ decoded_size < 0 || |
+ decoded_size > AVCODEC_MAX_AUDIO_FRAME_SIZE; |
+} |
+ |
+static bool ProducedAudioSamples(int decoded_size) { |
+ return decoded_size != 0; |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
do we want this to return true if decoded_size < 0
scherkus (not reviewing)
2011/09/11 14:49:22
Hmm... yeah these helper methods are a bit weird s
|
+} |
+ |
+static bool IsTimestampMarkerPacket(int result, Buffer* input) { |
+ // We can get a positive result but no decoded data. This is ok because this |
+ // this can be a marker packet that only contains timestamp. |
+ return result && !input->IsEndOfStream() && |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
It's ok if result < 0? Perhaps a comment indicatin
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+ input->GetTimestamp() != kNoTimestamp && |
+ input->GetDuration() != kNoTimestamp; |
+} |
+ |
+static bool IsEndOfStream(int result, int decoded_size, Buffer* input) { |
+ // Three conditions to meet to declare end of stream for this decoder: |
+ // 1. FFmpeg didn't read anything. |
+ // 2. FFmpeg didn't output anything. |
+ // 3. An end of stream buffer is received. |
+ return result == 0 && decoded_size == 0 && input->IsEndOfStream(); |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
result -> read_size?
|
+} |
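To make the thread above concrete, here is one way the predicates could be tightened along the lines of the questions raised (renaming result to read_size and keeping ProducedAudioSamples() strictly positive). This is a sketch only, not part of the patch, and it assumes the same call order as DoDecodeInputBuffer() below, where IsErrorResult() is checked first so the other predicates never see a negative decoded_size:

// Sketch only: illustrative renames, not the patch's code.
static bool IsErrorResult(int read_size, int decoded_size) {
  return read_size < 0 ||
         decoded_size < 0 ||
         decoded_size > AVCODEC_MAX_AUDIO_FRAME_SIZE;
}

static bool ProducedAudioSamples(int decoded_size) {
  // Strictly positive: negative sizes are already rejected by IsErrorResult().
  return decoded_size > 0;
}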
FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) |
- : DecoderBase<AudioDecoder, Buffer>(message_loop), |
+ : message_loop_(message_loop), |
+ demuxer_stream_(NULL), |
codec_context_(NULL), |
config_(0, 0, 0), |
- estimated_next_timestamp_(kNoTimestamp) { |
+ estimated_next_timestamp_(kNoTimestamp), |
+ pending_reads_(0) { |
} |
FFmpegAudioDecoder::~FFmpegAudioDecoder() { |
+ if (codec_context_) { |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
nit: braces not necessary
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+ avcodec_close(codec_context_); |
+ } |
} |
-void FFmpegAudioDecoder::DoInitialize(DemuxerStream* demuxer_stream, |
- bool* success, |
- Task* done_cb) { |
- AutoTaskRunner done_runner(done_cb); |
- *success = false; |
+void FFmpegAudioDecoder::Play(FilterCallback* callback) { |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
remove
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+ // XXX: anything else? |
+ callback->Run(); |
+ delete callback; |
+} |
- AVStream* av_stream = demuxer_stream->GetAVStream(); |
- if (!av_stream) { |
- return; |
- } |
+void FFmpegAudioDecoder::Pause(FilterCallback* callback) { |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
remove
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+ // XXX: anything else? |
+ callback->Run(); |
+ delete callback; |
+} |
+ |
+void FFmpegAudioDecoder::Flush(FilterCallback* callback) { |
+ // XXX: move to proper thread. |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Or you could just protect this with a lock?
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+ avcodec_flush_buffers(codec_context_); |
+ estimated_next_timestamp_ = kNoTimestamp; |
+ |
+ callback->Run(); |
+ delete callback; |
+} |
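For reference, a sketch of the "move to proper thread" option from the XXX comment above, using the same PostTask/NewRunnableMethod trampoline this patch already uses for Initialize(). DoFlush() is a hypothetical helper, not something the patch declares:

void FFmpegAudioDecoder::Flush(FilterCallback* callback) {
  message_loop_->PostTask(
      FROM_HERE,
      NewRunnableMethod(this, &FFmpegAudioDecoder::DoFlush, callback));
}

void FFmpegAudioDecoder::DoFlush(FilterCallback* callback) {
  // Runs on |message_loop_|, so codec state is only touched on one thread.
  avcodec_flush_buffers(codec_context_);
  estimated_next_timestamp_ = kNoTimestamp;
  callback->Run();
  delete callback;
}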
+ |
+void FFmpegAudioDecoder::Stop(FilterCallback* callback) { |
+ // XXX: anything else? |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Clear consume callback so we won't send any data d
|
+ callback->Run(); |
+ delete callback; |
+} |
+ |
+void FFmpegAudioDecoder::Seek(base::TimeDelta time, FilterCallback* callback) { |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
remove
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+ // XXX: anything else? |
+ callback->Run(); |
+ delete callback; |
+} |
+ |
+void FFmpegAudioDecoder::Initialize( |
+ DemuxerStream* stream, |
+ FilterCallback* callback, |
+ StatisticsCallback* stats_callback) { |
+ // XXX: do we still care about initializing on our own message_loop_? |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
I'm fine with keeping a message_loop for audio dec
|
+ // XXX: do we want to try and use MessageLoopProxy at this time? |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
separate CL IMO
|
+ // TODO(scherkus): change Initialize() signature to pass |stream| as a |
+ // scoped_refptr<>. |
+ scoped_refptr<DemuxerStream> ref_stream(stream); |
+ message_loop_->PostTask( |
+ FROM_HERE, |
+ NewRunnableMethod(this, &FFmpegAudioDecoder::DoInitialize, |
+ ref_stream, callback, stats_callback)); |
+} |
+ |
+AudioDecoderConfig FFmpegAudioDecoder::config() { |
+ return config_; |
+} |
+ |
+void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> buffer) { |
+ message_loop_->PostTask( |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
What about just putting this in an "if(MessageLoo
scherkus (not reviewing)
2011/09/11 14:49:22
I prefer the extra methods for clarity and saner d
|
+ FROM_HERE, |
+ NewRunnableMethod(this, &FFmpegAudioDecoder::DoQueueOutputBuffer, |
+ buffer)); |
+} |
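To make the trade-off in the thread above concrete, a sketch of the re-entrant guard acolwell suggests, as opposed to the separate ProduceAudioSamples()/DoQueueOutputBuffer() pair the patch keeps; the behavior is the same either way:

// Sketch only: collapses the two methods into one with a thread guard.
void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> buffer) {
  if (MessageLoop::current() != message_loop_) {
    message_loop_->PostTask(
        FROM_HERE,
        NewRunnableMethod(this, &FFmpegAudioDecoder::ProduceAudioSamples,
                          buffer));
    return;
  }
  // Already on the decoder thread: queue the buffer and kick off a read.
  output_buffers_.push_back(buffer);
  ReadFromDemuxerStream();
}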
- // Grab the AVStream's codec context and make sure we have sensible values. |
- codec_context_ = av_stream->codec; |
- int bps = av_get_bits_per_sample_fmt(codec_context_->sample_fmt); |
- if (codec_context_->channels <= 0 || |
- codec_context_->channels > Limits::kMaxChannels || |
- bps <= 0 || bps > Limits::kMaxBitsPerSample || |
- codec_context_->sample_rate <= 0 || |
- codec_context_->sample_rate > Limits::kMaxSampleRate) { |
- DLOG(WARNING) << "Invalid audio stream -" |
- << " channels: " << codec_context_->channels |
- << " bps: " << bps |
- << " sample rate: " << codec_context_->sample_rate; |
+void FFmpegAudioDecoder::DoInitialize( |
+ DemuxerStream* stream, |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
consider moving this to Initialize() and using an
|
+ FilterCallback* callback, |
+ StatisticsCallback* stats_callback) { |
+ scoped_ptr<FilterCallback> c(callback); |
+ |
+ demuxer_stream_ = stream; |
+ AVStream* av_stream = demuxer_stream_->GetAVStream(); |
+ CHECK(av_stream); |
+ |
+ stats_callback_.reset(stats_callback); |
+ |
+ // Sanity checks. |
+ AVCodecContext* codec_context = av_stream->codec; |
+ if (!codec_context || |
+ codec_context->channels <= 0 || |
+ codec_context->channels > Limits::kMaxChannels || |
+ codec_context->sample_rate <= 0 || |
+ codec_context->sample_rate > Limits::kMaxSampleRate) { |
+ DLOG(ERROR) << "Invalid audio format (" |
+ << " channels: " << codec_context->channels |
+ << " sample rate: " << codec_context->sample_rate << ")"; |
+ |
+ // XXX: should this be a "format" error? |
+ host()->SetError(PIPELINE_ERROR_DECODE); |
+ callback->Run(); |
return; |
} |
- // Serialize calls to avcodec_open(). |
- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
- if (!codec || avcodec_open(codec_context_, codec) < 0) { |
+ AVCodec* codec = avcodec_find_decoder(codec_context->codec_id); |
+ if (!codec) { |
+ DLOG(ERROR) << "Audio codec not found (" |
+ << " codec_id: " << codec_context->codec_id << ")"; |
+ |
+ // XXX: should this be a "format" error? |
+ host()->SetError(PIPELINE_ERROR_DECODE); |
+ callback->Run(); |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Would this be a good time to convert Initialize()
scherkus (not reviewing)
2011/09/11 14:49:22
Follow up CL
|
return; |
} |
@@ -74,185 +155,163 @@ void FFmpegAudioDecoder::DoInitialize(DemuxerStream* demuxer_stream, |
config_.channels_per_sample = codec_context_->channels; |
config_.sample_rate = codec_context_->sample_rate; |
- // Prepare the output buffer. |
- output_buffer_.reset(static_cast<uint8*>(av_malloc(kOutputBufferSize))); |
- if (!output_buffer_.get()) { |
- host()->SetError(PIPELINE_ERROR_OUT_OF_MEMORY); |
+ // Prep our own AVCodecContext struct. |
+ codec_context_ = avcodec_alloc_context(); |
+ codec_context_->channels = codec_context->channels; |
+ codec_context_->sample_rate = codec_context->sample_rate; |
+ codec_context_->sample_fmt = codec_context->sample_fmt; |
+ |
+ if (avcodec_open(codec_context_, codec) < 0) { |
+ DLOG(ERROR) << "Could not initialize audio decoder (" |
+ << " codec_id: " << codec_context->codec_id |
+ << " channels: " << codec_context->channels |
+ << " sample rate: " << codec_context->sample_rate << ")"; |
+ |
+ // XXX: should this be a "format" error? |
+ host()->SetError(PIPELINE_ERROR_DECODE); |
+ callback->Run(); |
return; |
} |
- *success = true; |
-} |
- |
-AudioDecoderConfig FFmpegAudioDecoder::config() { |
- return config_; |
-} |
-void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> output) { |
- DecoderBase<AudioDecoder, Buffer>::PostReadTaskHack(output); |
-} |
+ // XXX: the div-by-2 scares me a tiny bit for some reason. |
+ decoded_audio_.reset(new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2]); |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
AVCODEC_MAX_AUDIO_FRAME_SIZE in bytes. We're alloc
scherkus (not reviewing)
2011/09/11 14:49:22
Done and we'll do a reinterpret_cast<> in decode c
|
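A sketch of the resolution discussed above, which also covers the later comment about basing decoded_audio_size on the allocated size: allocate the buffer in bytes (AVCODEC_MAX_AUDIO_FRAME_SIZE is a byte count) and reinterpret_cast<> at the decode call. It assumes |decoded_audio_| becomes a byte array and |decoded_audio_buffer_size_| is a hypothetical member tracking the allocation:

  // In DoInitialize():
  decoded_audio_buffer_size_ = AVCODEC_MAX_AUDIO_FRAME_SIZE;
  decoded_audio_.reset(new uint8[decoded_audio_buffer_size_]);

  // In the decode path:
  int decoded_audio_size = decoded_audio_buffer_size_;
  int result = avcodec_decode_audio3(
      codec_context_,
      reinterpret_cast<int16_t*>(decoded_audio_.get()),
      &decoded_audio_size,
      &packet);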
-void FFmpegAudioDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { |
- avcodec_flush_buffers(codec_context_); |
- estimated_next_timestamp_ = kNoTimestamp; |
- done_cb->Run(); |
- delete done_cb; |
+ callback->Run(); |
} |
-// ConvertAudioF32ToS32() converts float audio (F32) to int (S32) in place. |
-// This is a temporary solution. |
-// The purpose of this short term fix is to enable WMApro, which decodes to |
-// float. |
-// The audio driver has been tested by passing the float audio thru. |
-// FFmpeg for ChromeOS only exposes U8, S16 and F32. |
-// To properly expose new audio sample types at the audio driver layer, a enum |
-// should be created to represent all suppported types, including types |
-// for Pepper. FFmpeg should be queried for type and passed along. |
- |
-// TODO(fbarchard): Remove this function. Expose all FFmpeg types to driver. |
-// TODO(fbarchard): If this function is kept, move it to audio_util.cc |
- |
-#if USE_SSE |
-const __m128 kFloatScaler = _mm_set1_ps( 2147483648.0f ); |
-static void FloatToIntSaturate(float* p) { |
- __m128 a = _mm_set1_ps(*p); |
- a = _mm_mul_ss(a, kFloatScaler); |
- *reinterpret_cast<int32*>(p) = _mm_cvtss_si32(a); |
-} |
-#else |
-const float kFloatScaler = 2147483648.0f; |
-const int kMinSample = std::numeric_limits<int32>::min(); |
-const int kMaxSample = std::numeric_limits<int32>::max(); |
-const float kMinSampleFloat = |
- static_cast<float>(std::numeric_limits<int32>::min()); |
-const float kMaxSampleFloat = |
- static_cast<float>(std::numeric_limits<int32>::max()); |
-static void FloatToIntSaturate(float* p) { |
- float f = *p * kFloatScaler + 0.5f; |
- int sample; |
- if (f <= kMinSampleFloat) { |
- sample = kMinSample; |
- } else if (f >= kMaxSampleFloat) { |
- sample = kMaxSample; |
- } else { |
- sample = static_cast<int32>(f); |
- } |
- *reinterpret_cast<int32*>(p) = sample; |
-} |
-#endif |
-static void ConvertAudioF32ToS32(void* buffer, int buffer_size) { |
- for (int i = 0; i < buffer_size / 4; ++i) { |
- FloatToIntSaturate(reinterpret_cast<float*>(buffer) + i); |
- } |
+void FFmpegAudioDecoder::DoQueueOutputBuffer(scoped_refptr<Buffer> output) { |
+ output_buffers_.push_back(output); |
+ ReadFromDemuxerStream(); |
} |
-void FFmpegAudioDecoder::DoDecode(Buffer* input) { |
- PipelineStatistics statistics; |
+void FFmpegAudioDecoder::DoDecodeInputBuffer(scoped_refptr<Buffer> input) { |
+ DCHECK(!output_buffers_.empty()); |
+ DCHECK_GT(pending_reads_, 0); |
+ pending_reads_--; |
// FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us |
// a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find |
// something valid. Refer to http://crbug.com/49709 |
- // TODO(hclam): remove this once fixing the issue in FFmpeg. |
if (input->GetTimestamp() == kNoTimestamp && |
estimated_next_timestamp_ == kNoTimestamp && |
!input->IsEndOfStream()) { |
- DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); |
+ ReadFromDemuxerStream(); |
return; |
} |
- // Due to FFmpeg API changes we no longer have const read-only pointers. |
AVPacket packet; |
av_init_packet(&packet); |
packet.data = const_cast<uint8*>(input->GetData()); |
packet.size = input->GetDataSize(); |
+ PipelineStatistics statistics; |
statistics.audio_bytes_decoded = input->GetDataSize(); |
- int16_t* output_buffer = reinterpret_cast<int16_t*>(output_buffer_.get()); |
- int output_buffer_size = kOutputBufferSize; |
+ int decoded_audio_size = AVCODEC_MAX_AUDIO_FRAME_SIZE; |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
This should be based on the size allocated. Perhap
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
int result = avcodec_decode_audio3(codec_context_, |
- output_buffer, |
- &output_buffer_size, |
+ decoded_audio_.get(), |
+ &decoded_audio_size, |
&packet); |
+ // XXX: this is only for WMAPro audio codec?! remove?! |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
I'm fine with removing this since I don't think we
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
|
+#if 0 |
if (codec_context_->sample_fmt == SAMPLE_FMT_FLT) { |
- ConvertAudioF32ToS32(output_buffer, output_buffer_size); |
+ ConvertAudioF32ToS32(decoded_audio_.get(), decoded_audio_size); |
} |
+#endif |
- // TODO(ajwong): Consider if kOutputBufferSize should just be an int instead |
- // of a size_t. |
- if (result < 0 || |
- output_buffer_size < 0 || |
- static_cast<size_t>(output_buffer_size) > kOutputBufferSize) { |
- VLOG(1) << "Error decoding an audio frame with timestamp: " |
- << input->GetTimestamp().InMicroseconds() << " us, duration: " |
- << input->GetDuration().InMicroseconds() << " us, packet size: " |
- << input->GetDataSize() << " bytes"; |
- DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); |
+ if (IsErrorResult(result, decoded_audio_size)) { |
+ DLOG(ERROR) << "Error decoding an audio frame with timestamp: " |
+ << input->GetTimestamp().InMicroseconds() << " us, duration: " |
+ << input->GetDuration().InMicroseconds() << " us, packet size: " |
+ << input->GetDataSize() << " bytes"; |
+ ReadFromDemuxerStream(); |
return; |
} |
- // If we have decoded something, enqueue the result. |
- if (output_buffer_size) { |
- DataBuffer* result_buffer = new DataBuffer(output_buffer_size); |
- result_buffer->SetDataSize(output_buffer_size); |
- uint8* data = result_buffer->GetWritableData(); |
- memcpy(data, output_buffer, output_buffer_size); |
- |
- // We don't trust the demuxer, so always calculate the duration based on |
- // the actual number of samples decoded. |
- base::TimeDelta duration = CalculateDuration(output_buffer_size); |
- result_buffer->SetDuration(duration); |
- |
- // Use an estimated timestamp unless the incoming buffer has a valid one. |
- if (input->GetTimestamp() == kNoTimestamp) { |
- result_buffer->SetTimestamp(estimated_next_timestamp_); |
- |
- // Keep the estimated timestamp invalid until we get an incoming buffer |
- // with a valid timestamp. This can happen during seeks, etc... |
- if (estimated_next_timestamp_ != kNoTimestamp) { |
- estimated_next_timestamp_ += duration; |
- } |
- } else { |
- result_buffer->SetTimestamp(input->GetTimestamp()); |
- estimated_next_timestamp_ = input->GetTimestamp() + duration; |
- } |
- |
- EnqueueResult(result_buffer); |
- DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); |
- return; |
- } |
+ scoped_refptr<DataBuffer> output; |
- // We can get a positive result but no decoded data. This is ok because this |
- // this can be a marker packet that only contains timestamp. In this case we |
- // save the timestamp for later use. |
- if (result && !input->IsEndOfStream() && |
- input->GetTimestamp() != kNoTimestamp && |
- input->GetDuration() != kNoTimestamp) { |
+ if (ProducedAudioSamples(decoded_audio_size)) { |
+ // Copy the audio samples into an output buffer. |
+ output = new DataBuffer(decoded_audio_size); |
+ output->SetDataSize(decoded_audio_size); |
+ uint8* data = output->GetWritableData(); |
+ memcpy(data, decoded_audio_.get(), decoded_audio_size); |
+ } else if (IsTimestampMarkerPacket(result, input)) { |
+ // Nothing else to do here but update our estimation. |
estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); |
- DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); |
+ } else if (IsEndOfStream(result, decoded_audio_size, input)) { |
+ // Create an end of stream output buffer. |
+ output = new DataBuffer(0); |
+ output->SetTimestamp(input->GetTimestamp()); |
+ output->SetDuration(input->GetDuration()); |
+ } |
+ |
+ DecodeFinished(output, statistics); |
+} |
+ |
+void FFmpegAudioDecoder::DecodeFinished( |
+ scoped_refptr<Buffer> output, |
+ const PipelineStatistics& statistics) { |
+ stats_callback_->Run(statistics); |
+ |
+ if (output) { |
+ DCHECK_GT(output_buffers_.size(), 0u); |
+ output_buffers_.pop_front(); |
+ |
+ ConsumeAudioSamples(output); |
+ } |
+} |
+ |
+void FFmpegAudioDecoder::ReadFromDemuxerStream() { |
+ DCHECK(!output_buffers_.empty()) |
+ << "Reads should only occur if there are output buffers."; |
+ |
+ pending_reads_++; |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Add stopped check to make sure we don't call the d
|
+ demuxer_stream_->Read( |
+ NewCallback(this, &FFmpegAudioDecoder::OnReadComplete)); |
+} |
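A sketch of the "stopped check" suggested above; |stopped_| is a hypothetical bool member that Stop() would set on |message_loop_|, not something this patch declares:

void FFmpegAudioDecoder::ReadFromDemuxerStream() {
  if (stopped_)
    return;

  DCHECK(!output_buffers_.empty())
      << "Reads should only occur if there are output buffers.";

  pending_reads_++;
  demuxer_stream_->Read(
      NewCallback(this, &FFmpegAudioDecoder::OnReadComplete));
}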
+ |
+void FFmpegAudioDecoder::OnReadComplete(Buffer* buffer) { |
+ // TODO(scherkus): change DemuxerStream::Read() to use scoped_refptr<> for |
+ // callback. |
+ scoped_refptr<Buffer> ref_buffer(buffer); |
+ message_loop_->PostTask( |
+ FROM_HERE, |
+ NewRunnableMethod(this, &FFmpegAudioDecoder::DoDecodeInputBuffer, |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Put this in a message loop guard and copy DoDecod
|
+ ref_buffer)); |
+} |
+ |
+void FFmpegAudioDecoder::UpdateDurationAndTimestamp( |
+ Buffer* input, |
+ DataBuffer* output) { |
+ // Always calculate duration based on the actual number of samples decoded. |
+ base::TimeDelta duration = CalculateDuration(output->GetDataSize()); |
+ output->SetDuration(duration); |
+ |
+ // Use the incoming timestamp if it's valid. |
+ if (input->GetTimestamp() != kNoTimestamp) { |
+ output->SetTimestamp(input->GetTimestamp()); |
+ estimated_next_timestamp_ = input->GetTimestamp() + duration; |
return; |
} |
- // Three conditions to meet to declare end of stream for this decoder: |
- // 1. FFmpeg didn't read anything. |
- // 2. FFmpeg didn't output anything. |
- // 3. An end of stream buffer is received. |
- if (result == 0 && output_buffer_size == 0 && input->IsEndOfStream()) { |
- DataBuffer* result_buffer = new DataBuffer(0); |
- result_buffer->SetTimestamp(input->GetTimestamp()); |
- result_buffer->SetDuration(input->GetDuration()); |
- EnqueueResult(result_buffer); |
+ // Otherwise use an estimated timestamp and attempt to update the estimation |
+ // as long as it's valid. |
+ output->SetTimestamp(estimated_next_timestamp_); |
+ if (estimated_next_timestamp_ != kNoTimestamp) { |
+ estimated_next_timestamp_ += duration; |
} |
- DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); |
} |
-base::TimeDelta FFmpegAudioDecoder::CalculateDuration(size_t size) { |
- int64 denominator = codec_context_->channels * |
- av_get_bits_per_sample_fmt(codec_context_->sample_fmt) / 8 * |
- codec_context_->sample_rate; |
+base::TimeDelta FFmpegAudioDecoder::CalculateDuration(int size) { |
+ // XXX: Could we make this function part of AudioDecoderConfig? Something like |
+ // "I want a duration for this config based on # of bytes"? |
+ int64 denominator = config_.channels_per_sample * |
+ config_.bits_per_channel / 8 * config_.sample_rate; |
double microseconds = size / |
(denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); |
return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); |
} |
-} // namespace |
+} // namespace media |
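To make the arithmetic in CalculateDuration() concrete: for 16-bit stereo at 44100 Hz the denominator is 2 * 16 / 8 * 44100 = 176400 bytes per second, so a 4608-byte buffer (1152 frames) lasts roughly 26122 microseconds. Below is a self-contained sketch along the lines of the XXX question about hanging this off the config; the free-function form and names are illustrative only:

#include <stdint.h>
#include <stdio.h>

// Same math as FFmpegAudioDecoder::CalculateDuration(), with the config fields
// passed as plain parameters.
int64_t DurationInMicroseconds(int bytes, int channels, int bits_per_channel,
                               int sample_rate) {
  const int64_t bytes_per_second =
      static_cast<int64_t>(channels) * bits_per_channel / 8 * sample_rate;
  return bytes * static_cast<int64_t>(1000000) / bytes_per_second;
}

int main() {
  // 1152 frames of 16-bit stereo at 44100 Hz = 4608 bytes -> ~26122 us.
  printf("%lld\n",
         static_cast<long long>(DurationInMicroseconds(4608, 2, 16, 44100)));
  return 0;
}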