Index: content/renderer/media/audio_renderer_impl.cc |
diff --git a/content/renderer/media/audio_renderer_impl.cc b/content/renderer/media/audio_renderer_impl.cc |
index 3e617f549dcd2f12f8e3e465ad0a8e1ef5d2941b..768142ad5ee726b8b777cb577252724eebc588ea 100644 |
--- a/content/renderer/media/audio_renderer_impl.cc |
+++ b/content/renderer/media/audio_renderer_impl.cc |
@@ -9,61 +9,47 @@ |
#include <algorithm> |
#include "base/bind.h" |
-#include "base/command_line.h" |
#include "content/common/child_process.h" |
#include "content/common/media/audio_messages.h" |
-#include "content/public/common/content_switches.h" |
#include "content/renderer/render_thread_impl.h" |
#include "media/audio/audio_buffers_state.h" |
-#include "media/audio/audio_output_controller.h" |
#include "media/audio/audio_util.h" |
-#include "media/base/filter_host.h" |
-// Static variable that says what code path we are using -- low or high |
-// latency. Made separate variable so we don't have to go to command line |
-// for every DCHECK(). |
-AudioRendererImpl::LatencyType AudioRendererImpl::latency_type_ = |
- AudioRendererImpl::kUninitializedLatency; |
+// We define GetBufferSizeForSampleRate() instead of using |
+// GetAudioHardwareBufferSize() in audio_util because we're using |
+// the AUDIO_PCM_LINEAR flag, instead of AUDIO_PCM_LOW_LATENCY, |
+// which the audio_util functions assume. |
+// |
+// See: http://code.google.com/p/chromium/issues/detail?id=103627 |
+// for a more detailed description of the subtleties. |
+static size_t GetBufferSizeForSampleRate(int sample_rate) { |
+ // kNominalBufferSize has been tested on Windows, Mac OS X, and Linux |
+ // using the low-latency audio codepath (SyncSocket implementation) |
+ // with the AUDIO_PCM_LINEAR flag. |
+ const size_t kNominalBufferSize = 2048; |
+ |
+ if (sample_rate <= 48000) |
+ return kNominalBufferSize; |
+ else if (sample_rate <= 96000) |
+ return kNominalBufferSize * 2; |
+ return kNominalBufferSize * 4; |
+} |
AudioRendererImpl::AudioRendererImpl() |
: AudioRendererBase(), |
bytes_per_second_(0), |
- stream_created_(false), |
- stream_id_(0), |
- shared_memory_(NULL), |
- shared_memory_size_(0), |
- stopped_(false), |
- pending_request_(false) { |
- filter_ = RenderThreadImpl::current()->audio_message_filter(); |
- // Figure out if we are planning to use high or low latency code path. |
- // We are initializing only one variable and double initialization is Ok, |
- // so there would not be any issues caused by CPU memory model. |
- if (latency_type_ == kUninitializedLatency) { |
- // Urgent workaround for |
- // http://code.google.com/p/chromium-os/issues/detail?id=21491 |
- // TODO(enal): Fix it properly. |
-#if defined(OS_CHROMEOS) |
- latency_type_ = kHighLatency; |
-#else |
- if (!CommandLine::ForCurrentProcess()->HasSwitch( |
- switches::kHighLatencyAudio)) { |
- latency_type_ = kLowLatency; |
- } else { |
- latency_type_ = kHighLatency; |
- } |
-#endif |
- } |
+ stopped_(false) { |
+ // We create the AudioDevice here because it must be created in the |
+ // main thread. But we don't yet know the audio format (sample-rate, etc.) |
+ // at this point. Later, when OnInitialize() is called, we have |
+ // the audio format information and call the AudioDevice::Initialize() |
+ // method to fully initialize it. |
+ audio_device_ = new AudioDevice(); |
} |
AudioRendererImpl::~AudioRendererImpl() { |
} |
-// static |
-void AudioRendererImpl::set_latency_type(LatencyType latency_type) { |
- DCHECK_EQ(kUninitializedLatency, latency_type_); |
- latency_type_ = latency_type; |
-} |
- |
base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) { |
if (bytes_per_second_) { |
return base::TimeDelta::FromMicroseconds( |
@@ -92,69 +78,47 @@ void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled, |
bool AudioRendererImpl::OnInitialize(int bits_per_channel, |
ChannelLayout channel_layout, |
int sample_rate) { |
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, channel_layout, |
- sample_rate, bits_per_channel, 0); |
- |
- bytes_per_second_ = params.GetBytesPerSecond(); |
+ // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY |
+ // does not currently support all the sample-rates that we require. |
+ // Please see: http://code.google.com/p/chromium/issues/detail?id=103627 |
+ // for more details. |
+ audio_parameters_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, |
+ channel_layout, |
+ sample_rate, |
+ bits_per_channel, |
+ 0); |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::CreateStreamTask, this, params)); |
- return true; |
-} |
+ bytes_per_second_ = audio_parameters_.GetBytesPerSecond(); |
-void AudioRendererImpl::OnStop() { |
- // Since joining with the audio thread can acquire lock_, we make sure to |
- // Join() with it not under lock. |
- base::DelegateSimpleThread* audio_thread = NULL; |
- { |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
- stopped_ = true; |
- |
- DCHECK_EQ(!audio_thread_.get(), !socket_.get()); |
- if (socket_.get()) |
- socket_->Close(); |
- if (audio_thread_.get()) |
- audio_thread = audio_thread_.get(); |
- |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::DestroyTask, this)); |
- } |
+ DCHECK(audio_device_.get()); |
- if (audio_thread) |
- audio_thread->Join(); |
-} |
+ if (!audio_device_->IsInitialized()) { |
+ audio_device_->Initialize( |
+ GetBufferSizeForSampleRate(sample_rate), |
+ audio_parameters_.channels, |
+ audio_parameters_.sample_rate, |
+ audio_parameters_.format, |
+ this); |
-void AudioRendererImpl::NotifyDataAvailableIfNecessary() { |
- if (latency_type_ == kHighLatency) { |
- // Post a task to render thread to notify a packet reception. |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::NotifyPacketReadyTask, this)); |
+ audio_device_->Start(); |
} |
+ |
+ return true; |
} |
-void AudioRendererImpl::ConsumeAudioSamples( |
- scoped_refptr<media::Buffer> buffer_in) { |
- base::AutoLock auto_lock(lock_); |
+void AudioRendererImpl::OnStop() { |
if (stopped_) |
return; |
- // TODO(hclam): handle end of stream here. |
+ DCHECK(audio_device_.get()); |
+ audio_device_->Stop(); |
- // Use the base class to queue the buffer. |
- AudioRendererBase::ConsumeAudioSamples(buffer_in); |
- |
- NotifyDataAvailableIfNecessary(); |
+ stopped_ = true; |
} |
void AudioRendererImpl::SetPlaybackRate(float rate) { |
DCHECK_LE(0.0f, rate); |
- base::AutoLock auto_lock(lock_); |
// Handle the case where we stopped due to IO message loop dying. |
if (stopped_) { |
AudioRendererBase::SetPlaybackRate(rate); |
@@ -165,363 +129,121 @@ void AudioRendererImpl::SetPlaybackRate(float rate) { |
// Play: GetPlaybackRate() == 0.0 && rate != 0.0 |
// Pause: GetPlaybackRate() != 0.0 && rate == 0.0 |
if (GetPlaybackRate() == 0.0f && rate != 0.0f) { |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::PlayTask, this)); |
+ DoPlay(); |
} else if (GetPlaybackRate() != 0.0f && rate == 0.0f) { |
// Pause is easy, we can always pause. |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::PauseTask, this)); |
+ DoPause(); |
} |
AudioRendererBase::SetPlaybackRate(rate); |
- |
- // If we are playing, give a kick to try fulfilling the packet request as |
- // the previous packet request may be stalled by a pause. |
- if (rate > 0.0f) { |
- NotifyDataAvailableIfNecessary(); |
- } |
} |
void AudioRendererImpl::Pause(const base::Closure& callback) { |
AudioRendererBase::Pause(callback); |
- base::AutoLock auto_lock(lock_); |
if (stopped_) |
return; |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::PauseTask, this)); |
+ DoPause(); |
} |
void AudioRendererImpl::Seek(base::TimeDelta time, |
const media::FilterStatusCB& cb) { |
AudioRendererBase::Seek(time, cb); |
- base::AutoLock auto_lock(lock_); |
if (stopped_) |
return; |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::SeekTask, this)); |
+ DoSeek(); |
} |
- |
void AudioRendererImpl::Play(const base::Closure& callback) { |
AudioRendererBase::Play(callback); |
- base::AutoLock auto_lock(lock_); |
if (stopped_) |
return; |
if (GetPlaybackRate() != 0.0f) { |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::PlayTask, this)); |
+ DoPlay(); |
} else { |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::PauseTask, this)); |
+ DoPause(); |
} |
} |
void AudioRendererImpl::SetVolume(float volume) { |
- base::AutoLock auto_lock(lock_); |
if (stopped_) |
return; |
- ChildProcess::current()->io_message_loop()->PostTask( |
- FROM_HERE, |
- base::Bind(&AudioRendererImpl::SetVolumeTask, this, volume)); |
+ DCHECK(audio_device_.get()); |
+ audio_device_->SetVolume(volume); |
} |
-void AudioRendererImpl::OnCreated(base::SharedMemoryHandle handle, |
- uint32 length) { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- DCHECK_EQ(kHighLatency, latency_type_); |
- |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
- |
- shared_memory_.reset(new base::SharedMemory(handle, false)); |
- shared_memory_->Map(length); |
- shared_memory_size_ = length; |
-} |
- |
-void AudioRendererImpl::CreateSocket(base::SyncSocket::Handle socket_handle) { |
- DCHECK_EQ(kLowLatency, latency_type_); |
-#if defined(OS_WIN) |
- DCHECK(socket_handle); |
-#else |
- DCHECK_GE(socket_handle, 0); |
-#endif |
- socket_.reset(new base::SyncSocket(socket_handle)); |
-} |
- |
-void AudioRendererImpl::CreateAudioThread() { |
- DCHECK_EQ(kLowLatency, latency_type_); |
- audio_thread_.reset( |
- new base::DelegateSimpleThread(this, "renderer_audio_thread")); |
- audio_thread_->Start(); |
-} |
- |
-void AudioRendererImpl::OnLowLatencyCreated( |
- base::SharedMemoryHandle handle, |
- base::SyncSocket::Handle socket_handle, |
- uint32 length) { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- DCHECK_EQ(kLowLatency, latency_type_); |
-#if defined(OS_WIN) |
- DCHECK(handle); |
-#else |
- DCHECK_GE(handle.fd, 0); |
-#endif |
- DCHECK_NE(0u, length); |
- |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
- |
- shared_memory_.reset(new base::SharedMemory(handle, false)); |
- shared_memory_->Map(media::TotalSharedMemorySizeInBytes(length)); |
- shared_memory_size_ = length; |
- |
- CreateSocket(socket_handle); |
- CreateAudioThread(); |
-} |
- |
-void AudioRendererImpl::OnRequestPacket(AudioBuffersState buffers_state) { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- DCHECK_EQ(kHighLatency, latency_type_); |
- { |
- base::AutoLock auto_lock(lock_); |
- DCHECK(!pending_request_); |
- pending_request_ = true; |
- request_buffers_state_ = buffers_state; |
- } |
- |
- // Try to fill in the fulfill the packet request. |
- NotifyPacketReadyTask(); |
-} |
- |
-void AudioRendererImpl::OnStateChanged(AudioStreamState state) { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
- |
- switch (state) { |
- case kAudioStreamError: |
- // We receive this error if we counter an hardware error on the browser |
- // side. We can proceed with ignoring the audio stream. |
- // TODO(hclam): We need more handling of these kind of error. For example |
- // re-try creating the audio output stream on the browser side or fail |
- // nicely and report to demuxer that the whole audio stream is discarded. |
- host()->DisableAudioRenderer(); |
- break; |
- // TODO(hclam): handle these events. |
- case kAudioStreamPlaying: |
- case kAudioStreamPaused: |
- break; |
- default: |
- NOTREACHED(); |
- break; |
- } |
-} |
- |
-void AudioRendererImpl::OnVolume(double volume) { |
- // TODO(hclam): decide whether we need to report the current volume to |
- // pipeline. |
-} |
- |
-void AudioRendererImpl::CreateStreamTask(const AudioParameters& audio_params) { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
- |
- stream_created_ = true; |
- |
- // Make sure we don't call create more than once. |
- DCHECK_EQ(0, stream_id_); |
- stream_id_ = filter_->AddDelegate(this); |
- ChildProcess::current()->io_message_loop()->AddDestructionObserver(this); |
- |
- AudioParameters params_to_send(audio_params); |
- // Let the browser choose packet size. |
- params_to_send.samples_per_packet = 0; |
- |
- Send(new AudioHostMsg_CreateStream(stream_id_, |
- params_to_send, |
- latency_type_ == kLowLatency)); |
-} |
- |
-void AudioRendererImpl::PlayTask() { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- |
+void AudioRendererImpl::DoPlay() { |
earliest_end_time_ = base::Time::Now(); |
- Send(new AudioHostMsg_PlayStream(stream_id_)); |
+ DCHECK(audio_device_.get()); |
+ audio_device_->Play(); |
} |
-void AudioRendererImpl::PauseTask() { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- |
- Send(new AudioHostMsg_PauseStream(stream_id_)); |
+void AudioRendererImpl::DoPause() { |
+ DCHECK(audio_device_.get()); |
+ audio_device_->Pause(false); |
} |
-void AudioRendererImpl::SeekTask() { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- |
+void AudioRendererImpl::DoSeek() { |
earliest_end_time_ = base::Time::Now(); |
- // We have to pause the audio stream before we can flush. |
- Send(new AudioHostMsg_PauseStream(stream_id_)); |
- Send(new AudioHostMsg_FlushStream(stream_id_)); |
-} |
- |
-void AudioRendererImpl::DestroyTask() { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- |
- base::AutoLock auto_lock(lock_); |
- // Errors can cause us to get here before CreateStreamTask ever ran, in which |
- // case there's nothing to do. |
- if (!stream_created_) |
- return; |
- |
- // Make sure we don't call destroy more than once. |
- DCHECK_NE(0, stream_id_); |
- filter_->RemoveDelegate(stream_id_); |
- Send(new AudioHostMsg_CloseStream(stream_id_)); |
- // During shutdown this may be NULL; don't worry about deregistering in that |
- // case. |
- if (ChildProcess::current()) |
- ChildProcess::current()->io_message_loop()->RemoveDestructionObserver(this); |
- stream_id_ = 0; |
-} |
- |
-void AudioRendererImpl::SetVolumeTask(double volume) { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
- Send(new AudioHostMsg_SetVolume(stream_id_, volume)); |
+ // Pause and flush the stream when we seek to a new location. |
+ DCHECK(audio_device_.get()); |
+ audio_device_->Pause(true); |
} |
-void AudioRendererImpl::NotifyPacketReadyTask() { |
- DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); |
- DCHECK_EQ(kHighLatency, latency_type_); |
- |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
+void AudioRendererImpl::Render(const std::vector<float*>& audio_data, |
+ size_t number_of_frames, |
+ size_t audio_delay_milliseconds) { |
+ if (stopped_ || GetPlaybackRate() == 0.0f) { |
+ // Output silence if stopped or if the playback rate is zero (paused). |
+ for (size_t i = 0; i < audio_data.size(); ++i) |
+ memset(audio_data[i], 0, sizeof(float) * number_of_frames); |
return; |
- if (pending_request_ && GetPlaybackRate() > 0.0f) { |
- DCHECK(shared_memory_.get()); |
- |
- // Adjust the playback delay. |
- base::Time current_time = base::Time::Now(); |
- |
- base::TimeDelta request_delay = |
- ConvertToDuration(request_buffers_state_.total_bytes()); |
- |
- // Add message delivery delay. |
- if (current_time > request_buffers_state_.timestamp) { |
- base::TimeDelta receive_latency = |
- current_time - request_buffers_state_.timestamp; |
- |
- // If the receive latency is too much it may offset all the delay. |
- if (receive_latency >= request_delay) { |
- request_delay = base::TimeDelta(); |
- } else { |
- request_delay -= receive_latency; |
- } |
- } |
- |
- // Finally we need to adjust the delay according to playback rate. |
- if (GetPlaybackRate() != 1.0f) { |
- request_delay = base::TimeDelta::FromMicroseconds( |
- static_cast<int64>(ceil(request_delay.InMicroseconds() * |
- GetPlaybackRate()))); |
- } |
- |
- bool buffer_empty = (request_buffers_state_.pending_bytes == 0) && |
- (current_time >= earliest_end_time_); |
- |
- // For high latency mode we don't write length into shared memory, |
- // it is explicit part of AudioHostMsg_NotifyPacketReady() message, |
- // so no need to reserve first word of buffer for length. |
- uint32 filled = FillBuffer(static_cast<uint8*>(shared_memory_->memory()), |
- shared_memory_size_, request_delay, |
- buffer_empty); |
- UpdateEarliestEndTime(filled, request_delay, current_time); |
- pending_request_ = false; |
- |
- // Then tell browser process we are done filling into the buffer. |
- Send(new AudioHostMsg_NotifyPacketReady(stream_id_, filled)); |
} |
-} |
-void AudioRendererImpl::WillDestroyCurrentMessageLoop() { |
- DCHECK(!ChildProcess::current() || // During shutdown. |
- (MessageLoop::current() == |
- ChildProcess::current()->io_message_loop())); |
+ // Adjust the playback delay. |
+ base::Time current_time = base::Time::Now(); |
- // We treat the IO loop going away the same as stopping. |
- { |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- return; |
+ base::TimeDelta request_delay = |
+ base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
- stopped_ = true; |
+ // Finally we need to adjust the delay according to playback rate. |
+ if (GetPlaybackRate() != 1.0f) { |
+ request_delay = base::TimeDelta::FromMicroseconds( |
+ static_cast<int64>(ceil(request_delay.InMicroseconds() * |
+ GetPlaybackRate()))); |
} |
- DestroyTask(); |
-} |
-// Our audio thread runs here. We receive requests for more data and send it |
-// on this thread. |
-void AudioRendererImpl::Run() { |
- DCHECK_EQ(kLowLatency, latency_type_); |
- audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); |
- |
- int bytes; |
- while (sizeof(bytes) == socket_->Receive(&bytes, sizeof(bytes))) { |
- if (bytes == media::AudioOutputController::kPauseMark) { |
- // When restarting playback, host should get new data, |
- // not what is currently in the buffer. |
- media::SetActualDataSizeInBytes(shared_memory_.get(), |
- shared_memory_size_, |
- 0); |
- continue; |
- } |
- else if (bytes < 0) |
- break; |
- base::AutoLock auto_lock(lock_); |
- if (stopped_) |
- break; |
- float playback_rate = GetPlaybackRate(); |
- if (playback_rate <= 0.0f) |
- continue; |
- DCHECK(shared_memory_.get()); |
- base::TimeDelta request_delay = ConvertToDuration(bytes); |
+ uint32 bytes_per_frame = |
+ audio_parameters_.bits_per_sample * audio_parameters_.channels / 8; |
- // We need to adjust the delay according to playback rate. |
- if (playback_rate != 1.0f) { |
- request_delay = base::TimeDelta::FromMicroseconds( |
- static_cast<int64>(ceil(request_delay.InMicroseconds() * |
- playback_rate))); |
- } |
- base::Time time_now = base::Time::Now(); |
- uint32 size = FillBuffer(static_cast<uint8*>(shared_memory_->memory()), |
- shared_memory_size_, |
+ const size_t buf_size = number_of_frames * bytes_per_frame; |
+ scoped_array<uint8> buf(new uint8[buf_size]); |
+ |
+ base::Time time_now = base::Time::Now(); |
+ uint32 filled = FillBuffer(buf.get(), |
+ buf_size, |
request_delay, |
time_now >= earliest_end_time_); |
- media::SetActualDataSizeInBytes(shared_memory_.get(), |
- shared_memory_size_, |
- size); |
- UpdateEarliestEndTime(size, request_delay, time_now); |
+ DCHECK_LE(filled, buf_size); |
+ |
+ uint32 filled_frames = filled / bytes_per_frame; |
+ |
+ // Deinterleave each audio channel. |
+ int channels = audio_data.size(); |
+ for (int channel_index = 0; channel_index < channels; ++channel_index) { |
+ media::DeinterleaveAudioChannel(buf.get(), |
+ audio_data[channel_index], |
+ channels, |
+ channel_index, |
+ bytes_per_frame / channels, |
+ filled_frames); |
+ |
+ // If FillBuffer() didn't give us enough data then zero out the remainder. |
+ if (filled_frames < number_of_frames) { |
+ int frames_to_zero = number_of_frames - filled_frames; |
+ memset(audio_data[channel_index], 0, sizeof(float) * frames_to_zero); |
+ } |
} |
} |
- |
-void AudioRendererImpl::Send(IPC::Message* message) { |
- filter_->Send(message); |
-} |