Index: content/renderer/media/webrtc_local_audio_track.cc
diff --git a/content/renderer/media/webrtc_local_audio_track.cc b/content/renderer/media/webrtc_local_audio_track.cc
index 137e4a33a830ec120d64988ce19aced262b969c6..3465ae0f2208f84394d5a28fff8d8e98e0c4336a 100644
--- a/content/renderer/media/webrtc_local_audio_track.cc
+++ b/content/renderer/media/webrtc_local_audio_track.cc
@@ -7,117 +7,26 @@
 #include "content/renderer/media/webaudio_capturer_source.h"
 #include "content/renderer/media/webrtc_audio_capturer.h"
 #include "content/renderer/media/webrtc_audio_capturer_sink_owner.h"
+#include "content/renderer/media/webrtc_audio_processor.h"
 #include "content/renderer/media/webrtc_local_audio_source_provider.h"
 #include "media/base/audio_fifo.h"
 #include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
 namespace content {
-static const size_t kMaxNumberOfBuffersInFifo = 2;
 static const char kAudioTrackKind[] = "audio";
-namespace {
-
-using webrtc::MediaConstraintsInterface;
-
-// This helper function checks if any audio constraints are set that require
-// audio processing to be applied. Right now this is a big, single switch for
-// all of the properties, but in the future they'll be handled one by one.
-bool NeedsAudioProcessing(
-    const webrtc::MediaConstraintsInterface* constraints) {
-  if (!constraints)
-    return false;
-
-  static const char* kAudioProcessingProperties[] = {
-    MediaConstraintsInterface::kEchoCancellation,
-    MediaConstraintsInterface::kExperimentalEchoCancellation,
-    MediaConstraintsInterface::kAutoGainControl,
-    MediaConstraintsInterface::kExperimentalAutoGainControl,
-    MediaConstraintsInterface::kNoiseSuppression,
-    MediaConstraintsInterface::kHighpassFilter,
-    MediaConstraintsInterface::kTypingNoiseDetection,
-  };
-
-  for (size_t i = 0; i < arraysize(kAudioProcessingProperties); ++i) {
-    bool value = false;
-    if (webrtc::FindConstraint(constraints, kAudioProcessingProperties[i],
-                               &value, NULL) &&
-        value) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-}  // namespace.
-
-// This is a temporary audio buffer with parameters used to send data to
-// callbacks.
-class WebRtcLocalAudioTrack::ConfiguredBuffer :
-    public base::RefCounted<WebRtcLocalAudioTrack::ConfiguredBuffer> {
- public:
-  ConfiguredBuffer() : sink_buffer_size_(0) {}
-
-  void Initialize(const media::AudioParameters& params) {
-    DCHECK(params.IsValid());
-    params_ = params;
-
-    // Use 10ms as the sink buffer size since that is the native packet size
-    // WebRtc is running on.
-    sink_buffer_size_ = params.sample_rate() / 100;
-    audio_wrapper_ =
-        media::AudioBus::Create(params.channels(), sink_buffer_size_);
-    buffer_.reset(new int16[sink_buffer_size_ * params.channels()]);
-
-    // The size of the FIFO should be at least twice of the source buffer size
-    // or twice of the sink buffer size.
-    int buffer_size = std::max(
-        kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
-        kMaxNumberOfBuffersInFifo * sink_buffer_size_);
-    fifo_.reset(new media::AudioFifo(params.channels(), buffer_size));
-  }
-
-  void Push(media::AudioBus* audio_source) {
-    DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
-    fifo_->Push(audio_source);
-  }
-
-  bool Consume() {
-    if (fifo_->frames() < audio_wrapper_->frames())
-      return false;
-
-    fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
-    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
-                                  params_.bits_per_sample() / 8,
-                                  buffer());
-    return true;
-  }
-
-  int16* buffer() const { return buffer_.get(); }
-  const media::AudioParameters& params() const { return params_; }
-  int sink_buffer_size() const { return sink_buffer_size_; }
-
- private:
-  ~ConfiguredBuffer() {}
-  friend class base::RefCounted<WebRtcLocalAudioTrack::ConfiguredBuffer>;
-
-  media::AudioParameters params_;
-  scoped_ptr<media::AudioBus> audio_wrapper_;
-  scoped_ptr<media::AudioFifo> fifo_;
-  scoped_ptr<int16[]> buffer_;
-  int sink_buffer_size_;
-};
-
 scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
     const std::string& id,
     const scoped_refptr<WebRtcAudioCapturer>& capturer,
     WebAudioCapturerSource* webaudio_source,
     webrtc::AudioSourceInterface* track_source,
-    const webrtc::MediaConstraintsInterface* constraints) {
+    const webrtc::MediaConstraintsInterface* constraints,
+    WebRtcAudioDeviceImpl* audio_device) {
   talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
       new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
-          id, capturer, webaudio_source, track_source, constraints);
+          id, capturer, webaudio_source, track_source,
+          constraints, audio_device);
   return track;
 }
@@ -126,13 +35,19 @@ WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
     const scoped_refptr<WebRtcAudioCapturer>& capturer,
     WebAudioCapturerSource* webaudio_source,
     webrtc::AudioSourceInterface* track_source,
-    const webrtc::MediaConstraintsInterface* constraints)
+    const webrtc::MediaConstraintsInterface* constraints,
+    WebRtcAudioDeviceImpl* audio_device)
     : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
       capturer_(capturer),
       webaudio_source_(webaudio_source),
       track_source_(track_source),
-      need_audio_processing_(NeedsAudioProcessing(constraints)) {
+      audio_processor_(new WebRtcAudioProcessor(constraints)),
+      source_provider_(new WebRtcLocalAudioSourceProvider()),
+      audio_device_(audio_device) {
   DCHECK(capturer.get() || webaudio_source);
+  DCHECK(audio_processor_.get());
+  DCHECK(source_provider_.get());
+  AddSink(source_provider_.get());
   DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
 }
@@ -149,38 +64,35 @@ void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
                                     bool key_pressed) {
   scoped_refptr<WebRtcAudioCapturer> capturer;
   std::vector<int> voe_channels;
-  int sample_rate = 0;
-  int number_of_channels = 0;
-  int number_of_frames = 0;
   SinkList sinks;
+  media::AudioParameters params;
   bool is_webaudio_source = false;
-  scoped_refptr<ConfiguredBuffer> current_buffer;
   {
     base::AutoLock auto_lock(lock_);
     capturer = capturer_;
     voe_channels = voe_channels_;
-    current_buffer = buffer_;
-    sample_rate = current_buffer->params().sample_rate();
-    number_of_channels = current_buffer->params().channels();
-    number_of_frames = current_buffer->sink_buffer_size();
     sinks = sinks_;
+    params = sink_params_;
     is_webaudio_source = (webaudio_source_.get() != NULL);
   }
+  DCHECK(params.IsValid());
+  DCHECK(params.frames_per_buffer() == params.sample_rate() / 100);
-  // Push the data to the fifo.
-  current_buffer->Push(audio_source);
+  audio_processor_->Push(audio_source);
+  // Turn off the audio processing in WebRtc when the audio processor in Chrome
+  // is on.
+  bool need_audio_processing = !audio_processor_->has_audio_processing();
   // When the source is WebAudio, turn off the audio processing if the delay
   // value is 0 even though the constraint is set to true. In such case, it
   // indicates the data is not from microphone.
-  // TODO(xians): remove the flag when supporting one APM per audio track.
-  // See crbug/264611 for details.
-  bool need_audio_processing = need_audio_processing_;
   if (is_webaudio_source && need_audio_processing)
     need_audio_processing = (audio_delay_milliseconds != 0);
   int current_volume = volume;
-  while (current_buffer->Consume()) {
+  while (audio_processor_->ProcessAndConsume10MsData(
+      audio_delay_milliseconds, volume, key_pressed)) {
+    // TODO(xians): Get the new volume and set it to |current_volume|.
     // Feed the data to the sinks.
     // TODO (jiayl): we should not pass the real audio data down if the track is
     // disabled. This is currently done so to feed input to WebRTC typing
@@ -188,10 +100,10 @@ void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
     // WebRTC to the track.
     for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
       int new_volume = (*it)->CaptureData(voe_channels,
-                                          current_buffer->buffer(),
-                                          sample_rate,
-                                          number_of_channels,
-                                          number_of_frames,
+                                          audio_processor_->OutputBuffer(),
+                                          params.sample_rate(),
+                                          params.channels(),
+                                          params.frames_per_buffer(),
                                           audio_delay_milliseconds,
                                           current_volume,
                                           need_audio_processing,
@@ -208,23 +120,21 @@ void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
 void WebRtcLocalAudioTrack::SetCaptureFormat(
     const media::AudioParameters& params) {
-  if (!params.IsValid())
-    return;
-
-  scoped_refptr<ConfiguredBuffer> new_buffer(new ConfiguredBuffer());
-  new_buffer->Initialize(params);
+  DCHECK(params.IsValid());
+  audio_processor_->SetFormat(params);
   SinkList sinks;
   {
     base::AutoLock auto_lock(lock_);
-    buffer_ = new_buffer;
+    source_params_ = params;
+    sink_params_ = audio_processor_->OutputFormat();
     sinks = sinks_;
   }
   // Update all the existing sinks with the new format.
   for (SinkList::const_iterator it = sinks.begin();
        it != sinks.end(); ++it) {
-    (*it)->SetCaptureFormat(params);
+    (*it)->SetCaptureFormat(sink_params_);
   }
 }
@@ -269,8 +179,9 @@ void WebRtcLocalAudioTrack::AddSink(WebRtcAudioCapturerSink* sink) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
   base::AutoLock auto_lock(lock_);
-  if (buffer_.get())
-    sink->SetCaptureFormat(buffer_->params());
+
+  if (sink_params_.IsValid())
+    sink->SetCaptureFormat(sink_params_);
   // Verify that |sink| is not already added to the list.
   DCHECK(std::find_if(
@@ -302,6 +213,23 @@ void WebRtcLocalAudioTrack::RemoveSink(
   }
 }
+void WebRtcLocalAudioTrack::OnRenderData(const int16* render_audio,
+                                         int sample_rate,
+                                         int number_of_channels,
+                                         int number_of_frames,
+                                         int render_delay_ms) {
+  audio_processor_->FeedRenderDataToAudioProcessing(render_audio,
+                                                    sample_rate,
+                                                    number_of_channels,
+                                                    number_of_frames,
+                                                    render_delay_ms);
+}
+
+void WebRtcLocalAudioTrack::OnRenderClosing() {
+  base::AutoLock auto_lock(lock_);
+  audio_device_ = NULL;
+}
+
 void WebRtcLocalAudioTrack::Start() {
   DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcLocalAudioTrack::Start()";
@@ -309,17 +237,15 @@ void WebRtcLocalAudioTrack::Start() {
     // If the track is hooking up with WebAudio, do NOT add the track to the
     // capturer as its sink otherwise two streams in different clock will be
    // pushed through the same track.
-    WebRtcLocalAudioSourceProvider* source_provider = NULL;
-    if (capturer_.get()) {
-      source_provider = static_cast<WebRtcLocalAudioSourceProvider*>(
-          capturer_->audio_source_provider());
-    }
-    webaudio_source_->Start(this, source_provider);
+    webaudio_source_->Start(this, capturer_.get());
     return;
   }
   if (capturer_.get())
     capturer_->AddTrack(this);
+
+  if (audio_device_)
+    audio_device_->RemoveRenderDataObserver(this);
 }
 void WebRtcLocalAudioTrack::Stop() {
@@ -334,8 +260,6 @@ void WebRtcLocalAudioTrack::Stop() {
     // Also note that the track is not registered as a sink to the |capturer_|
     // in such case and no need to call RemoveTrack().
     webaudio_source_->Stop();
-  } else {
-    capturer_->RemoveTrack(this);
   }
   // Protect the pointers using the lock when accessing |sinks_| and
@@ -346,6 +270,9 @@ void WebRtcLocalAudioTrack::Stop() {
     sinks = sinks_;
     webaudio_source_ = NULL;
     capturer_ = NULL;
+    if (audio_device_)
+      audio_device_->RemoveRenderDataObserver(this);
+    audio_device_ = NULL;
   }
   for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it)