Index: content/renderer/media/webrtc_local_audio_track.cc
diff --git a/content/renderer/media/webrtc_local_audio_track.cc b/content/renderer/media/webrtc_local_audio_track.cc
index 137e4a33a830ec120d64988ce19aced262b969c6..c4e98d5527bbee3b56033ff92a066ae3914d2b1a 100644
--- a/content/renderer/media/webrtc_local_audio_track.cc
+++ b/content/renderer/media/webrtc_local_audio_track.cc
@@ -52,63 +52,6 @@ bool NeedsAudioProcessing(
 
 }  // namespace.
 
-// This is a temporary audio buffer with parameters used to send data to
-// callbacks.
-class WebRtcLocalAudioTrack::ConfiguredBuffer :
-    public base::RefCounted<WebRtcLocalAudioTrack::ConfiguredBuffer> {
- public:
-  ConfiguredBuffer() : sink_buffer_size_(0) {}
-
-  void Initialize(const media::AudioParameters& params) {
-    DCHECK(params.IsValid());
-    params_ = params;
-
-    // Use 10ms as the sink buffer size since that is the native packet size
-    // WebRtc is running on.
-    sink_buffer_size_ = params.sample_rate() / 100;
-    audio_wrapper_ =
-        media::AudioBus::Create(params.channels(), sink_buffer_size_);
-    buffer_.reset(new int16[sink_buffer_size_ * params.channels()]);
-
-    // The size of the FIFO should be at least twice of the source buffer size
-    // or twice of the sink buffer size.
-    int buffer_size = std::max(
-        kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
-        kMaxNumberOfBuffersInFifo * sink_buffer_size_);
-    fifo_.reset(new media::AudioFifo(params.channels(), buffer_size));
-  }
-
-  void Push(media::AudioBus* audio_source) {
-    DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
-    fifo_->Push(audio_source);
-  }
-
-  bool Consume() {
-    if (fifo_->frames() < audio_wrapper_->frames())
-      return false;
-
-    fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
-    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
-                                  params_.bits_per_sample() / 8,
-                                  buffer());
-    return true;
-  }
-
-  int16* buffer() const { return buffer_.get(); }
-  const media::AudioParameters& params() const { return params_; }
-  int sink_buffer_size() const { return sink_buffer_size_; }
-
- private:
-  ~ConfiguredBuffer() {}
-  friend class base::RefCounted<WebRtcLocalAudioTrack::ConfiguredBuffer>;
-
-  media::AudioParameters params_;
-  scoped_ptr<media::AudioBus> audio_wrapper_;
-  scoped_ptr<media::AudioFifo> fifo_;
-  scoped_ptr<int16[]> buffer_;
-  int sink_buffer_size_;
-};
-
 scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
     const std::string& id,
     const scoped_refptr<WebRtcAudioCapturer>& capturer,
@@ -143,66 +86,31 @@ WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
   Stop();
 }
 
-void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
-                                    int audio_delay_milliseconds,
-                                    int volume,
-                                    bool key_pressed) {
-  scoped_refptr<WebRtcAudioCapturer> capturer;
+void WebRtcLocalAudioTrack::Capture(const int16* audio_data,
+                                    int sample_rate,
+                                    int number_of_channels,
+                                    int number_of_frames) {
+  DCHECK(number_of_frames == (sample_rate / 100));
+
   std::vector<int> voe_channels;
-  int sample_rate = 0;
-  int number_of_channels = 0;
-  int number_of_frames = 0;
   SinkList sinks;
-  bool is_webaudio_source = false;
-  scoped_refptr<ConfiguredBuffer> current_buffer;
   {
     base::AutoLock auto_lock(lock_);
-    capturer = capturer_;
     voe_channels = voe_channels_;
-    current_buffer = buffer_;
-    sample_rate = current_buffer->params().sample_rate();
-    number_of_channels = current_buffer->params().channels();
-    number_of_frames = current_buffer->sink_buffer_size();
     sinks = sinks_;
-    is_webaudio_source = (webaudio_source_.get() != NULL);
   }
 
-  // Push the data to the fifo.
-  current_buffer->Push(audio_source);
-
-  // When the source is WebAudio, turn off the audio processing if the delay
-  // value is 0 even though the constraint is set to true. In such case, it
-  // indicates the data is not from microphone.
-  // TODO(xians): remove the flag when supporting one APM per audio track.
-  // See crbug/264611 for details.
-  bool need_audio_processing = need_audio_processing_;
-  if (is_webaudio_source && need_audio_processing)
-    need_audio_processing = (audio_delay_milliseconds != 0);
-
-  int current_volume = volume;
-  while (current_buffer->Consume()) {
-    // Feed the data to the sinks.
-    // TODO (jiayl): we should not pass the real audio data down if the track is
-    // disabled. This is currently done so to feed input to WebRTC typing
-    // detection and should be changed when audio processing is moved from
-    // WebRTC to the track.
-    for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
-      int new_volume = (*it)->CaptureData(voe_channels,
-                                          current_buffer->buffer(),
-                                          sample_rate,
-                                          number_of_channels,
-                                          number_of_frames,
-                                          audio_delay_milliseconds,
-                                          current_volume,
-                                          need_audio_processing,
-                                          key_pressed);
-      if (new_volume != 0 && capturer.get()) {
-        // Feed the new volume to WebRtc while changing the volume on the
-        // browser.
-        capturer->SetVolume(new_volume);
-        current_volume = new_volume;
-      }
-    }
+  // Feed the data to the sinks.
+  // TODO (jiayl): we should not pass the real audio data down if the track is
+  // disabled. This is currently done so to feed input to WebRTC typing
+  // detection and should be changed when audio processing is moved from
+  // WebRTC to the track.
+  for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
+    (*it)->CaptureData(voe_channels,
+                       audio_data,
+                       sample_rate,
+                       number_of_channels,
+                       number_of_frames);
   }
 }
 
@@ -211,13 +119,10 @@ void WebRtcLocalAudioTrack::SetCaptureFormat(
   if (!params.IsValid())
     return;
 
-  scoped_refptr<ConfiguredBuffer> new_buffer(new ConfiguredBuffer());
-  new_buffer->Initialize(params);
-
   SinkList sinks;
   {
     base::AutoLock auto_lock(lock_);
-    buffer_ = new_buffer;
+    params_ = params;
     sinks = sinks_;
   }
 
@@ -269,8 +174,8 @@ void WebRtcLocalAudioTrack::AddSink(WebRtcAudioCapturerSink* sink) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
  base::AutoLock auto_lock(lock_);
-  if (buffer_.get())
-    sink->SetCaptureFormat(buffer_->params());
+
+  sink->SetCaptureFormat(params_);
 
  // Verify that |sink| is not already added to the list.
  DCHECK(std::find_if(
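Note (not part of the patch): the new Capture() signature, together with the added DCHECK(number_of_frames == (sample_rate / 100)), means the caller must now hand the track exactly one 10 ms chunk of interleaved int16 samples per call; the rebuffering formerly done inside the track by ConfiguredBuffer's AudioFifo has to happen upstream (presumably in WebRtcAudioCapturer, which this diff does not show). The standalone C++ sketch below only illustrates that framing arithmetic; all names in it are hypothetical and it is not code from this change.

#include <cstdint>
#include <functional>
#include <vector>

// Callback shaped like the new Capture() contract: one 10 ms chunk of
// interleaved int16 samples per invocation.
using TenMsChunkCallback =
    std::function<void(const int16_t* audio_data,
                       int sample_rate,
                       int number_of_channels,
                       int number_of_frames)>;

// Accumulates arbitrarily sized interleaved input and emits fixed 10 ms
// chunks, i.e. sample_rate / 100 frames per chunk.
class TenMsRebuffer {
 public:
  TenMsRebuffer(int sample_rate, int number_of_channels)
      : sample_rate_(sample_rate),
        number_of_channels_(number_of_channels),
        frames_per_chunk_(sample_rate / 100) {}

  // Appends |number_of_frames| interleaved frames, then invokes |callback|
  // once for every complete 10 ms chunk now available.
  void Push(const int16_t* interleaved_data,
            int number_of_frames,
            const TenMsChunkCallback& callback) {
    pending_.insert(pending_.end(),
                    interleaved_data,
                    interleaved_data + number_of_frames * number_of_channels_);
    const size_t samples_per_chunk =
        static_cast<size_t>(frames_per_chunk_) * number_of_channels_;
    size_t offset = 0;
    while (pending_.size() - offset >= samples_per_chunk) {
      callback(pending_.data() + offset, sample_rate_, number_of_channels_,
               frames_per_chunk_);
      offset += samples_per_chunk;
    }
    // Keep only the leftover samples (less than one chunk) for the next call.
    pending_.erase(pending_.begin(), pending_.begin() + offset);
  }

 private:
  const int sample_rate_;
  const int number_of_channels_;
  const int frames_per_chunk_;
  std::vector<int16_t> pending_;
};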
|