Index: content/renderer/media/webrtc_audio_processor.h |
diff --git a/content/renderer/media/webrtc_audio_processor.h b/content/renderer/media/webrtc_audio_processor.h |
new file mode 100644 |
index 0000000000000000000000000000000000000000..f0c8212135353221babdb641ebdbd5f66d692e4b |
--- /dev/null |
+++ b/content/renderer/media/webrtc_audio_processor.h |
@@ -0,0 +1,111 @@ |
+// Copyright 2013 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_PROCESSOR_H_ |
+#define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_PROCESSOR_H_ |
+ |
+#include "base/synchronization/lock.h" |
+#include "content/common/content_export.h" |
+#include "media/base/audio_converter.h" |
+#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h" |
+#include "third_party/webrtc/modules/audio_processing/include/audio_processing.h" |
+#include "third_party/webrtc/modules/interface/module_common_types.h" |
+ |
+namespace media { |
+class AudioBus; |
+class AudioFifo; |
+class AudioParameters; |
+} // namespace media |
+ |
+namespace webrtc { |
+class AudioFrame; |
+} |
+ |
+namespace content { |
+ |
+// This class owns an object of webrtc::AudioProcessing which contains signal |
+// processing components like AGC, AEC and NS. It enables the components based |
+// on the constraints, processes the data and outputs it in 10 ms |
+// data chunks. |
+class CONTENT_EXPORT WebRtcAudioProcessor { |
+ public: |
+ explicit WebRtcAudioProcessor( |
+ const webrtc::MediaConstraintsInterface* constraints); |
DaleCurtis
2013/11/06 00:21:18
Should this be a const& instead?
no longer working on chromium
2013/11/06 16:45:14
It is following the caller WebRtcLocalAudioTrack,
DaleCurtis
2013/11/07 01:36:00
sgtm
|
+ ~WebRtcAudioProcessor(); |
+ |
+ // Pushes capture data in |audio_source| to the internal FIFO. |
+ // Called on the capture audio thread. |
+ void PushCaptureData(media::AudioBus* audio_source); |
+ |
+ // Processes a block of 10 ms data from the internal FIFO and outputs it via |
+ // |out|. |
+  // Returns true if the internal FIFO has at least 10 ms of data for processing, |
+ // otherwise false. |
+ // Called on the capture audio thread. |
+ bool ProcessAndConsume10MsData(int capture_audio_delay_ms, |
DaleCurtis
2013/11/06 00:21:18
Are you sure you want 10ms in the name? Kind of a
no longer working on chromium
2013/11/06 16:45:14
I am glad to remove 10ms. Done
|
+ int volume, |
+ bool key_pressed, |
+ int16** out); |
+ |
+ // Called when the format of the capture data has changed. |
+ // Called on the main render thread. |
+ void SetCaptureFormat(const media::AudioParameters& source_params); |
+ |
+  // Pushes the render audio to webrtc::AudioProcessing for analysis. This is |
+  // needed iff echo processing is enabled. |
+ // Called on the render audio thread. |
+ void PushRenderData(const int16* render_audio, |
+ int sample_rate, |
+ int number_of_channels, |
+ int number_of_frames, |
+ int render_delay_ms); |
DaleCurtis
2013/11/06 00:21:18
How about using base::TimeDelta everywhere instead
no longer working on chromium
2013/11/06 16:45:14
May I ask why base::TimeDelta is preferred?
I thin
DaleCurtis
2013/11/07 01:36:00
As time goes forward we want to replace all _ms va
no longer working on chromium
2013/11/07 14:43:12
Could you please explain why you think base::TimeD
DaleCurtis
2013/11/07 20:44:08
You should be using base::TimeDelta (or Time, Time
|
+ |
+ // The audio format of the output from the processor. |
+ const media::AudioParameters& OutputFormat() const; |
+ |
+  // Accessor to check whether audio processing is enabled. |
+ bool has_audio_processing() const { return audio_processing_.get() != NULL; } |
+ |
+ private: |
+ class WebRtcAudioConverter; |
+ |
+ // Helper to initialize the WebRtc AudioProcessing. |
+ void InitializeAudioProcessingModule( |
+ const webrtc::MediaConstraintsInterface* constraints); |
DaleCurtis
2013/11/06 00:21:18
const&?
no longer working on chromium
2013/11/06 16:45:14
a separate refactor CL?
|
+ |
+ // Helper to initialize the render converter. |
+ void InitializeRenderConverterIfNeeded(int sample_rate, |
+ int number_of_channels, |
+ int frames_per_buffer); |
+ |
+ // Called by ProcessAndConsume10MsData(). |
+ void ProcessData(int audio_delay_milliseconds, |
+ int volume, |
+ bool key_pressed); |
+ |
+ // Called when the processor is going away. |
+ void StopAudioProcessing(); |
+ |
+ // Cached value for the render delay latency. |
+ int render_delay_ms_; |
+ |
+ // Protects |render_delay_ms_|. |
+ // TODO(xians): Can we get rid of the lock? |
+ mutable base::Lock lock_; |
+ |
+  // WebRtc AudioProcessing module which provides signal processing components |
+  // such as AEC, AGC, NS and a high-pass filter. |
+ scoped_ptr<webrtc::AudioProcessing> audio_processing_; |
+ |
+ // Converter used for the down-mixing and resampling of the capture data. |
+ scoped_ptr<WebRtcAudioConverter> capture_converter_; |
+ |
+ // Converter used for the down-mixing and resampling of the render data when |
+ // the AEC is enabled. |
+ scoped_ptr<WebRtcAudioConverter> render_converter_; |
+}; |
+ |
+} // namespace content |
+ |
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_PROCESSOR_H_ |