| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ | 5 #ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ |
| 6 #define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ | 6 #define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ |
| 7 | 7 |
| 8 #include "base/atomicops.h" | 8 #include "base/atomicops.h" |
| 9 #include "base/files/file.h" | 9 #include "base/files/file.h" |
| 10 #include "base/synchronization/lock.h" | 10 #include "base/synchronization/lock.h" |
| (...skipping 17 matching lines...) |
| 28 class AudioParameters; | 28 class AudioParameters; |
| 29 } // namespace media | 29 } // namespace media |
| 30 | 30 |
| 31 namespace webrtc { | 31 namespace webrtc { |
| 32 class AudioFrame; | 32 class AudioFrame; |
| 33 class TypingDetection; | 33 class TypingDetection; |
| 34 } | 34 } |
| 35 | 35 |
| 36 namespace content { | 36 namespace content { |
| 37 | 37 |
| 38 class MediaStreamAudioFifo; |
| 38 class RTCMediaConstraints; | 39 class RTCMediaConstraints; |
| 39 | 40 |
| 40 using webrtc::AudioProcessorInterface; | 41 using webrtc::AudioProcessorInterface; |
| 41 | 42 |
| 42 // This class owns an object of webrtc::AudioProcessing which contains signal | 43 // This class owns an object of webrtc::AudioProcessing which contains signal |
| 43 // processing components like AGC, AEC and NS. It enables the components based | 44 // processing components like AGC, AEC and NS. It enables the components based |
| 44 // on the getUserMedia constraints, processes the data and outputs it in | 45 // on the getUserMedia constraints, processes the data and outputs it in |
| 45 // 10 ms chunks. | 46 // 10 ms chunks. |
| 46 class CONTENT_EXPORT MediaStreamAudioProcessor : | 47 class CONTENT_EXPORT MediaStreamAudioProcessor : |
| 47 NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink), | 48 NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink), |
| 48 NON_EXPORTED_BASE(public AudioProcessorInterface), | 49 NON_EXPORTED_BASE(public AudioProcessorInterface), |
| 49 NON_EXPORTED_BASE(public AecDumpMessageFilter::AecDumpDelegate) { | 50 NON_EXPORTED_BASE(public AecDumpMessageFilter::AecDumpDelegate) { |
| 50 public: | 51 public: |
| 51 // Returns false if |kDisableAudioTrackProcessing| is set to true, otherwise | 52 // Returns false if |kDisableAudioTrackProcessing| is set to true, otherwise |
| 52 // returns true. | 53 // returns true. |
| 53 static bool IsAudioTrackProcessingEnabled(); | 54 static bool IsAudioTrackProcessingEnabled(); |
| 54 | 55 |
| 55 // |playout_data_source| is used to register this class as a sink to the | 56 // |playout_data_source| is used to register this class as a sink to the |
| 56 // WebRtc playout data for processing AEC. If clients do not enable AEC, | 57 // WebRtc playout data for processing AEC. If clients do not enable AEC, |
| 57 // |playout_data_source| won't be used. | 58 // |playout_data_source| won't be used. |
| 58 MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints, | 59 MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints, |
| 59 int effects, | 60 int effects, |
| 60 WebRtcPlayoutDataSource* playout_data_source); | 61 WebRtcPlayoutDataSource* playout_data_source); |
| 61 | 62 |
| 62 // Called when format of the capture data has changed. | 63 // Called when the format of the capture data has changed. |
| 63 // Called on the main render thread. The caller is responsible for stopping | 64 // Called on the main render thread. The caller is responsible for stopping |
| 64 // the capture thread before calling this method. | 65 // the capture thread before calling this method. |
| 65 // After this method, the capture thread will be changed to a new capture | 66 // After this method, the capture thread will be changed to a new capture |
| 66 // thread. | 67 // thread. |
| 67 void OnCaptureFormatChanged(const media::AudioParameters& source_params); | 68 void OnCaptureFormatChanged(const media::AudioParameters& source_params); |
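The comments above imply a simple setup path on the main render thread: check the kill switch, construct the processor, and (re)configure it with the source format. A minimal sketch of that path, assuming a hypothetical owner such as the audio capturer; `capturer_constraints`, `effects_flags`, `playout_source`, and `source_params` are placeholder names for state the owner already holds, not part of this header:

  // Sketch only: hypothetical owner code on the main render thread.
  if (MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled()) {
    scoped_refptr<MediaStreamAudioProcessor> processor(
        new MediaStreamAudioProcessor(capturer_constraints, effects_flags,
                                      playout_source));
    // The owner must have stopped the capture thread before this call.
    processor->OnCaptureFormatChanged(source_params);
  }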
| 68 | 69 |
| 69 // Pushes capture data in |audio_source| to the internal FIFO. | 70 // Pushes capture data in |audio_source| to the internal FIFO. Each call to |
| 71 // this method should be followed by calls to ProcessAndConsumeData() until |
| 72 // it returns false, to pull out all available data. |
| 70 // Called on the capture audio thread. | 73 // Called on the capture audio thread. |
| 71 void PushCaptureData(const media::AudioBus* audio_source); | 74 void PushCaptureData(const media::AudioBus* audio_source); |
| 72 | 75 |
| 73 // Processes a block of 10 ms data from the internal FIFO and outputs it via | 76 // Processes a block of 10 ms data from the internal FIFO and outputs it via |
| 74 // |out|. |out| is the address of the pointer that will be pointed to | 77 // |out|. |out| is the address of a pointer that will point to |
| 75 // the post-processed data if the method is returning a true. The lifetime | 78 // the post-processed data if the method returns true. The lifetime |
| 76 // of the data represeted by |out| is guaranteed to outlive the method call. | 79 // of the data represented by |out| is guaranteed until this method is called |
| 77 // That also says *|out| won't change until this method is called again. | 80 // again. |
| 78 // |new_volume| receives the new microphone volume from the AGC. | 81 // |new_volume| receives the new microphone volume from the AGC. |
| 79 // The new microphoen volume range is [0, 255], and the value will be 0 if | 82 // The new microphone volume range is [0, 255], and the value will be 0 if |
| 80 // the microphone volume should not be adjusted. | 83 // the microphone volume should not be adjusted. |
| 81 // Returns true if the internal FIFO has at least 10 ms data for processing, | 84 // Returns true if the internal FIFO has at least 10 ms data for processing, |
| 82 // otherwise false. | 85 // otherwise false. |
| 83 // |capture_delay|, |volume| and |key_pressed| will be passed to | |
| 84 // webrtc::AudioProcessing to help processing the data. | |
| 85 // Called on the capture audio thread. | 86 // Called on the capture audio thread. |
| 87 // |
| 88 // TODO(ajm): Don't we want this to output float? |
| 86 bool ProcessAndConsumeData(base::TimeDelta capture_delay, | 89 bool ProcessAndConsumeData(base::TimeDelta capture_delay, |
| 87 int volume, | 90 int volume, |
| 88 bool key_pressed, | 91 bool key_pressed, |
| 89 int* new_volume, | 92 int* new_volume, |
| 90 int16** out); | 93 int16** out); |
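Taken together, PushCaptureData() and ProcessAndConsumeData() describe a push/pull pattern on the capture audio thread: push one capture buffer, then drain every available 10 ms chunk. A minimal sketch of that loop, where `processor`, `capture_bus`, `capture_delay`, `mic_volume`, `key_down`, and `DeliverProcessedData()` are hypothetical names for the caller's own state and consumer:

  // Sketch only: runs in the hypothetical caller's capture callback.
  processor->PushCaptureData(capture_bus);
  int new_volume = 0;
  int16* processed = NULL;
  while (processor->ProcessAndConsumeData(capture_delay, mic_volume, key_down,
                                          &new_volume, &processed)) {
    // |processed| holds one 10 ms chunk of post-processed interleaved int16
    // data and stays valid until the next ProcessAndConsumeData() call.
    DeliverProcessedData(processed, new_volume);  // Hypothetical consumer.
  }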
| 91 | 94 |
| 92 // Stops the audio processor; no more AEC dump or render data after calling | 95 // Stops the audio processor; no more AEC dump or render data after calling |
| 93 // this method. | 96 // this method. |
| 94 void Stop(); | 97 void Stop(); |
| 95 | 98 |
| 96 // The audio format of the input to the processor. | 99 // The audio formats of the capture input to and output from the processor. |
| 100 // Must only be called on the main render or audio capture threads. |
| 97 const media::AudioParameters& InputFormat() const; | 101 const media::AudioParameters& InputFormat() const; |
| 98 | |
| 99 // The audio format of the output from the processor. | |
| 100 const media::AudioParameters& OutputFormat() const; | 102 const media::AudioParameters& OutputFormat() const; |
| 101 | 103 |
| 102 // Accessor to check if the audio processing is enabled or not. | 104 // Accessor to check if the audio processing is enabled or not. |
| 103 bool has_audio_processing() const { return audio_processing_ != NULL; } | 105 bool has_audio_processing() const { return audio_processing_ != NULL; } |
| 104 | 106 |
| 105 // AecDumpMessageFilter::AecDumpDelegate implementation. | 107 // AecDumpMessageFilter::AecDumpDelegate implementation. |
| 106 // Called on the main render thread. | 108 // Called on the main render thread. |
| 107 virtual void OnAecDumpFile( | 109 virtual void OnAecDumpFile( |
| 108 const IPC::PlatformFileForTransit& file_handle) OVERRIDE; | 110 const IPC::PlatformFileForTransit& file_handle) OVERRIDE; |
| 109 virtual void OnDisableAecDump() OVERRIDE; | 111 virtual void OnDisableAecDump() OVERRIDE; |
| 110 virtual void OnIpcClosing() OVERRIDE; | 112 virtual void OnIpcClosing() OVERRIDE; |
| 111 | 113 |
| 112 protected: | 114 protected: |
| 113 friend class base::RefCountedThreadSafe<MediaStreamAudioProcessor>; | 115 friend class base::RefCountedThreadSafe<MediaStreamAudioProcessor>; |
| 114 virtual ~MediaStreamAudioProcessor(); | 116 virtual ~MediaStreamAudioProcessor(); |
| 115 | 117 |
| 116 private: | 118 private: |
| 117 friend class MediaStreamAudioProcessorTest; | 119 friend class MediaStreamAudioProcessorTest; |
| 118 FRIEND_TEST_ALL_PREFIXES(MediaStreamAudioProcessorTest, | 120 FRIEND_TEST_ALL_PREFIXES(MediaStreamAudioProcessorTest, |
| 119 GetAecDumpMessageFilter); | 121 GetAecDumpMessageFilter); |
| 120 | 122 |
| 121 class MediaStreamAudioConverter; | |
| 122 | |
| 123 // WebRtcPlayoutDataSource::Sink implementation. | 123 // WebRtcPlayoutDataSource::Sink implementation. |
| 124 virtual void OnPlayoutData(media::AudioBus* audio_bus, | 124 virtual void OnPlayoutData(media::AudioBus* audio_bus, |
| 125 int sample_rate, | 125 int sample_rate, |
| 126 int audio_delay_milliseconds) OVERRIDE; | 126 int audio_delay_milliseconds) OVERRIDE; |
| 127 virtual void OnPlayoutDataSourceChanged() OVERRIDE; | 127 virtual void OnPlayoutDataSourceChanged() OVERRIDE; |
| 128 | 128 |
| 129 // webrtc::AudioProcessorInterface implementation. | 129 // webrtc::AudioProcessorInterface implementation. |
| 130 // This method is called on the libjingle thread. | 130 // This method is called on the libjingle thread. |
| 131 virtual void GetStats(AudioProcessorStats* stats) OVERRIDE; | 131 virtual void GetStats(AudioProcessorStats* stats) OVERRIDE; |
| 132 | 132 |
| 133 // Helper to initialize the WebRtc AudioProcessing. | 133 // Helper to initialize the WebRtc AudioProcessing. |
| 134 void InitializeAudioProcessingModule( | 134 void InitializeAudioProcessingModule( |
| 135 const blink::WebMediaConstraints& constraints, int effects); | 135 const blink::WebMediaConstraints& constraints, int effects); |
| 136 | 136 |
| 137 // Helper to initialize the capture converter. | 137 // Helper to initialize the capture converter. |
| 138 void InitializeCaptureConverter(const media::AudioParameters& source_params); | 138 void InitializeCaptureFifo(const media::AudioParameters& input_format); |
| 139 | 139 |
| 140 // Helper to initialize the render converter. | 140 // Helper to initialize the render converter. |
| 141 void InitializeRenderConverterIfNeeded(int sample_rate, | 141 void InitializeRenderFifoIfNeeded(int sample_rate, |
| 142 int number_of_channels, | 142 int number_of_channels, |
| 143 int frames_per_buffer); | 143 int frames_per_buffer); |
| 144 | 144 |
| 145 // Called by ProcessAndConsumeData(). | 145 // Called by ProcessAndConsumeData(). |
| 146 // Returns the new microphone volume in the range of [0, 255]. | 146 // Returns the new microphone volume in the range of [0, 255]. |
| 147 // When the volume does not need to be updated, it returns 0. | 147 // When the volume does not need to be updated, it returns 0. |
| 148 int ProcessData(webrtc::AudioFrame* audio_frame, | 148 int ProcessData(const media::AudioBus* input, |
| 149 base::TimeDelta capture_delay, | 149 base::TimeDelta capture_delay, |
| 150 int volume, | 150 int volume, |
| 151 bool key_pressed); | 151 bool key_pressed, |
| 152 media::AudioBus* output); |
| 152 | 153 |
| 153 // Cached value for the render delay latency. This member is accessed by | 154 // Cached value for the render delay latency. This member is accessed by |
| 154 // both the capture audio thread and the render audio thread. | 155 // both the capture audio thread and the render audio thread. |
| 155 base::subtle::Atomic32 render_delay_ms_; | 156 base::subtle::Atomic32 render_delay_ms_; |
| 156 | 157 |
| 157 // webrtc::AudioProcessing module which does AEC, AGC, NS, HighPass filter, | 158 // Module to handle processing and format conversion. |
| 158 // ..etc. | |
| 159 scoped_ptr<webrtc::AudioProcessing> audio_processing_; | 159 scoped_ptr<webrtc::AudioProcessing> audio_processing_; |
| 160 | 160 |
| 161 // Converter used for the down-mixing and resampling of the capture data. | 161 // FIFO to provide 10 ms capture chunks. |
| 162 scoped_ptr<MediaStreamAudioConverter> capture_converter_; | 162 scoped_ptr<MediaStreamAudioFifo> capture_fifo_; |
| 163 // Receives processing output. |
| 164 scoped_ptr<media::AudioBus> output_bus_; |
| 165 // Receives interleaved int16 data for output. |
| 166 scoped_ptr<int16[]> output_data_; |
| 163 | 167 |
| 164 // AudioFrame used to hold the output of |capture_converter_|. | 168 // FIFO to provide 10 ms render chunks when the AEC is enabled. |
| 165 webrtc::AudioFrame capture_frame_; | 169 scoped_ptr<MediaStreamAudioFifo> render_fifo_; |
| 166 | 170 |
| 167 // Converter used for the down-mixing and resampling of the render data when | 171 // These are mutated on the main render thread in OnCaptureFormatChanged(). |
| 168 // the AEC is enabled. | 172 // The caller guarantees this does not run concurrently with accesses on the |
| 169 scoped_ptr<MediaStreamAudioConverter> render_converter_; | 173 // capture audio thread. |
| 170 | 174 media::AudioParameters input_format_; |
| 171 // AudioFrame used to hold the output of |render_converter_|. | 175 media::AudioParameters output_format_; |
| 172 webrtc::AudioFrame render_frame_; | 176 // Only used on the render audio thread. |
| 173 | 177 media::AudioParameters render_format_; |
| 174 // Data bus to help converting interleaved data to an AudioBus. | |
| 175 scoped_ptr<media::AudioBus> render_data_bus_; | |
| 176 | 178 |
| 177 // Raw pointer to the WebRtcPlayoutDataSource, which is valid for the | 179 // Raw pointer to the WebRtcPlayoutDataSource, which is valid for the |
| 178 // lifetime of RenderThread. | 180 // lifetime of RenderThread. |
| 179 WebRtcPlayoutDataSource* playout_data_source_; | 181 WebRtcPlayoutDataSource* playout_data_source_; |
| 180 | 182 |
| 181 // Used to DCHECK that the destructor is called on the main render thread. | 183 // Used to DCHECK that some methods are called on the main render thread. |
| 182 base::ThreadChecker main_thread_checker_; | 184 base::ThreadChecker main_thread_checker_; |
| 183 | |
| 184 // Used to DCHECK that some methods are called on the capture audio thread. | 185 // Used to DCHECK that some methods are called on the capture audio thread. |
| 185 base::ThreadChecker capture_thread_checker_; | 186 base::ThreadChecker capture_thread_checker_; |
| 186 | 187 // Used to DCHECK that some methods are called on the render audio thread. |
| 187 // Used to DCHECK that PushRenderData() is called on the render audio thread. | |
| 188 base::ThreadChecker render_thread_checker_; | 188 base::ThreadChecker render_thread_checker_; |
| 189 | 189 |
| 190 // Flag to enable the stereo channels mirroring. | 190 // Flag to enable stereo channel mirroring. |
| 191 bool audio_mirroring_; | 191 bool audio_mirroring_; |
| 192 | 192 |
| 193 // Used by the typing detection. | |
| 194 scoped_ptr<webrtc::TypingDetection> typing_detector_; | 193 scoped_ptr<webrtc::TypingDetection> typing_detector_; |
| 195 | |
| 196 // This flag is used to show the result of typing detection. | 194 // This flag is used to show the result of typing detection. |
| 197 // It can be accessed by the capture audio thread and by the libjingle thread | 195 // It can be accessed by the capture audio thread and by the libjingle thread |
| 198 // which calls GetStats(). | 196 // which calls GetStats(). |
| 199 base::subtle::Atomic32 typing_detected_; | 197 base::subtle::Atomic32 typing_detected_; |
| 200 | 198 |
| 201 // Communication with browser for AEC dump. | 199 // Communication with browser for AEC dump. |
| 202 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_; | 200 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_; |
| 203 | 201 |
| 204 // Flag to avoid executing Stop() more than once. | 202 // Flag to avoid executing Stop() more than once. |
| 205 bool stopped_; | 203 bool stopped_; |
| 206 }; | 204 }; |
| 207 | 205 |
| 208 } // namespace content | 206 } // namespace content |
| 209 | 207 |
| 210 #endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ | 208 #endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ |