| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ | 5 #ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ |
| 6 #define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ | 6 #define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ |
| 7 | 7 |
| 8 #include "base/atomicops.h" | 8 #include "base/atomicops.h" |
| 9 #include "base/platform_file.h" |
| 9 #include "base/synchronization/lock.h" | 10 #include "base/synchronization/lock.h" |
| 10 #include "base/threading/thread_checker.h" | 11 #include "base/threading/thread_checker.h" |
| 11 #include "base/time/time.h" | 12 #include "base/time/time.h" |
| 12 #include "content/common/content_export.h" | 13 #include "content/common/content_export.h" |
| 13 #include "content/renderer/media/webrtc_audio_device_impl.h" | 14 #include "content/renderer/media/webrtc_audio_device_impl.h" |
| 14 #include "media/base/audio_converter.h" | 15 #include "media/base/audio_converter.h" |
| 15 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" | 16 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" |
| 16 #include "third_party/webrtc/modules/audio_processing/include/audio_processing.h
" | 17 #include "third_party/webrtc/modules/audio_processing/include/audio_processing.h
" |
| 17 #include "third_party/webrtc/modules/interface/module_common_types.h" | 18 #include "third_party/webrtc/modules/interface/module_common_types.h" |
| 18 | 19 |
| (...skipping 22 matching lines...) |
| 41 // processing components like AGC, AEC and NS. It enables the components based | 42 // processing components like AGC, AEC and NS. It enables the components based |
| 42 // on the getUserMedia constraints, processes the data and outputs it in | 43 // on the getUserMedia constraints, processes the data and outputs it in |
| 43 // 10 ms chunks. | 44 // 10 ms chunks. |
| 44 class CONTENT_EXPORT MediaStreamAudioProcessor : | 45 class CONTENT_EXPORT MediaStreamAudioProcessor : |
| 45 NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink), | 46 NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink), |
| 46 NON_EXPORTED_BASE(public AudioProcessorInterface) { | 47 NON_EXPORTED_BASE(public AudioProcessorInterface) { |
| 47 public: | 48 public: |
| 48 // |playout_data_source| is used to register this class as a sink to the | 49 // |playout_data_source| is used to register this class as a sink to the |
| 49 // WebRtc playout data for AEC processing. If clients do not enable AEC, | 50 // WebRtc playout data for AEC processing. If clients do not enable AEC, |
| 50 // |playout_data_source| won't be used. | 51 // |playout_data_source| won't be used. |
| 51 MediaStreamAudioProcessor(const media::AudioParameters& source_params, | 52 MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints, |
| 52 const blink::WebMediaConstraints& constraints, | |
| 53 int effects, | 53 int effects, |
| 54 WebRtcPlayoutDataSource* playout_data_source); | 54 WebRtcPlayoutDataSource* playout_data_source); |
| 55 | 55 |
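For context, a minimal construction sketch on the main render thread (not part of this change). The class is ref-counted, so it is held through a scoped_refptr; |constraints|, |effects| and |playout_data_source| are assumed to be supplied by the owning capturer:

```cpp
scoped_refptr<MediaStreamAudioProcessor> processor(
    new MediaStreamAudioProcessor(constraints, effects, playout_data_source));
// Per the comment above, |playout_data_source| is only consumed when the
// constraints enable AEC.
```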
| 56 // Called when the format of the capture data has changed. |
| 57 // Called on the main render thread. The caller is responsible for stopping |
| 58 // the capture thread before calling this method. |
| 59 // After this call, capture will continue on a new capture |
| 60 // thread. |
| 61 void OnCaptureFormatChanged(const media::AudioParameters& source_params); |
| 62 |
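The ordering constraint above (stop capture, reconfigure, resume on a new thread) can be summarized in a short hedged sketch; StopCapture()/StartCapture() are hypothetical stand-ins for whatever mechanism the owning capturer uses to halt and restart its capture thread, and the types rely on the includes already in this header:

```cpp
// Hypothetical main-render-thread handler for a source format change.
void OnSourceFormatChanged(const media::AudioParameters& new_params) {
  capturer_->StopCapture();                        // Ensure no capture thread is running.
  processor_->OnCaptureFormatChanged(new_params);  // Safe to reconfigure the FIFO/converters now.
  capturer_->StartCapture();                       // Capture resumes on a new thread.
}
```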
| 56 // Pushes capture data in |audio_source| to the internal FIFO. | 63 // Pushes capture data in |audio_source| to the internal FIFO. |
| 57 // Called on the capture audio thread. | 64 // Called on the capture audio thread. |
| 58 void PushCaptureData(media::AudioBus* audio_source); | 65 void PushCaptureData(media::AudioBus* audio_source); |
| 59 | 66 |
| 60 // Processes a block of 10 ms data from the internal FIFO and outputs it via | 67 // Processes a block of 10 ms data from the internal FIFO and outputs it via |
| 61 // |out|. |out| is the address of a pointer that will point to the | 68 // |out|. |out| is the address of a pointer that will point to the |
| 62 // post-processed data if the method returns true. The data represented | 69 // post-processed data if the method returns true. The data represented |
| 63 // by |out| is guaranteed to outlive the method call; that is, *|out| | 70 // by |out| is guaranteed to outlive the method call; that is, *|out| |
| 64 // won't change until this method is called again. | 71 // won't change until this method is called again. |
| 65 // |new_volume| receives the new microphone volume from the AGC. | 72 // |new_volume| receives the new microphone volume from the AGC. |
| 66 // The new microphone volume range is [0, 255], and the value will be 0 if | 73 // The new microphone volume range is [0, 255], and the value will be 0 if |
| 67 // the microphone volume should not be adjusted. | 74 // the microphone volume should not be adjusted. |
| 68 // Returns true if the internal FIFO has at least 10 ms of data for | 75 // Returns true if the internal FIFO has at least 10 ms of data for |
| 69 // processing, otherwise false. | 76 // processing, otherwise false. |
| 70 // |capture_delay|, |volume| and |key_pressed| will be passed to | 77 // |capture_delay|, |volume| and |key_pressed| will be passed to |
| 71 // webrtc::AudioProcessing to help process the data. | 78 // webrtc::AudioProcessing to help process the data. |
| 72 // Called on the capture audio thread. | 79 // Called on the capture audio thread. |
| 73 bool ProcessAndConsumeData(base::TimeDelta capture_delay, | 80 bool ProcessAndConsumeData(base::TimeDelta capture_delay, |
| 74 int volume, | 81 int volume, |
| 75 bool key_pressed, | 82 bool key_pressed, |
| 76 int* new_volume, | 83 int* new_volume, |
| 77 int16** out); | 84 int16** out); |
| 78 | 85 |
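The push/process pair above implies a particular capture-thread loop: push each hardware buffer, then drain every complete 10 ms block. A hedged sketch of that loop follows (not taken from this change; OnCaptureData, DeliverProcessedData and SetMicrophoneVolume are hypothetical capturer-side names, and the types rely on the includes already in this header):

```cpp
// Hypothetical capture-thread callback on the owning capturer; |processor_| is
// a scoped_refptr<MediaStreamAudioProcessor> held by that capturer.
void OnCaptureData(media::AudioBus* audio_source,
                   base::TimeDelta capture_delay,
                   int volume,
                   bool key_pressed) {
  processor_->PushCaptureData(audio_source);

  int16* output = NULL;
  int new_volume = 0;
  // Drain every complete 10 ms block currently buffered in the FIFO.
  while (processor_->ProcessAndConsumeData(capture_delay, volume, key_pressed,
                                           &new_volume, &output)) {
    DeliverProcessedData(output);       // Hypothetical downstream sink call.
    if (new_volume != 0)
      SetMicrophoneVolume(new_volume);  // AGC requested a volume change.
  }
}
```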
| 79 | |
| 80 // The audio format of the input to the processor. | 86 // The audio format of the input to the processor. |
| 81 const media::AudioParameters& InputFormat() const; | 87 const media::AudioParameters& InputFormat() const; |
| 82 | 88 |
| 83 // The audio format of the output from the processor. | 89 // The audio format of the output from the processor. |
| 84 const media::AudioParameters& OutputFormat() const; | 90 const media::AudioParameters& OutputFormat() const; |
| 85 | 91 |
| 86 // Accessor to check whether audio processing is enabled. | 92 // Accessor to check whether audio processing is enabled. |
| 87 bool has_audio_processing() const { return audio_processing_ != NULL; } | 93 bool has_audio_processing() const { return audio_processing_ != NULL; } |
| 88 | 94 |
| 95 // Starts/stops the AEC dump on |audio_processing_|. |
| 96 // Called on the main render thread. |
| 97 // This method takes ownership of |aec_dump_file|. |
| 98 void StartAecDump(const base::PlatformFile& aec_dump_file); |
| 99 void StopAecDump(); |
| 100 |
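A hedged usage sketch for the dump controls; ToggleAecDump is a hypothetical helper, it assumes the caller has already opened |aec_dump_file| for writing on the main render thread, and the handle is presumably routed to webrtc::AudioProcessing for its debug recording:

```cpp
void ToggleAecDump(MediaStreamAudioProcessor* processor,
                   base::PlatformFile aec_dump_file,
                   bool enable) {
  if (enable) {
    // Ownership of |aec_dump_file| transfers to the processor.
    processor->StartAecDump(aec_dump_file);
  } else {
    // Stops dumping; the processor is responsible for the file it now owns.
    processor->StopAecDump();
  }
}
```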
| 89 protected: | 101 protected: |
| 90 friend class base::RefCountedThreadSafe<MediaStreamAudioProcessor>; | 102 friend class base::RefCountedThreadSafe<MediaStreamAudioProcessor>; |
| 91 virtual ~MediaStreamAudioProcessor(); | 103 virtual ~MediaStreamAudioProcessor(); |
| 92 | 104 |
| 93 private: | 105 private: |
| 94 friend class MediaStreamAudioProcessorTest; | 106 friend class MediaStreamAudioProcessorTest; |
| 95 | 107 |
| 96 class MediaStreamAudioConverter; | 108 class MediaStreamAudioConverter; |
| 97 | 109 |
| 98 // WebRtcPlayoutDataSource::Sink implementation. | 110 // WebRtcPlayoutDataSource::Sink implementation. |
| (...skipping 74 matching lines...) |
| 173 | 185 |
| 174 // This flag indicates the result of typing detection. | 186 // This flag indicates the result of typing detection. |
| 175 // It can be accessed by the capture audio thread and by the libjingle thread | 187 // It can be accessed by the capture audio thread and by the libjingle thread |
| 176 // which calls GetStats(). | 188 // which calls GetStats(). |
| 177 base::subtle::Atomic32 typing_detected_; | 189 base::subtle::Atomic32 typing_detected_; |
| 178 }; | 190 }; |
| 179 | 191 |
| 180 } // namespace content | 192 } // namespace content |
| 181 | 193 |
| 182 #endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ | 194 #endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_ |