// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_
#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_

#include "base/atomicops.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "content/common/content_export.h"
#include "media/base/audio_converter.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
#include "third_party/webrtc/modules/audio_processing/include/audio_processing.h"
#include "third_party/webrtc/modules/interface/module_common_types.h"

namespace media {
class AudioBus;
class AudioFifo;
class AudioParameters;
}  // namespace media

namespace webrtc {
class AudioFrame;
}

namespace content {

// This class owns a webrtc::AudioProcessing object, which contains signal
// processing components like AGC, AEC and NS. It enables the components based
// on the getUserMedia constraints, processes the data, and outputs it in
// chunks of 10 ms.
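//
// A minimal usage sketch (hypothetical caller code, assuming a capture
// pipeline that delivers audio buses and following the per-method threading
// rules documented below):
//
//   MediaStreamAudioProcessor processor(constraints);
//   processor.SetCaptureFormat(source_params);  // Main render thread.
//
//   // Capture audio thread, for every incoming buffer:
//   processor.PushCaptureData(audio_bus);
//   int16* output = NULL;
//   while (processor.ProcessAndConsumeData(capture_delay, volume, key_pressed,
//                                          &output)) {
//     // Forward |output|, which holds 10 ms of audio in OutputFormat().
//   }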
class CONTENT_EXPORT MediaStreamAudioProcessor {
 public:
  explicit MediaStreamAudioProcessor(
      const webrtc::MediaConstraintsInterface* constraints);
  ~MediaStreamAudioProcessor();

  // Pushes capture data in |audio_source| to the internal FIFO.
  // Called on the capture audio thread.
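  // For example (illustrative arithmetic only): a 44.1 kHz source delivering
  // 128-frame buffers needs roughly four pushes before the FIFO holds the
  // 441 source frames that make up one 10 ms chunk.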
  void PushCaptureData(media::AudioBus* audio_source);

  // Pushes the render audio to webrtc::AudioProcessing for analysis. This is
  // needed if and only if echo processing is enabled.
  // |render_audio| points to the render audio data; its format is specified
  // by |sample_rate|, |number_of_channels| and |number_of_frames|.
  // Called on the render audio thread.
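  // For example (illustrative arithmetic only): 10 ms of stereo render audio
  // at 48 kHz is number_of_frames = 480 and number_of_channels = 2, i.e. 960
  // interleaved int16 samples in |render_audio|.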
  void PushRenderData(const int16* render_audio,
                      int sample_rate,
                      int number_of_channels,
                      int number_of_frames,
                      base::TimeDelta render_delay);

  // Processes a block of 10 ms data from the internal FIFO and outputs it via
  // |out|. |out| is the address of a pointer that will point to the
  // post-processed data if the method returns true. The data represented by
  // |out| is guaranteed to outlive the method call, and *|out| will not
  // change until this method is called again.
  // Returns true if the internal FIFO has at least 10 ms of data to process,
  // otherwise false.
  // |capture_delay|, |volume| and |key_pressed| will be passed to
  // webrtc::AudioProcessing to help process the data.
  // Called on the capture audio thread.
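  // For example (illustrative arithmetic only): if OutputFormat() describes
  // mono audio at 16 kHz, a successful call leaves 160 frames (10 ms) of
  // processed data at *|out|.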
  bool ProcessAndConsumeData(base::TimeDelta capture_delay,
                             int volume,
                             bool key_pressed,
                             int16** out);

  // Called when the format of the capture data has changed.
  // This has to be called before PushCaptureData() and ProcessAndConsumeData().
  // Called on the main render thread.
  void SetCaptureFormat(const media::AudioParameters& source_params);

  // The audio format of the output from the processor.
  const media::AudioParameters& OutputFormat() const;

  // Accessor to check whether audio processing is enabled.
  bool has_audio_processing() const { return audio_processing_.get() != NULL; }

 private:
  class MediaStreamAudioConverter;

  // Helper to initialize the webrtc::AudioProcessing module.
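  // For example (assumed constraint-to-component mapping; the actual names
  // come from webrtc::MediaConstraintsInterface): a getUserMedia constraint
  // such as "googEchoCancellation: false" would leave the AEC disabled.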
  void InitializeAudioProcessingModule(
      const webrtc::MediaConstraintsInterface* constraints);

  // Helper to initialize the render converter.
  void InitializeRenderConverterIfNeeded(int sample_rate,
                                         int number_of_channels,
                                         int frames_per_buffer);

  // Called by ProcessAndConsumeData().
  void ProcessData(webrtc::AudioFrame* audio_frame,
                   base::TimeDelta capture_delay,
                   int volume,
                   bool key_pressed);

  // Called when the processor is going away.
  void StopAudioProcessing();

  // Cached value of the render delay in milliseconds. This member is accessed
  // by both the capture audio thread and the render audio thread.
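  //
  // One plausible access pattern (a sketch using base/atomicops.h primitives,
  // not necessarily what the implementation does):
  //   base::subtle::Release_Store(&render_delay_ms_, delay_ms);  // Render.
  //   base::subtle::Acquire_Load(&render_delay_ms_);             // Capture.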
  base::subtle::Atomic32 render_delay_ms_;

  // webrtc::AudioProcessing module which does AEC, AGC, NS, high-pass
  // filtering, etc.
  scoped_ptr<webrtc::AudioProcessing> audio_processing_;

  // Converter used for the down-mixing and resampling of the capture data.
  scoped_ptr<MediaStreamAudioConverter> capture_converter_;

  // AudioFrame used to hold the output of |capture_converter_|.
  webrtc::AudioFrame capture_frame_;

  // Converter used for the down-mixing and resampling of the render data when
  // the AEC is enabled.
  scoped_ptr<MediaStreamAudioConverter> render_converter_;

  // AudioFrame used to hold the output of |render_converter_|.
  webrtc::AudioFrame render_frame_;

  // Data bus to help convert interleaved data to an AudioBus.
  scoped_ptr<media::AudioBus> render_data_bus_;

  // Used to DCHECK that some methods are called on the main render thread.
  base::ThreadChecker main_thread_checker_;

  // Used to DCHECK that some methods are called on the capture audio thread.
  base::ThreadChecker capture_thread_checker_;

  // Used to DCHECK that PushRenderData() is called on the render audio thread.
  base::ThreadChecker render_thread_checker_;
};

}  // namespace content

#endif  // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_H_