OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #ifndef CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_PROCESSOR_H_ | |
6 #define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_PROCESSOR_H_ | |
7 | |
8 #include "base/synchronization/lock.h" | |
9 #include "base/threading/thread_checker.h" | |
10 #include "base/time/time.h" | |
11 #include "content/common/content_export.h" | |
12 #include "media/base/audio_converter.h" | |
13 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h" | |
14 #include "third_party/webrtc/modules/audio_processing/include/audio_processing.h" | |
15 #include "third_party/webrtc/modules/interface/module_common_types.h" | |
16 | |
17 namespace media { | |
18 class AudioBus; | |
19 class AudioFifo; | |
20 class AudioParameters; | |
21 } // namespace media | |
22 | |
23 namespace webrtc { | |
24 class AudioFrame; | |
25 } | |
26 | |
27 namespace content { | |
28 | |
29 // This class owns an object of webrtc::AudioProcessing which contains signal | |
30 // processing components like AGC, AEC and NS. It enables the components based | |
31 // on the constraints, processes the data and outputs it in a unit of 10 ms | |
32 // data chunk. | |
33 class CONTENT_EXPORT WebRtcAudioProcessor { | |
34 public: | |
35 explicit WebRtcAudioProcessor( | |
36 const webrtc::MediaConstraintsInterface* constraints); | |
37 ~WebRtcAudioProcessor(); | |
38 | |
39 // Pushes capture data in |audio_source| to the internal FIFO. | |
40 // Called on the capture audio thread. | |
41 void PushCaptureData(media::AudioBus* audio_source); | |
42 | |
43 // Processes a block of 10 ms data from the internal FIFO and outputs it via | |
44 // |out|. |out| is the address of the pointer that will be pointed to | |
45 // the post-processed data if the method is returning a true. | |
Jói
2013/11/08 16:44:22
What about ownership? Is the data allocated by thi
no longer working on chromium
2013/11/11 14:35:25
This processor still owns the pointer, |out| just
| |
46 // Returns true if the internal FIFO has at least 10ms data for processing, | |
47 // otherwise false. | |
48 // Called on the capture audio thread. | |
49 bool ProcessAndConsumeData(const base::TimeDelta& capture_delay, | |
DaleCurtis
2013/11/08 21:00:48
base::TimeDelta can be passed by value efficiently
no longer working on chromium
2013/11/11 14:35:25
Done with passing the value.
| |
50 int volume, | |
51 bool key_pressed, | |
52 int16** out); | |
53 | |
54 // Called when the format of the capture data has changed. | |
55 // Called on the main render thread. | |
56 void SetCaptureFormat(const media::AudioParameters& source_params); | |
57 | |
58 // Push the render audio to WebRtc::AudioProcessing for analysis. This is | |
59 // needed iff echo processing is enabled. | |
60 // |render_audio| is the pointer to the render audio data, its format | |
61 // is specified by |sample_rate|, |number_of_channels| and |number_of_frames|. | |
62 // Called on the render audio thread. | |
63 void PushRenderData(const int16* render_audio, | |
64 int sample_rate, | |
65 int number_of_channels, | |
66 int number_of_frames, | |
67 const base::TimeDelta& render_delay); | |
68 | |
69 // The audio format of the output from the processor. | |
70 const media::AudioParameters& OutputFormat() const; | |
71 | |
72 // Accessor to check if the audio processing is enabled or not. | |
73 bool has_audio_processing() const { return audio_processing_.get() != NULL; } | |
74 | |
75 private: | |
76 class WebRtcAudioConverter; | |
77 | |
78 // Helper to initialize the WebRtc AudioProcessing. | |
79 void InitializeAudioProcessingModule( | |
80 const webrtc::MediaConstraintsInterface* constraints); | |
81 | |
82 // Helper to initialize the render converter. | |
83 void InitializeRenderConverterIfNeeded(int sample_rate, | |
84 int number_of_channels, | |
85 int frames_per_buffer); | |
86 | |
87 // Called by ProcessAndConsumeData(). | |
88 void ProcessData(webrtc::AudioFrame* audio_frame, | |
89 const base::TimeDelta& capture_delay, | |
90 int volume, | |
91 bool key_pressed); | |
92 | |
93 // Called when the processor is going away. | |
94 void StopAudioProcessing(); | |
95 | |
96 // Cached value for the render delay latency. | |
97 base::TimeDelta render_delay_; | |
98 | |
99 // Protects |render_delay_|. | |
100 // TODO(xians): Can we get rid of the lock? | |
101 mutable base::Lock lock_; | |
102 | |
103 // WebRtc AudioProcessing module which does AEC, AGC, NS, HighPass filter, | |
104 // ..etc. | |
105 scoped_ptr<webrtc::AudioProcessing> audio_processing_; | |
106 | |
107 // Converter used for the down-mixing and resampling of the capture data. | |
108 scoped_ptr<WebRtcAudioConverter> capture_converter_; | |
109 | |
110 // AudioFrame used to hold the output of |capture_converter_|. | |
111 webrtc::AudioFrame capture_frame_; | |
112 | |
113 // Converter used for the down-mixing and resampling of the render data when | |
114 // the AEC is enabled. | |
115 scoped_ptr<WebRtcAudioConverter> render_converter_; | |
116 | |
117 // AudioFrame used to hold the output of |render_converter_|. | |
118 webrtc::AudioFrame render_frame_; | |
119 | |
120 // Data bus to help converting interleaved data to an AudioBus. | |
121 scoped_ptr<media::AudioBus> render_data_bus_; | |
122 | |
123 // Used to DCHECK that some methods are called on the main render thread. | |
124 base::ThreadChecker main_thread_checker_; | |
125 | |
126 // Used to DCHECK that some methods are called on the capture audio thread. | |
127 base::ThreadChecker capture_thread_checker_; | |
128 | |
129 // Used to DCHECK that PushRenderData() is called on the render audio thread. | |
130 base::ThreadChecker render_thread_checker_; | |
131 }; | |
132 | |
133 } // namespace content | |
134 | |
135 #endif // CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_PROCESSOR_H_ | |
OLD | NEW |