Index: content/renderer/media/webrtc_audio_device_impl.h
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index debb4b71b2e630c39d0c68675169519d146f4e0a..e235320889ce909c7a273100fef3235e319731e4 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -199,39 +199,6 @@ class WebRtcAudioRendererSource {
   virtual ~WebRtcAudioRendererSource() {}
 };
 
-class PeerConnectionAudioSink {
- public:
-  // Callback to deliver the captured interleaved data.
-  // |channels| contains a vector of WebRtc VoE channels.
-  // |audio_data| is the pointer to the audio data.
-  // |sample_rate| is the sample frequency of audio data.
-  // |number_of_channels| is the number of channels reflecting the order of
-  // surround sound channels.
-  // |audio_delay_milliseconds| is recording delay value.
-  // |current_volume| is current microphone volume, in range of |0, 255].
-  // |need_audio_processing| indicates if the audio needs WebRtc AEC/NS/AGC
-  // audio processing.
-  // The return value is the new microphone volume, in the range of |0, 255].
-  // When the volume does not need to be updated, it returns 0.
-  virtual int OnData(const int16* audio_data,
-                     int sample_rate,
-                     int number_of_channels,
-                     int number_of_frames,
-                     const std::vector<int>& channels,
-                     int audio_delay_milliseconds,
-                     int current_volume,
-                     bool need_audio_processing,
-                     bool key_pressed) = 0;
-
-  // Set the format for the capture audio parameters.
-  // This is called when the capture format has changed, and it must be called
-  // on the same thread as calling CaptureData().
-  virtual void OnSetFormat(const media::AudioParameters& params) = 0;
-
- protected:
-  virtual ~PeerConnectionAudioSink() {}
-};
-
 // TODO(xians): Merge this interface with WebRtcAudioRendererSource.
 // The reason why we could not do it today is that WebRtcAudioRendererSource
 // gets the data by pulling, while the data is pushed into
@@ -268,8 +235,7 @@ class WebRtcPlayoutDataSource {
 // the high number of non-implemented methods, we move the cruft over to the
 // WebRtcAudioDeviceNotImpl.
 class CONTENT_EXPORT WebRtcAudioDeviceImpl
-    : NON_EXPORTED_BASE(public PeerConnectionAudioSink),
-      NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
+    : NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
       NON_EXPORTED_BASE(public WebRtcAudioRendererSource),
       NON_EXPORTED_BASE(public WebRtcPlayoutDataSource) {
  public:
@@ -363,22 +329,6 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
   // Make destructor private to ensure that we can only be deleted by Release().
   ~WebRtcAudioDeviceImpl() override;
 
-  // PeerConnectionAudioSink implementation.
-
-  // Called on the AudioInputDevice worker thread.
-  int OnData(const int16* audio_data,
-             int sample_rate,
-             int number_of_channels,
-             int number_of_frames,
-             const std::vector<int>& channels,
-             int audio_delay_milliseconds,
-             int current_volume,
-             bool need_audio_processing,
-             bool key_pressed) override;
-
-  // Called on the AudioInputDevice worker thread.
-  void OnSetFormat(const media::AudioParameters& params) override;
-
   // WebRtcAudioRendererSource implementation.
 
   // Called on the AudioOutputDevice worker thread.
@@ -445,9 +395,6 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
   // It is only accessed by the audio render thread.
   std::vector<int16> render_buffer_;
 
-  // Flag to tell if audio processing is enabled in MediaStreamAudioProcessor.
-  const bool is_audio_track_processing_enabled_;
-
   DISALLOW_COPY_AND_ASSIGN(WebRtcAudioDeviceImpl);
 };
 