Index: content/renderer/media/webrtc_audio_device_impl.h
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index 86f8716911d84bb9e6a6c8b88174334f611c3cc5..f515c6e24cbc2bcea6d2bcb7fb027e6cc39b23e0 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -202,7 +202,7 @@ class WebRtcAudioRendererSource {
   virtual ~WebRtcAudioRendererSource() {}
 };
 
-class WebRtcAudioCapturerSink {
+class PeerConnectionAudioSink {
  public:
   // Callback to deliver the captured interleaved data.
   // |channels| contains a vector of WebRtc VoE channels.
@@ -216,31 +216,31 @@ class WebRtcAudioCapturerSink {
   // audio processing.
   // The return value is the new microphone volume, in the range of [0, 255].
   // When the volume does not need to be updated, it returns 0.
-  virtual int CaptureData(const std::vector<int>& channels,
-                          const int16* audio_data,
-                          int sample_rate,
-                          int number_of_channels,
-                          int number_of_frames,
-                          int audio_delay_milliseconds,
-                          int current_volume,
-                          bool need_audio_processing,
-                          bool key_pressed) = 0;
+  virtual int OnData(const int16* audio_data,
+                     int sample_rate,
+                     int number_of_channels,
+                     int number_of_frames,
+                     const std::vector<int>& channels,
+                     int audio_delay_milliseconds,
+                     int current_volume,
+                     bool need_audio_processing,
+                     bool key_pressed) = 0;
 
   // Set the format for the capture audio parameters.
   // This is called when the capture format has changed, and it must be called
   // on the same thread as calling CaptureData().
-  virtual void SetCaptureFormat(const media::AudioParameters& params) = 0;
+  virtual void OnSetFormat(const media::AudioParameters& params) = 0;
 
  protected:
-  virtual ~WebRtcAudioCapturerSink() {}
+  virtual ~PeerConnectionAudioSink() {}
 };
 
 // Note that this class inherits from webrtc::AudioDeviceModule but due to
 // the high number of non-implemented methods, we move the cruft over to the
 // WebRtcAudioDeviceNotImpl.
 class CONTENT_EXPORT WebRtcAudioDeviceImpl
-    : NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
-      NON_EXPORTED_BASE(public WebRtcAudioCapturerSink),
+    : NON_EXPORTED_BASE(public PeerConnectionAudioSink),
+      NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
       NON_EXPORTED_BASE(public WebRtcAudioRendererSource) {
  public:
   // The maximum volume value WebRtc uses.
@@ -327,21 +327,21 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
   // Make destructor private to ensure that we can only be deleted by Release().
   virtual ~WebRtcAudioDeviceImpl();
 
-  // WebRtcAudioCapturerSink implementation.
+  // PeerConnectionAudioSink implementation.
 
   // Called on the AudioInputDevice worker thread.
-  virtual int CaptureData(const std::vector<int>& channels,
-                          const int16* audio_data,
-                          int sample_rate,
-                          int number_of_channels,
-                          int number_of_frames,
-                          int audio_delay_milliseconds,
-                          int current_volume,
-                          bool need_audio_processing,
-                          bool key_pressed) OVERRIDE;
+  virtual int OnData(const int16* audio_data,
+                     int sample_rate,
+                     int number_of_channels,
+                     int number_of_frames,
+                     const std::vector<int>& channels,
+                     int audio_delay_milliseconds,
+                     int current_volume,
+                     bool need_audio_processing,
+                     bool key_pressed) OVERRIDE;
 
-  // Called on the main render thread.
-  virtual void SetCaptureFormat(const media::AudioParameters& params) OVERRIDE;
+  // Called on the AudioInputDevice worker thread.
+  virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
 
   // WebRtcAudioRendererSource implementation.
 
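For readers of the patch, here is a minimal sketch of what an implementation of the renamed PeerConnectionAudioSink interface might look like. It is illustrative only and not part of this change: the class name ExampleAudioSink is hypothetical, and the includes assume the Chromium layout of this era (base/basictypes.h for int16, base/compiler_specific.h for OVERRIDE, media/audio/audio_parameters.h for media::AudioParameters).

// Illustrative sketch only; not part of this change.
#include <vector>

#include "base/basictypes.h"            // int16
#include "base/compiler_specific.h"     // OVERRIDE
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_parameters.h"

namespace content {

// Hypothetical sink that observes captured audio delivered through
// PeerConnectionAudioSink. The signatures mirror the interface above.
class ExampleAudioSink : public PeerConnectionAudioSink {
 public:
  // Called on the AudioInputDevice worker thread with |number_of_frames|
  // frames of interleaved int16 samples for |number_of_channels| channels
  // at |sample_rate| Hz.
  virtual int OnData(const int16* audio_data,
                     int sample_rate,
                     int number_of_channels,
                     int number_of_frames,
                     const std::vector<int>& channels,
                     int audio_delay_milliseconds,
                     int current_volume,
                     bool need_audio_processing,
                     bool key_pressed) OVERRIDE {
    // Inspect or forward |audio_data| here. Returning 0 means the
    // microphone volume does not need to change; a non-zero value
    // (at most 255) requests a new volume.
    return 0;
  }

  // Called on the same capture worker thread when the format changes.
  virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE {
    params_ = params;
  }

 private:
  media::AudioParameters params_;
};

}  // namespace content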