Index: content/renderer/media/webrtc_audio_device_unittest.cc
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index 6dcdda7033405830876fb72be6834f0b4d24adfa..a79176ec19d3d0d5acf3412d281d92f3b77abbdc 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -135,7 +135,7 @@ bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
 // Also, connect the sink to the audio track.
 scoped_refptr<WebRtcLocalAudioTrack>
 CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
-                              WebRtcAudioCapturerSink* sink) {
+                              PeerConnectionAudioSink* sink) {
   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
       WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL));
   local_audio_track->AddSink(sink);
@@ -205,37 +205,38 @@ class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
   DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
 };
 
-class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
+// TODO(xians): Use MediaStreamAudioSink.
+class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
  public:
-  explicit MockWebRtcAudioCapturerSink(base::WaitableEvent* event)
+  explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
       : event_(event) {
     DCHECK(event_);
   }
-  virtual ~MockWebRtcAudioCapturerSink() {}
-
-  // WebRtcAudioCapturerSink implementation.
-  virtual int CaptureData(const std::vector<int>& channels,
-                          const int16* audio_data,
-                          int sample_rate,
-                          int number_of_channels,
-                          int number_of_frames,
-                          int audio_delay_milliseconds,
-                          int current_volume,
-                          bool need_audio_processing,
-                          bool key_pressed) OVERRIDE {
+  virtual ~MockMediaStreamAudioSink() {}
+
+  // PeerConnectionAudioSink implementation.
+  virtual int OnData(const int16* audio_data,
+                     int sample_rate,
+                     int number_of_channels,
+                     int number_of_frames,
+                     const std::vector<int>& channels,
+                     int audio_delay_milliseconds,
+                     int current_volume,
+                     bool need_audio_processing,
+                     bool key_pressed) OVERRIDE {
     // Signal that a callback has been received.
     event_->Signal();
     return 0;
   }
 
   // Set the format for the capture audio parameters.
-  virtual void SetCaptureFormat(
+  virtual void OnSetFormat(
       const media::AudioParameters& params) OVERRIDE {}
 
  private:
   base::WaitableEvent* event_;
 
-  DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioCapturerSink);
+  DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
 };
 
 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
@@ -329,13 +330,13 @@ int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
   int err = base->Init(webrtc_audio_device.get());
   EXPECT_EQ(0, err);
 
-  // We use SetCaptureFormat() and SetRenderFormat() to configure the audio
+  // We use OnSetFormat() and SetRenderFormat() to configure the audio
   // parameters so that this test can run on machine without hardware device.
   const media::AudioParameters params = media::AudioParameters(
       media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
       48000, 2, 480);
-  WebRtcAudioCapturerSink* capturer_sink =
-      static_cast<WebRtcAudioCapturerSink*>(webrtc_audio_device.get());
+  PeerConnectionAudioSink* capturer_sink =
+      static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
   WebRtcAudioRendererSource* renderer_source =
       static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
   renderer_source->SetRenderFormat(params);
@@ -379,12 +380,12 @@ int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
   voe_channels.push_back(channel);
   for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
     // Sending fake capture data to WebRtc.
-    capturer_sink->CaptureData(
-        voe_channels,
+    capturer_sink->OnData(
         reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
         params.sample_rate(),
         params.channels(),
         params.frames_per_buffer(),
+        voe_channels,
         kHardwareLatencyInMs,
         1.0,
         enable_apm,
@@ -860,8 +861,8 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
   EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
 
   base::WaitableEvent event(false, false);
-  scoped_ptr<MockWebRtcAudioCapturerSink> sink(
-      new MockWebRtcAudioCapturerSink(&event));
+  scoped_ptr<MockMediaStreamAudioSink> sink(
+      new MockMediaStreamAudioSink(&event));
 
   // Create and start a local audio track. Starting the audio track will connect
   // the audio track to the capturer and also start the source of the capturer.
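
For reference, the stand-alone sketch below illustrates the interface change this patch makes to the capture callback: the renamed PeerConnectionAudioSink::OnData() takes the audio buffer and its format first and the VoE channel list after the frame count, whereas the old WebRtcAudioCapturerSink::CaptureData() took the channel list first. It is not part of the patch; FakeSink and FakeAudioParams are simplified stand-ins so the snippet builds without Chromium, and only the OnData()/OnSetFormat() names and parameter order come from the diff above.

// Minimal sketch of the new callback ordering. FakeAudioParams and FakeSink
// are invented stand-ins for media::AudioParameters and the
// PeerConnectionAudioSink interface; they are not Chromium classes.
#include <cstdint>
#include <iostream>
#include <vector>

struct FakeAudioParams {  // stand-in for media::AudioParameters
  int sample_rate = 48000;
  int channels = 2;
  int frames_per_buffer = 480;
};

class FakeSink {  // stand-in for a PeerConnectionAudioSink implementation
 public:
  // New ordering: audio buffer and format first, VoE channel list afterwards.
  int OnData(const int16_t* audio_data,
             int sample_rate,
             int number_of_channels,
             int number_of_frames,
             const std::vector<int>& channels,
             int audio_delay_milliseconds,
             int current_volume,
             bool need_audio_processing,
             bool key_pressed) {
    ++calls_;  // the real mock signals a base::WaitableEvent here
    return 0;
  }
  void OnSetFormat(const FakeAudioParams& params) { params_ = params; }
  int calls() const { return calls_; }

 private:
  FakeAudioParams params_;
  int calls_ = 0;
};

int main() {
  FakeSink sink;
  FakeAudioParams params;
  sink.OnSetFormat(params);

  std::vector<int16_t> buffer(params.frames_per_buffer * params.channels, 0);
  std::vector<int> voe_channels(1, 0);  // one fake VoE channel id

  // The old CaptureData() passed voe_channels as the first argument; OnData()
  // moves it after number_of_frames, matching the loopback test in the diff.
  sink.OnData(buffer.data(), params.sample_rate, params.channels,
              params.frames_per_buffer, voe_channels,
              /*audio_delay_milliseconds=*/20, /*current_volume=*/1,
              /*need_audio_processing=*/true, /*key_pressed=*/false);
  std::cout << "OnData calls: " << sink.calls() << std::endl;
  return 0;
}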