Chromium Code Reviews
Unified diff: content/renderer/media/webrtc_audio_device_unittest.cc

Issue 90743004: Add generic interfaces for the sinks of the media stream audio track (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: fixed the nits. Created 7 years ago
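Note: the change below renames the capture-side sink interface from WebRtcAudioCapturerSink to PeerConnectionAudioSink (with a TODO to eventually move the test onto MediaStreamAudioSink). Pieced together from the hunks in this diff, the new interface looks roughly like the sketch below; the header it lives in, the destructor visibility, and anything beyond OnData()/OnSetFormat() are not visible here, so treat this as an inferred outline rather than the authoritative declaration.

// Sketch inferred from this diff only; exact header location and the
// meaning of OnData()'s return value are assumptions.
class PeerConnectionAudioSink {
 public:
  // Receives a buffer of interleaved int16 capture data plus the VoE
  // channel ids it should be delivered to. The mock in this test simply
  // returns 0; the precise return-value contract is not shown in the diff.
  virtual int OnData(const int16* audio_data,
                     int sample_rate,
                     int number_of_channels,
                     int number_of_frames,
                     const std::vector<int>& channels,
                     int audio_delay_milliseconds,
                     int current_volume,
                     bool need_audio_processing,
                     bool key_pressed) = 0;

  // Notifies the sink of the capture audio parameters.
  virtual void OnSetFormat(const media::AudioParameters& params) = 0;

 protected:
  virtual ~PeerConnectionAudioSink() {}
};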
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <vector>

 #include "base/environment.h"
 #include "base/file_util.h"
 #include "base/files/file_path.h"
 #include "base/path_service.h"
(...skipping 117 matching lines...)
   webrtc_audio_device->AddAudioCapturer(capturer);

   return true;
 }

 // Create and start a local audio track. Starting the audio track will connect
 // the audio track to the capturer and also start the source of the capturer.
 // Also, connect the sink to the audio track.
 scoped_refptr<WebRtcLocalAudioTrack>
 CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
-                              WebRtcAudioCapturerSink* sink) {
+                              PeerConnectionAudioSink* sink) {
   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
       WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL));
   local_audio_track->AddSink(sink);
   local_audio_track->Start();
   return local_audio_track;
 }

 class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
  public:
   explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
(...skipping 49 matching lines...)
   base::WaitableEvent* event_;
   int channel_id_;
   webrtc::ProcessingTypes type_;
   int packet_size_;
   int sample_rate_;
   int channels_;
   mutable base::Lock lock_;
   DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
 };

-class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
+// TODO(xians): Use MediaStreamAudioSink.
+class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
  public:
-  explicit MockWebRtcAudioCapturerSink(base::WaitableEvent* event)
+  explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
       : event_(event) {
     DCHECK(event_);
   }
-  virtual ~MockWebRtcAudioCapturerSink() {}
+  virtual ~MockMediaStreamAudioSink() {}

-  // WebRtcAudioCapturerSink implementation.
-  virtual int CaptureData(const std::vector<int>& channels,
-                          const int16* audio_data,
-                          int sample_rate,
-                          int number_of_channels,
-                          int number_of_frames,
+  // PeerConnectionAudioSink implementation.
+  virtual int OnData(const int16* audio_data,
+                     int sample_rate,
+                     int number_of_channels,
+                     int number_of_frames,
+                     const std::vector<int>& channels,
                      int audio_delay_milliseconds,
                      int current_volume,
                      bool need_audio_processing,
                      bool key_pressed) OVERRIDE {
     // Signal that a callback has been received.
     event_->Signal();
     return 0;
   }

   // Set the format for the capture audio parameters.
-  virtual void SetCaptureFormat(
+  virtual void OnSetFormat(
       const media::AudioParameters& params) OVERRIDE {}

  private:
   base::WaitableEvent* event_;

-  DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioCapturerSink);
+  DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
 };

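For comparison, the hand-rolled mock above could also be written with gmock, which other Chromium unittests already use; an expectation action would signal the same base::WaitableEvent. This is only an illustrative sketch (the class name is invented here), not part of the patch.

#include "testing/gmock/include/gmock/gmock.h"

// Hypothetical gmock-based equivalent of MockMediaStreamAudioSink.
class GMockPeerConnectionAudioSink : public PeerConnectionAudioSink {
 public:
  MOCK_METHOD9(OnData, int(const int16* audio_data,
                           int sample_rate,
                           int number_of_channels,
                           int number_of_frames,
                           const std::vector<int>& channels,
                           int audio_delay_milliseconds,
                           int current_volume,
                           bool need_audio_processing,
                           bool key_pressed));
  MOCK_METHOD1(OnSetFormat, void(const media::AudioParameters& params));
};

// Usage, mirroring the WaitableEvent handshake in the test further down:
//   GMockPeerConnectionAudioSink sink;
//   EXPECT_CALL(sink, OnData(_, _, _, _, _, _, _, _, _))
//       .WillRepeatedly(testing::DoAll(
//           testing::InvokeWithoutArgs(&event, &base::WaitableEvent::Signal),
//           testing::Return(0)));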
 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
  public:
   explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
       : event_(event) {
     DCHECK(event_);
   }
   virtual ~MockWebRtcAudioRendererSource() {}

(...skipping 73 matching lines...)
                                 bool enable_apm) {
   scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
       new WebRtcAudioDeviceImpl());
   WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
   EXPECT_TRUE(engine.valid());
   ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
   EXPECT_TRUE(base.valid());
   int err = base->Init(webrtc_audio_device.get());
   EXPECT_EQ(0, err);

-  // We use SetCaptureFormat() and SetRenderFormat() to configure the audio
+  // We use OnSetFormat() and SetRenderFormat() to configure the audio
   // parameters so that this test can run on machine without hardware device.
   const media::AudioParameters params = media::AudioParameters(
       media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
       48000, 2, 480);
-  WebRtcAudioCapturerSink* capturer_sink =
-      static_cast<WebRtcAudioCapturerSink*>(webrtc_audio_device.get());
+  PeerConnectionAudioSink* capturer_sink =
+      static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
   WebRtcAudioRendererSource* renderer_source =
       static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
   renderer_source->SetRenderFormat(params);

   // Turn on/off all the signal processing components like AGC, AEC and NS.
   ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
   EXPECT_TRUE(audio_processing.valid());
   audio_processing->SetAgcStatus(enable_apm);
   audio_processing->SetNsStatus(enable_apm);
   audio_processing->SetEcStatus(enable_apm);
(...skipping 23 matching lines...)
   ReadDataFromSpeechFile(capture_data.get(), length);

   // Start the timer.
   scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
   base::Time start_time = base::Time::Now();
   int delay = 0;
   std::vector<int> voe_channels;
   voe_channels.push_back(channel);
   for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
     // Sending fake capture data to WebRtc.
-    capturer_sink->CaptureData(
-        voe_channels,
+    capturer_sink->OnData(
         reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
         params.sample_rate(),
         params.channels(),
         params.frames_per_buffer(),
+        voe_channels,
         kHardwareLatencyInMs,
         1.0,
         enable_apm,
         false);

     // Receiving data from WebRtc.
     renderer_source->RenderData(
         reinterpret_cast<uint8*>(buffer.get()),
         num_output_channels, webrtc_audio_device->output_buffer_size(),
         kHardwareLatencyInMs + delay);
(...skipping 455 matching lines...)
   ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
   ASSERT_TRUE(base.valid());
   int err = base->Init(webrtc_audio_device.get());
   ASSERT_EQ(0, err);

   int ch = base->CreateChannel();
   EXPECT_NE(-1, ch);

   EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
   base::WaitableEvent event(false, false);
-  scoped_ptr<MockWebRtcAudioCapturerSink> sink(
-      new MockWebRtcAudioCapturerSink(&event));
+  scoped_ptr<MockMediaStreamAudioSink> sink(
+      new MockMediaStreamAudioSink(&event));

   // Create and start a local audio track. Starting the audio track will connect
   // the audio track to the capturer and also start the source of the capturer.
   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
       CreateAndStartLocalAudioTrack(
           webrtc_audio_device->GetDefaultCapturer().get(), sink.get()));

   // connect the VoE voice channel to the audio track.
   static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
       GetRenderer()->AddChannel(ch);
(...skipping 93 matching lines...)
     LOG(WARNING) << "Test disabled due to the test hangs on WinXP.";
     return;
   }
 #endif
   int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
   PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
                     "t", latency);
 }

 }  // namespace content