Chromium Code Reviews

Index: content/renderer/media/webrtc_audio_processor_unittest.cc
diff --git a/content/renderer/media/webrtc_audio_processor_unittest.cc b/content/renderer/media/webrtc_audio_processor_unittest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..bf1b90eeaba7c91b1cf4d189aca2f88664f1c41b
--- /dev/null
+++ b/content/renderer/media/webrtc_audio_processor_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "content/public/common/content_switches.h"
+#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/webrtc_audio_processor.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::AtLeast;
+using ::testing::Return;
+
+namespace content {
+
+namespace {
+
+#if defined(OS_ANDROID)
+const int kAudioProcessingSampleRate = 16000;
+#else
+const int kAudioProcessingSampleRate = 32000;
+#endif
+const int kAudioProcessingNumberOfChannel = 1;
+
+// The number of packets used for testing.
+const int kNumberOfPacketsForTest = 100;
+
+void ReadDataFromSpeechFile(char* data, int length) {
+  base::FilePath file;
+  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
+  file = file.Append(FILE_PATH_LITERAL("media"))
+             .Append(FILE_PATH_LITERAL("test"))
+             .Append(FILE_PATH_LITERAL("data"))
+             .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
+  DCHECK(base::PathExists(file));
+  int64 data_file_size64 = 0;
+  CHECK(file_util::GetFileSize(file, &data_file_size64));
+  EXPECT_EQ(length, file_util::ReadFile(file, data, length));
+  DCHECK(data_file_size64 > length);
+}
+
+// Constant constraint keys which enable the default audio constraints on
+// media streams with audio.
+struct {
+  const char* key;
+  const char* value;
+} const kDefaultAudioConstraints[] = {
+  { webrtc::MediaConstraintsInterface::kEchoCancellation,
+    webrtc::MediaConstraintsInterface::kValueTrue },
+#if defined(OS_CHROMEOS) || defined(OS_MACOSX)
+  // Enable the extended filter mode AEC on platforms with known echo issues.
+  { webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
+    webrtc::MediaConstraintsInterface::kValueTrue },
+#endif
+  { webrtc::MediaConstraintsInterface::kAutoGainControl,
+    webrtc::MediaConstraintsInterface::kValueTrue },
+  { webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
+    webrtc::MediaConstraintsInterface::kValueTrue },
+  { webrtc::MediaConstraintsInterface::kNoiseSuppression,
+    webrtc::MediaConstraintsInterface::kValueTrue },
+  { webrtc::MediaConstraintsInterface::kHighpassFilter,
+    webrtc::MediaConstraintsInterface::kValueTrue },
+};
+
+void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints) {
+  for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
+    constraints->AddMandatory(kDefaultAudioConstraints[i].key,
+                              kDefaultAudioConstraints[i].value, false);
+  }
+}
+
+}  // namespace
+
+class WebRtcAudioProcessorTest : public ::testing::Test {
+ public:
+  WebRtcAudioProcessorTest()
+      : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+                media::CHANNEL_LAYOUT_STEREO, 48000, 16, 512) {
+    CommandLine::ForCurrentProcess()->AppendSwitch(
+        switches::kEnableAudioTrackProcessing);
+  }
+
+ protected:
+  // Helper method that avoids duplicating the processing loop in each test.
+  void ProcessDataAndVerifyFormat(WebRtcAudioProcessor* audio_processor,
+                                  int expected_output_sample_rate,
+                                  int expected_output_channels,
+                                  int expected_output_buffer_size) {
+    // Read the audio data from a file.
+    const int packet_size =
+        params_.frames_per_buffer() * 2 * params_.channels();
+    const size_t length = packet_size * kNumberOfPacketsForTest;
+    scoped_ptr<char[]> capture_data(new char[length]);
[Inline comment thread]
DaleCurtis, 2013/11/07 20:44:08: You can use media/base/test_data_util.h to simplify …
no longer working on chromium, 2013/11/08 13:01:15: Thanks for the tips, but I think I will just keep …
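For reference, a minimal sketch of the reviewer's test_data_util.h suggestion — illustration only, not part of the patch. It assumes media::GetTestDataFilePath() from media/base/test_data_util.h resolves names under media/test/data, which is how other media unit tests of this era use it:

    // Hypothetical replacement for ReadDataFromSpeechFile() above.
    #include "media/base/test_data_util.h"

    void ReadDataFromSpeechFile(char* data, int length) {
      // Resolves <src>/media/test/data/speech_16b_stereo_48kHz.raw.
      base::FilePath file =
          media::GetTestDataFilePath("speech_16b_stereo_48kHz.raw");
      DCHECK(base::PathExists(file));
      EXPECT_EQ(length, file_util::ReadFile(file, data, length));
    }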
+    ReadDataFromSpeechFile(capture_data.get(), length);
+    const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
+    scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
+        params_.channels(), params_.frames_per_buffer());
+    for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
+      data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
+      audio_processor->PushCaptureData(data_bus.get());
+
+      // Feed the same data to the processor as render data; this costs nothing
+      // when audio processing is disabled in the processor.
+      audio_processor->PushRenderData(
+          data_ptr,
+          params_.sample_rate(), params_.channels(),
+          params_.frames_per_buffer(), 10);
+
+      // Process and consume the data in the processor.
+      int16* output = NULL;
+      while (audio_processor->ProcessAndConsumeData(10, 255, false, &output)) {
+        EXPECT_TRUE(output != NULL);
+        EXPECT_EQ(audio_processor->OutputFormat().sample_rate(),
+                  expected_output_sample_rate);
+        EXPECT_EQ(audio_processor->OutputFormat().channels(),
+                  expected_output_channels);
+        EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(),
+                  expected_output_buffer_size);
+      }
+
+      data_ptr += params_.frames_per_buffer() * params_.channels();
+    }
+  }
+
+  media::AudioParameters params_;
+};
+
+TEST_F(WebRtcAudioProcessorTest, WithoutAudioProcessing) {
+  // Set up the audio processor with an empty constraint set.
+  RTCMediaConstraints constraints;
+  scoped_ptr<WebRtcAudioProcessor> audio_processor(
+      new WebRtcAudioProcessor(&constraints));
+  audio_processor->SetCaptureFormat(params_);
+  EXPECT_FALSE(audio_processor->has_audio_processing());
+
+  ProcessDataAndVerifyFormat(audio_processor.get(),
+                             params_.sample_rate(),
+                             params_.channels(),
+                             params_.sample_rate() / 100);
+}
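Note on the expected buffer size: sample_rate() / 100 is a 10 ms buffer, the chunk size WebRTC audio processing consumes — 480 frames at the 48 kHz capture rate used here. The processed test below expects kAudioProcessingSampleRate / 100 (320 frames at 32 kHz, or 160 frames at 16 kHz on Android) for the same reason.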
+
+TEST_F(WebRtcAudioProcessorTest, WithAudioProcessing) {
+  // Set up the audio processor with the default constraints.
+  RTCMediaConstraints constraints;
+  ApplyFixedAudioConstraints(&constraints);
+  scoped_ptr<WebRtcAudioProcessor> audio_processor(
[Inline comment thread]
DaleCurtis, 2013/11/07 20:44:08: Doesn't need to be a scoped_ptr. Ditto above.
no longer working on chromium, 2013/11/08 13:01:15: Done.
+      new WebRtcAudioProcessor(&constraints));
+  audio_processor->SetCaptureFormat(params_);
+  EXPECT_TRUE(audio_processor->has_audio_processing());
+
+  ProcessDataAndVerifyFormat(audio_processor.get(),
+                             kAudioProcessingSampleRate,
+                             kAudioProcessingNumberOfChannel,
+                             kAudioProcessingSampleRate / 100);
+}
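Following up on the reviewer's point that the processor need not be held in a scoped_ptr (answered "Done" above), a hypothetical stack-allocated form of this test body — a sketch only, assuming WebRtcAudioProcessor can be constructed directly on the stack:

    RTCMediaConstraints constraints;
    ApplyFixedAudioConstraints(&constraints);
    WebRtcAudioProcessor audio_processor(&constraints);  // No heap allocation.
    audio_processor.SetCaptureFormat(params_);
    EXPECT_TRUE(audio_processor.has_audio_processing());
    ProcessDataAndVerifyFormat(&audio_processor,
                               kAudioProcessingSampleRate,
                               kAudioProcessingNumberOfChannel,
                               kAudioProcessingSampleRate / 100);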
+
+}  // namespace content
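Assuming the new file is wired into the content_unittests target (the usual home for content/renderer unit tests; the build file change is not part of this diff), the two tests can be selected with a gtest filter such as --gtest_filter=WebRtcAudioProcessorTest.*.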