| Index: content/renderer/media/webrtc_audio_processor_unittest.cc
 | 
| diff --git a/content/renderer/media/webrtc_audio_processor_unittest.cc b/content/renderer/media/webrtc_audio_processor_unittest.cc
 | 
| new file mode 100644
 | 
| index 0000000000000000000000000000000000000000..65e3f88828bae13c672b97062accad99bf8f95fb
 | 
| --- /dev/null
 | 
| +++ b/content/renderer/media/webrtc_audio_processor_unittest.cc
 | 
| @@ -0,0 +1,182 @@
 | 
| +// Copyright 2013 The Chromium Authors. All rights reserved.
 | 
| +// Use of this source code is governed by a BSD-style license that can be
 | 
| +// found in the LICENSE file.
 | 
| +
 | 
| +#include "base/command_line.h"
 | 
| +#include "base/file_util.h"
 | 
| +#include "base/files/file_path.h"
 | 
| +#include "base/path_service.h"
 | 
| +#include "content/public/common/content_switches.h"
 | 
| +#include "content/renderer/media/rtc_media_constraints.h"
 | 
| +#include "content/renderer/media/webrtc_audio_processor.h"
 | 
| +#include "media/audio/audio_parameters.h"
 | 
| +#include "media/base/audio_bus.h"
 | 
| +#include "testing/gmock/include/gmock/gmock.h"
 | 
| +#include "testing/gtest/include/gtest/gtest.h"
 | 
| +#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
 | 
| +
 | 
| +using ::testing::_;
 | 
| +using ::testing::AnyNumber;
 | 
| +using ::testing::AtLeast;
 | 
| +using ::testing::Return;
 | 
| +
 | 
| +namespace content {
 | 
| +
 | 
| +namespace {
 | 
| +
 | 
| +#if defined(OS_ANDROID)
 | 
| +const int kAudioProcessingSampleRate = 16000;
 | 
| +#else
 | 
| +const int kAudioProcessingSampleRate = 32000;
 | 
| +#endif
 | 
| +const int kAudioProcessingNumberOfChannel = 1;
 | 
| +
 | 
| +// The number of packers used for testing.
 | 
| +const int kNumberOfPacketsForTest = 100;
 | 
| +
 | 
| +void ReadDataFromSpeechFile(char* data, int length) {
 | 
| +  base::FilePath file;
 | 
| +  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
 | 
| +  file = file.Append(FILE_PATH_LITERAL("media"))
 | 
| +             .Append(FILE_PATH_LITERAL("test"))
 | 
| +             .Append(FILE_PATH_LITERAL("data"))
 | 
| +             .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
 | 
| +  DCHECK(base::PathExists(file));
 | 
| +  int64 data_file_size64 = 0;
 | 
| +  // CHECK, not DCHECK: the call has a side effect and must run in all builds.
 | 
| +  CHECK(file_util::GetFileSize(file, &data_file_size64));
 | 
| +  EXPECT_EQ(length, file_util::ReadFile(file, data, length));
 | 
| +}
 | 
| +
 | 
| +// Constant constraint keys which enable default audio constraints on
 | 
| +// mediastreams with audio.
 | 
| +struct {
 | 
| +  const char* key;
 | 
| +  const char* value;
 | 
| +} const kDefaultAudioConstraints[] = {
 | 
| +  { webrtc::MediaConstraintsInterface::kEchoCancellation,
 | 
| +    webrtc::MediaConstraintsInterface::kValueTrue },
 | 
| +#if defined(OS_CHROMEOS) || defined(OS_MACOSX)
 | 
| +  // Enable the extended filter mode AEC on platforms with known echo issues.
 | 
| +  { webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
 | 
| +    webrtc::MediaConstraintsInterface::kValueTrue },
 | 
| +#endif
 | 
| +  { webrtc::MediaConstraintsInterface::kAutoGainControl,
 | 
| +    webrtc::MediaConstraintsInterface::kValueTrue },
 | 
| +  { webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
 | 
| +    webrtc::MediaConstraintsInterface::kValueTrue },
 | 
| +  { webrtc::MediaConstraintsInterface::kNoiseSuppression,
 | 
| +    webrtc::MediaConstraintsInterface::kValueTrue },
 | 
| +  { webrtc::MediaConstraintsInterface::kHighpassFilter,
 | 
| +    webrtc::MediaConstraintsInterface::kValueTrue },
 | 
| +};
 | 
| +
 | 
| +// Adds every entry of |kDefaultAudioConstraints| as a mandatory constraint.
 | 
| +void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints) {
 | 
| +  for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints); ++i) {
 | 
| +    const char* key = kDefaultAudioConstraints[i].key;
 | 
| +    constraints->AddMandatory(key, kDefaultAudioConstraints[i].value, false);
 | 
| +  }
 | 
| +
 | 
| +}  // namespace
 | 
| +
 | 
| +class WebRtcAudioProcessorTest : public ::testing::Test {
 | 
| + protected:
 | 
| +  virtual void SetUp() OVERRIDE {
 | 
| +    CommandLine::ForCurrentProcess()->AppendSwitch(
 | 
| +        switches::kEnableWebRtcAudioProcessor);
 | 
| +    // Source format: 48 kHz stereo, 16 bits per sample, 512 frames per buffer.
 | 
| +    params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
 | 
| +                  media::CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 512);
 | 
| +  }
 | 
| +
 | 
| +  media::AudioParameters params_;
 | 
| +};
 | 
| +
 | 
| +TEST_F(WebRtcAudioProcessorTest, WithoutAudioProcessing) {
 | 
| +  // Setup the audio processor with empty constraint.
 | 
| +  RTCMediaConstraints constraints;
 | 
| +  scoped_ptr<WebRtcAudioProcessor> audio_processor(
 | 
| +      new WebRtcAudioProcessor(&constraints));
 | 
| +  audio_processor->SetCaptureFormat(params_);
 | 
| +  EXPECT_FALSE(audio_processor->has_audio_processing());
 | 
| +
 | 
| +  // Read the audio data from a file.
 | 
| +  const int packet_size = params_.frames_per_buffer() * 2 * params_.channels();
 | 
| +  const size_t length = packet_size * kNumberOfPacketsForTest;
 | 
| +  scoped_ptr<char[]> capture_data(new char[length]);
 | 
| +  ReadDataFromSpeechFile(capture_data.get(), length);
 | 
| +  const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
 | 
| +  scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
 | 
| +      params_.channels(), params_.frames_per_buffer());
 | 
| +  for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
 | 
| +    data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
 | 
| +    audio_processor->PushCaptureData(data_bus.get());
 | 
| +
 | 
| +    // Feed data as render data to the processor; this costs nothing when
 | 
| +    // the audio processing is off in the processor.
 | 
| +    audio_processor->FeedRenderDataToAudioProcessing(
 | 
| +        data_ptr, params_.sample_rate(), params_.channels(),
 | 
| +        params_.frames_per_buffer(), 10);
 | 
| +
 | 
| +    // Process and consume the data in the processor.
 | 
| +    int16* output = NULL;
 | 
| +    while (audio_processor->ProcessAndConsume10MsData(
 | 
| +        10, 255, false, &output)) {
 | 
| +      EXPECT_TRUE(output != NULL);
 | 
| +      EXPECT_EQ(params_.sample_rate(),
 | 
| +                audio_processor->OutputFormat().sample_rate());
 | 
| +      EXPECT_EQ(params_.channels(),
 | 
| +                audio_processor->OutputFormat().channels());
 | 
| +      EXPECT_EQ(params_.sample_rate() / 100,
 | 
| +                audio_processor->OutputFormat().frames_per_buffer());
 | 
| +    }
 | 
| +
 | 
| +    data_ptr += params_.frames_per_buffer() * params_.channels();
 | 
| +  }
 | 
| +}
 | 
| +
 | 
| +TEST_F(WebRtcAudioProcessorTest, WithAudioProcessing) {
 | 
| +  // Setup the audio processor with default constraint.
 | 
| +  RTCMediaConstraints constraints;
 | 
| +  ApplyFixedAudioConstraints(&constraints);
 | 
| +  scoped_ptr<WebRtcAudioProcessor> audio_processor(
 | 
| +      new WebRtcAudioProcessor(&constraints));
 | 
| +  audio_processor->SetCaptureFormat(params_);
 | 
| +  EXPECT_TRUE(audio_processor->has_audio_processing());
 | 
| +
 | 
| +  // Read the audio data from a file.
 | 
| +  const int packet_size = params_.frames_per_buffer() * 2 * params_.channels();
 | 
| +  const size_t length = packet_size * kNumberOfPacketsForTest;
 | 
| +  scoped_ptr<char[]> capture_data(new char[length]);
 | 
| +  ReadDataFromSpeechFile(capture_data.get(), length);
 | 
| +  const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
 | 
| +  scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
 | 
| +      params_.channels(), params_.frames_per_buffer());
 | 
| +  for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
 | 
| +    data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
 | 
| +    audio_processor->PushCaptureData(data_bus.get());
 | 
| +
 | 
| +    // Feed data as render data to the processor.
 | 
| +    audio_processor->FeedRenderDataToAudioProcessing(
 | 
| +        data_ptr, params_.sample_rate(), params_.channels(),
 | 
| +        params_.frames_per_buffer(), 10);
 | 
| +
 | 
| +    // Process and consume the data in the processor.
 | 
| +    int16* output = NULL;
 | 
| +    while (audio_processor->ProcessAndConsume10MsData(
 | 
| +        10, 255, false, &output)) {
 | 
| +      EXPECT_TRUE(output != NULL);
 | 
| +      EXPECT_EQ(kAudioProcessingSampleRate,
 | 
| +                audio_processor->OutputFormat().sample_rate());
 | 
| +      EXPECT_EQ(kAudioProcessingNumberOfChannel,
 | 
| +                audio_processor->OutputFormat().channels());
 | 
| +      EXPECT_EQ(kAudioProcessingSampleRate / 100,
 | 
| +                audio_processor->OutputFormat().frames_per_buffer());
 | 
| +    }
 | 
| +
 | 
| +    data_ptr += params_.frames_per_buffer() * params_.channels();
 | 
| +  }
 | 
| +}
 | 
| +
 | 
| +}  // namespace content
 | 
| 
 |