Chromium Code Reviews| Index: content/renderer/media/audio_track_recorder_unittest.cc |
| diff --git a/content/renderer/media/audio_track_recorder_unittest.cc b/content/renderer/media/audio_track_recorder_unittest.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..25de69104b2bbd97f05d9fe88e512b50c9ba5c84 |
| --- /dev/null |
| +++ b/content/renderer/media/audio_track_recorder_unittest.cc |
| @@ -0,0 +1,176 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/renderer/media/audio_track_recorder.h" |
| + |
| +#include "base/run_loop.h" |
| +#include "base/strings/utf_string_conversions.h" |
| +#include "content/renderer/media/media_stream_audio_source.h" |
| +#include "content/renderer/media/mock_media_constraint_factory.h" |
| +#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" |
| +#include "content/renderer/media/webrtc_local_audio_track.h" |
| +#include "media/audio/simple_sources.h" |
| +#include "testing/gmock/include/gmock/gmock.h" |
| +#include "testing/gtest/include/gtest/gtest.h" |
| +#include "third_party/WebKit/public/web/WebHeap.h" |
| + |
| +using ::testing::_; |
| +using ::testing::DoAll; |
| +using ::testing::InSequence; |
| +using ::testing::Mock; |
| +using ::testing::Return; |
| +using ::testing::SaveArg; |
| + |
namespace {

// Input audio format fed to the AudioTrackRecorder under test.
const media::AudioParameters::Format kInputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const int kBitsPerSample = 16;
const int kSamplingRate = 48000;
// 480 frames at 48 kHz is 10 ms of audio per buffer.
const int kFramesPerBuffer = 480;

}  // namespace
| + |
| +namespace content { |
| + |
// gmock action that runs |closure| when the expectation it is attached to is
// satisfied; used below to quit the test's RunLoop from the mocked callback.
ACTION_P(RunClosure, closure) {
  closure.Run();
}
| + |
| +class AudioTrackRecorderTest : public testing::Test { |
| + public: |
| + AudioTrackRecorderTest() |
| + : params1_(kInputFormat, |
| + media::CHANNEL_LAYOUT_MONO, |
| + kSamplingRate, |
| + kBitsPerSample, |
| + kFramesPerBuffer), |
| + params2_(kInputFormat, |
| + media::CHANNEL_LAYOUT_STEREO, |
| + kSamplingRate, |
| + kBitsPerSample, |
| + kFramesPerBuffer), |
| + mono_source_(1 /* # channels */, 440, kSamplingRate), |
| + stereo_source_(2 /* # channels */, 440, kSamplingRate) { |
| + PrepareBlinkTrack(); |
| + audio_track_recorder_.reset(new AudioTrackRecorder( |
| + blink_track_, base::Bind(&AudioTrackRecorderTest::OnEncodedAudio, |
| + base::Unretained(this)))); |
| + } |
| + |
| + ~AudioTrackRecorderTest() { |
| + audio_track_recorder_.reset(); |
| + blink_track_.reset(); |
| + blink::WebHeap::collectAllGarbageForTesting(); |
| + } |
| + |
| + scoped_ptr<media::AudioBus> NextAudioBus( |
| + int num_channels, const base::TimeDelta& duration) { |
| + // Only supports up to two channels for now. |
| + EXPECT_GE(num_channels, 1); |
| + EXPECT_LE(num_channels, 2); |
|
mcasas
2015/10/28 20:03:00
why not
EXPECT(num_channels == 1 || num_channels =
ajose
2015/10/28 21:54:06
Done.
|
| + const int num_samples = static_cast<int>((kSamplingRate * duration) / |
| + base::TimeDelta::FromSeconds(1)); |
| + scoped_ptr<media::AudioBus> bus( |
| + media::AudioBus::Create(num_channels, num_samples)); |
| + (num_channels == 1) ? mono_source_.OnMoreData(bus.get(), 0) |
| + : stereo_source_.OnMoreData(bus.get(), 0); |
|
mcasas
2015/10/28 20:03:00
I understand the code but I think it's the first t
ajose
2015/10/28 21:54:06
Done.
|
| + return bus.Pass(); |
| + } |
| + |
| + MOCK_METHOD3(DoOnEncodedAudio, |
| + void(const media::AudioParameters& params, |
| + std::string encoded_data, |
| + base::TimeTicks timestamp)); |
| + |
| + void OnEncodedAudio(const media::AudioParameters& params, |
| + scoped_ptr<std::string> encoded_data, |
| + base::TimeTicks timestamp) { |
| + EXPECT_TRUE(!encoded_data->empty()); |
| + DoOnEncodedAudio(params, *encoded_data, timestamp); |
| + } |
| + |
| + const base::MessageLoop message_loop_; |
| + |
| + // ATR and WebMediaStreamTrack for fooling it. |
| + scoped_ptr<AudioTrackRecorder> audio_track_recorder_; |
| + blink::WebMediaStreamTrack blink_track_; |
| + |
| + // Two different sets of AudioParameters for testing re-init of ATR. |
| + media::AudioParameters params1_; |
| + media::AudioParameters params2_; |
| + |
| + // AudioSources for creating AudioBuses. |
| + media::SineWaveAudioSource mono_source_; |
| + media::SineWaveAudioSource stereo_source_; |
| + |
| + private: |
| + // Prepares a blink track of a given MediaStreamType and attaches the native |
| + // track, which can be used to capture audio data and pass it to the producer. |
| + // Adapted from media::WebRTCLocalAudioSourceProviderTest. |
| + void PrepareBlinkTrack() { |
| + MockMediaConstraintFactory constraint_factory; |
| + scoped_refptr<WebRtcAudioCapturer> capturer( |
| + WebRtcAudioCapturer::CreateCapturer( |
| + -1, StreamDeviceInfo(), |
| + constraint_factory.CreateWebMediaConstraints(), NULL, NULL)); |
| + scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter( |
| + WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)); |
| + scoped_ptr<WebRtcLocalAudioTrack> native_track( |
| + new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL)); |
| + blink::WebMediaStreamSource audio_source; |
| + audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"), |
| + blink::WebMediaStreamSource::TypeAudio, |
| + base::UTF8ToUTF16("dummy_source_name"), |
| + false /* remote */, true /* readonly */); |
| + blink_track_.initialize(blink::WebString::fromUTF8("audio_track"), |
| + audio_source); |
| + blink_track_.setExtraData(native_track.release()); |
| + } |
| + |
| + DISALLOW_COPY_AND_ASSIGN(AudioTrackRecorderTest); |
| +}; |
| + |
// Feeds three buffers of audio through the recorder — two with the initial
// mono parameters and one after re-initializing to stereo — and verifies the
// encoded-audio callback fires exactly once per buffer, in order.
TEST_F(AudioTrackRecorderTest, OnData) {
  InSequence s;
  base::RunLoop run_loop;
  base::Closure quit_closure = run_loop.QuitClosure();

  // Give ATR initial (mono) audio parameters.
  audio_track_recorder_->OnSetFormat(params1_);
  // TODO(ajose): consider adding WillOnce(SaveArg...) and inspecting, as done
  // in VTR unittests.
  // TODO(ajose): Using 10ms chunks due to hard-coded 100fps framerate.
  // Need to figure out what to do about framerate.
  // NOTE(review): per reviewer feedback, attach a crbug.com id to these TODOs.
  const base::TimeTicks time1 = base::TimeTicks::Now();
  // First callback must carry exactly the timestamp we passed in.
  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, time1)).Times(1);
  audio_track_recorder_->OnData(
      *NextAudioBus(params1_.channels(),
                    base::TimeDelta::FromMilliseconds(10)), time1);

  // Send more audio.
  const base::TimeTicks time2 = base::TimeTicks::Now();
  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _)).Times(1);
  audio_track_recorder_->OnData(
      *NextAudioBus(params1_.channels(),
                    base::TimeDelta::FromMilliseconds(10)), time2);

  // Give ATR new (stereo) audio parameters.
  audio_track_recorder_->OnSetFormat(params2_);
  // Send audio with different params; the final expectation quits the
  // RunLoop so the test can finish.
  const base::TimeTicks time3 = base::TimeTicks::Now();
  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _))
      .Times(1)
      .WillOnce(RunClosure(quit_closure));
  audio_track_recorder_->OnData(
      *NextAudioBus(params2_.channels(),
                    base::TimeDelta::FromMilliseconds(10)), time3);

  run_loop.Run();
  Mock::VerifyAndClearExpectations(this);
}
| + |
| +} // namespace content |