| Index: media/cast/test/end2end_unittest.cc
|
| diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
|
| index 6949fa88aedf9f85ef67a87029905def2b753b98..b2037abe60c16f0c5da9d593aae7690d94256bf1 100644
|
| --- a/media/cast/test/end2end_unittest.cc
|
| +++ b/media/cast/test/end2end_unittest.cc
|
| @@ -17,9 +17,12 @@
|
|
|
| #include "base/bind.h"
|
| #include "base/bind_helpers.h"
|
| +#include "base/stl_util.h"
|
| #include "base/strings/string_number_conversions.h"
|
| +#include "base/sys_byteorder.h"
|
| #include "base/test/simple_test_tick_clock.h"
|
| #include "base/time/tick_clock.h"
|
| +#include "media/base/audio_bus.h"
|
| #include "media/base/video_frame.h"
|
| #include "media/cast/cast_config.h"
|
| #include "media/cast/cast_environment.h"
|
| @@ -73,6 +76,10 @@ static const int kTimerErrorMs = 20;
|
| // effects caused by encoding and quantization.
|
| static const int kVideoStart = 100;
|
|
|
| +// The duration of each audio frame. The encoder joins/breaks all inserted
|
| +// audio into chunks of this duration.
|
| +static const int kAudioFrameDurationMs = 10;
|
| +
|
| std::string ConvertFromBase16String(const std::string base_16) {
|
| std::string compressed;
|
| DCHECK_EQ(base_16.size() % 2, 0u) << "Must be a multiple of 2";
|
| @@ -209,8 +216,7 @@ class TestReceiverAudioCallback
|
| : public base::RefCountedThreadSafe<TestReceiverAudioCallback> {
|
| public:
|
| struct ExpectedAudioFrame {
|
| - PcmAudioFrame audio_frame;
|
| - int num_10ms_blocks;
|
| + scoped_ptr<AudioBus> audio_bus;
|
| base::TimeTicks record_time;
|
| };
|
|
|
| @@ -220,111 +226,98 @@ class TestReceiverAudioCallback
|
| expected_sampling_frequency_ = expected_sampling_frequency;
|
| }
|
|
|
| - void AddExpectedResult(scoped_ptr<PcmAudioFrame> audio_frame,
|
| - int expected_num_10ms_blocks,
|
| + void AddExpectedResult(const AudioBus& audio_bus,
|
| const base::TimeTicks& record_time) {
|
| - ExpectedAudioFrame expected_audio_frame;
|
| - expected_audio_frame.audio_frame = *audio_frame;
|
| - expected_audio_frame.num_10ms_blocks = expected_num_10ms_blocks;
|
| - expected_audio_frame.record_time = record_time;
|
| - expected_frame_.push_back(expected_audio_frame);
|
| + scoped_ptr<ExpectedAudioFrame> expected_audio_frame(
|
| + new ExpectedAudioFrame());
|
| + expected_audio_frame->audio_bus =
|
| + AudioBus::Create(audio_bus.channels(), audio_bus.frames()).Pass();
|
| + audio_bus.CopyTo(expected_audio_frame->audio_bus.get());
|
| + expected_audio_frame->record_time = record_time;
|
| + expected_frames_.push_back(expected_audio_frame.release());
|
| }
|
|
|
| - void IgnoreAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
|
| - const base::TimeTicks& playout_time) {}
|
| + void IgnoreAudioFrame(scoped_ptr<AudioBus> audio_bus,
|
| + const base::TimeTicks& playout_time,
|
| + bool is_continuous) {
|
| + ++num_called_;
|
| + }
|
|
|
| - // Check the audio frame parameters but not the audio samples.
|
| - void CheckBasicAudioFrame(const scoped_ptr<PcmAudioFrame>& audio_frame,
|
| - const base::TimeTicks& playout_time) {
|
| - EXPECT_FALSE(expected_frame_.empty()); // Test for bug in test code.
|
| - ExpectedAudioFrame expected_audio_frame = expected_frame_.front();
|
| - EXPECT_EQ(audio_frame->channels, kAudioChannels);
|
| - EXPECT_EQ(audio_frame->frequency, expected_sampling_frequency_);
|
| - EXPECT_EQ(static_cast<int>(audio_frame->samples.size()),
|
| - expected_audio_frame.num_10ms_blocks * kAudioChannels *
|
| - expected_sampling_frequency_ / 100);
|
| + void CheckAudioFrame(scoped_ptr<AudioBus> audio_bus,
|
| + const base::TimeTicks& playout_time,
|
| + bool is_continuous) {
|
| + ++num_called_;
|
| +
|
| + ASSERT_FALSE(expected_frames_.empty());
|
| + const scoped_ptr<ExpectedAudioFrame> expected_audio_frame(
|
| + expected_frames_.front());
|
| + expected_frames_.pop_front();
|
| +
|
| + EXPECT_EQ(audio_bus->channels(), kAudioChannels);
|
| + EXPECT_EQ(audio_bus->frames(), expected_audio_frame->audio_bus->frames());
|
| + for (int ch = 0; ch < audio_bus->channels(); ++ch) {
|
| + EXPECT_NEAR(CountZeroCrossings(
|
| + expected_audio_frame->audio_bus->channel(ch),
|
| + expected_audio_frame->audio_bus->frames()),
|
| + CountZeroCrossings(audio_bus->channel(ch),
|
| + audio_bus->frames()),
|
| + 1);
|
| + }
|
|
|
| + // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be
|
| + // able to compute exact offsets with "omnipotent" knowledge of the system.
|
| const base::TimeTicks upper_bound =
|
| - expected_audio_frame.record_time +
|
| + expected_audio_frame->record_time +
|
| base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs +
|
| kTimerErrorMs);
|
| EXPECT_GE(upper_bound, playout_time)
|
| << "playout_time - upper_bound == "
|
| << (playout_time - upper_bound).InMicroseconds() << " usec";
|
| - EXPECT_LT(expected_audio_frame.record_time, playout_time)
|
| - << "playout_time - expected == "
|
| - << (playout_time - expected_audio_frame.record_time).InMilliseconds()
|
| - << " mS";
|
| -
|
| - EXPECT_EQ(audio_frame->samples.size(),
|
| - expected_audio_frame.audio_frame.samples.size());
|
| - }
|
| -
|
| - void CheckPcmAudioFrame(scoped_ptr<PcmAudioFrame> audio_frame,
|
| - const base::TimeTicks& playout_time) {
|
| - ++num_called_;
|
| -
|
| - CheckBasicAudioFrame(audio_frame, playout_time);
|
| - ExpectedAudioFrame expected_audio_frame = expected_frame_.front();
|
| - expected_frame_.pop_front();
|
| - if (audio_frame->samples.size() == 0)
|
| - return; // No more checks needed.
|
|
|
| - EXPECT_NEAR(CountZeroCrossings(expected_audio_frame.audio_frame.samples),
|
| - CountZeroCrossings(audio_frame->samples),
|
| - 1);
|
| + EXPECT_TRUE(is_continuous);
|
| }
|
|
|
| - void CheckCodedPcmAudioFrame(
|
| + void CheckCodedAudioFrame(
|
| scoped_ptr<transport::EncodedAudioFrame> audio_frame,
|
| const base::TimeTicks& playout_time) {
|
| - ++num_called_;
|
| -
|
| - EXPECT_FALSE(expected_frame_.empty()); // Test for bug in test code.
|
| - ExpectedAudioFrame expected_audio_frame = expected_frame_.front();
|
| - expected_frame_.pop_front();
|
| -
|
| - EXPECT_EQ(static_cast<int>(audio_frame->data.size()),
|
| - 2 * kAudioChannels * expected_sampling_frequency_ / 100);
|
| -
|
| - base::TimeDelta time_since_recording =
|
| - playout_time - expected_audio_frame.record_time;
|
| -
|
| - EXPECT_LE(time_since_recording,
|
| - base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs +
|
| - kTimerErrorMs));
|
| -
|
| - EXPECT_LT(expected_audio_frame.record_time, playout_time);
|
| - if (audio_frame->data.size() == 0)
|
| - return; // No more checks needed.
|
| -
|
| - // We need to convert our "coded" audio frame to our raw format.
|
| - std::vector<int16> output_audio_samples;
|
| - size_t number_of_samples = audio_frame->data.size() / 2;
|
| -
|
| - for (size_t i = 0; i < number_of_samples; ++i) {
|
| - uint16 sample =
|
| - static_cast<uint8>(audio_frame->data[1 + i * sizeof(uint16)]) +
|
| - (static_cast<uint16>(audio_frame->data[i * sizeof(uint16)]) << 8);
|
| - output_audio_samples.push_back(static_cast<int16>(sample));
|
| - }
|
| -
|
| - EXPECT_NEAR(CountZeroCrossings(expected_audio_frame.audio_frame.samples),
|
| - CountZeroCrossings(output_audio_samples),
|
| - 1);
|
| + ASSERT_FALSE(expected_frames_.empty());
|
| + const ExpectedAudioFrame& expected_audio_frame =
|
| + *(expected_frames_.front());
|
| + // Note: Just peeking here. Will delegate to CheckAudioFrame() to pop.
|
| +
|
| + // We need to "decode" the encoded audio frame. The codec is simply to
|
| + // swizzle the bytes of each int16 from host-->network-->host order to get
|
| + // interleaved int16 PCM. Then, make an AudioBus out of that.
|
| + const int num_elements = audio_frame->data.size() / sizeof(int16);
|
| + ASSERT_EQ(expected_audio_frame.audio_bus->channels() *
|
| + expected_audio_frame.audio_bus->frames(),
|
| + num_elements);
|
| + int16* const pcm_data =
|
| + reinterpret_cast<int16*>(string_as_array(&audio_frame->data));
|
| + for (int i = 0; i < num_elements; ++i)
|
| + pcm_data[i] = static_cast<int16>(base::NetToHost16(pcm_data[i]));
|
| + scoped_ptr<AudioBus> audio_bus(
|
| + AudioBus::Create(expected_audio_frame.audio_bus->channels(),
|
| + expected_audio_frame.audio_bus->frames()));
|
| + audio_bus->FromInterleaved(pcm_data, audio_bus->frames(), sizeof(int16));
|
| +
|
| + // Delegate the checking from here...
|
| + CheckAudioFrame(audio_bus.Pass(), playout_time, true);
|
| }
|
|
|
| int number_times_called() const { return num_called_; }
|
|
|
| protected:
|
| - virtual ~TestReceiverAudioCallback() {}
|
| + virtual ~TestReceiverAudioCallback() {
|
| + STLDeleteElements(&expected_frames_);
|
| + }
|
|
|
| private:
|
| friend class base::RefCountedThreadSafe<TestReceiverAudioCallback>;
|
|
|
| int num_called_;
|
| int expected_sampling_frequency_;
|
| - std::list<ExpectedAudioFrame> expected_frame_;
|
| + std::list<ExpectedAudioFrame*> expected_frames_;
|
| };
|
|
|
| // Class that verifies the video frames coming out of the receiver.
|
| @@ -365,9 +358,11 @@ class TestReceiverVideoCallback
|
| const base::TimeDelta upper_bound = base::TimeDelta::FromMilliseconds(
|
| kDefaultRtpMaxDelayMs + kTimerErrorMs);
|
|
|
| + // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be
|
| + // able to compute exact offsets with "omnipotent" knowledge of the system.
|
| EXPECT_GE(upper_bound, time_since_capture)
|
| << "time_since_capture - upper_bound == "
|
| - << (time_since_capture - upper_bound).InMilliseconds() << " mS";
|
| + << (time_since_capture - upper_bound).InMicroseconds() << " usec";
|
| EXPECT_LE(expected_video_frame.capture_time, render_time);
|
| EXPECT_EQ(expected_video_frame.width, video_frame->visible_rect().width());
|
| EXPECT_EQ(expected_video_frame.height,
|
| @@ -426,11 +421,10 @@ class End2EndTest : public ::testing::Test {
|
| &event_subscriber_sender_);
|
| }
|
|
|
| - void SetupConfig(transport::AudioCodec audio_codec,
|
| - int audio_sampling_frequency,
|
| - // TODO(miu): 3rd arg is meaningless?!?
|
| - bool external_audio_decoder,
|
| - int max_number_of_video_buffers_used) {
|
| + void Configure(transport::AudioCodec audio_codec,
|
| + int audio_sampling_frequency,
|
| + bool external_audio_decoder,
|
| + int max_number_of_video_buffers_used) {
|
| audio_sender_config_.sender_ssrc = 1;
|
| audio_sender_config_.incoming_feedback_ssrc = 2;
|
| audio_sender_config_.rtp_config.payload_type = 96;
|
| @@ -488,6 +482,42 @@ class End2EndTest : public ::testing::Test {
|
| transport_video_config_.base.rtp_config = video_sender_config_.rtp_config;
|
| }
|
|
|
| + void FeedAudioFrames(int count, bool will_be_checked) {
|
| + for (int i = 0; i < count; ++i) {
|
| + scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| + base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
|
| + const base::TimeTicks send_time =
|
| + testing_clock_sender_->NowTicks() +
|
| + i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
|
| + if (will_be_checked)
|
| + test_receiver_audio_callback_->AddExpectedResult(*audio_bus, send_time);
|
| + audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| + }
|
| + }
|
| +
|
| + void FeedAudioFramesWithExpectedDelay(int count,
|
| + const base::TimeDelta& delay) {
|
| + for (int i = 0; i < count; ++i) {
|
| + scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| + base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
|
| + const base::TimeTicks send_time =
|
| + testing_clock_sender_->NowTicks() +
|
| + i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
|
| + test_receiver_audio_callback_->AddExpectedResult(*audio_bus,
|
| + send_time + delay);
|
| + audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| + }
|
| + }
|
| +
|
| + void RequestAudioFrames(int count, bool with_check) {
|
| + for (int i = 0; i < count; ++i) {
|
| + frame_receiver_->GetRawAudioFrame(
|
| + base::Bind(with_check ? &TestReceiverAudioCallback::CheckAudioFrame :
|
| + &TestReceiverAudioCallback::IgnoreAudioFrame,
|
| + test_receiver_audio_callback_));
|
| + }
|
| + }
|
| +
|
| void Create() {
|
| cast_receiver_ = CastReceiver::Create(cast_environment_receiver_,
|
| audio_receiver_config_,
|
| @@ -620,146 +650,103 @@ class End2EndTest : public ::testing::Test {
|
| };
|
|
|
| TEST_F(End2EndTest, LoopNoLossPcm16) {
|
| - SetupConfig(transport::kPcm16, 32000, false, 1);
|
| + Configure(transport::kPcm16, 32000, false, 1);
|
| // Reduce video resolution to allow processing multiple frames within a
|
| // reasonable time frame.
|
| video_sender_config_.width = kVideoQcifWidth;
|
| video_sender_config_.height = kVideoQcifHeight;
|
| Create();
|
|
|
| + const int kNumIterations = 50;
|
| int video_start = kVideoStart;
|
| int audio_diff = kFrameTimerMs;
|
| - int i = 0;
|
| -
|
| - for (; i < 300; ++i) {
|
| - int num_10ms_blocks = audio_diff / 10;
|
| - audio_diff -= num_10ms_blocks * 10;
|
| -
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
|
| + int num_audio_frames_requested = 0;
|
| + for (int i = 0; i < kNumIterations; ++i) {
|
| + const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
|
| + audio_diff -= num_audio_frames * kAudioFrameDurationMs;
|
|
|
| - base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| - if (i != 0) {
|
| - // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
|
| - // first samples will be 0 and then slowly ramp up to its real
|
| - // amplitude;
|
| - // ignore the first frame.
|
| - test_receiver_audio_callback_->AddExpectedResult(
|
| - ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
|
| - num_10ms_blocks,
|
| - send_time);
|
| - }
|
| -
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| + if (num_audio_frames > 0)
|
| + FeedAudioFrames(1, true);
|
|
|
| test_receiver_video_callback_->AddExpectedResult(
|
| video_start,
|
| video_sender_config_.width,
|
| video_sender_config_.height,
|
| - send_time);
|
| - SendVideoFrame(video_start, send_time);
|
| + testing_clock_sender_->NowTicks());
|
| + SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
|
|
|
| - if (i == 0) {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - } else {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - }
|
| + if (num_audio_frames > 0)
|
| + RunTasks(kAudioFrameDurationMs); // Advance clock forward.
|
| + if (num_audio_frames > 1)
|
| + FeedAudioFrames(num_audio_frames - 1, true);
|
| +
|
| + RequestAudioFrames(num_audio_frames, true);
|
| + num_audio_frames_requested += num_audio_frames;
|
|
|
| frame_receiver_->GetRawVideoFrame(
|
| base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
|
| test_receiver_video_callback_));
|
|
|
| - RunTasks(kFrameTimerMs);
|
| + RunTasks(kFrameTimerMs - kAudioFrameDurationMs);
|
| audio_diff += kFrameTimerMs;
|
| video_start++;
|
| }
|
|
|
| RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
|
| - EXPECT_EQ(i - 1, test_receiver_audio_callback_->number_times_called());
|
| - EXPECT_EQ(i, test_receiver_video_callback_->number_times_called());
|
| + EXPECT_EQ(num_audio_frames_requested,
|
| + test_receiver_audio_callback_->number_times_called());
|
| + EXPECT_EQ(kNumIterations,
|
| + test_receiver_video_callback_->number_times_called());
|
| }
|
|
|
| // This tests our external decoder interface for audio.
|
| // Audio test without packet loss using the raw PCM 16 audio "codec".
|
| TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
|
| - SetupConfig(transport::kPcm16, 32000, true, 1);
|
| + Configure(transport::kPcm16, 32000, true, 1);
|
| Create();
|
|
|
| - int i = 0;
|
| - for (; i < 10; ++i) {
|
| - base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10)));
|
| - test_receiver_audio_callback_->AddExpectedResult(
|
| - ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
|
| - 1,
|
| - send_time);
|
| -
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| -
|
| - RunTasks(10);
|
| + const int kNumIterations = 10;
|
| + for (int i = 0; i < kNumIterations; ++i) {
|
| + FeedAudioFrames(1, true);
|
| + RunTasks(kAudioFrameDurationMs);
|
| frame_receiver_->GetCodedAudioFrame(
|
| - base::Bind(&TestReceiverAudioCallback::CheckCodedPcmAudioFrame,
|
| + base::Bind(&TestReceiverAudioCallback::CheckCodedAudioFrame,
|
| test_receiver_audio_callback_));
|
| }
|
| RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
|
| - EXPECT_EQ(10, test_receiver_audio_callback_->number_times_called());
|
| + EXPECT_EQ(kNumIterations,
|
| + test_receiver_audio_callback_->number_times_called());
|
| }
|
|
|
| // This tests our Opus audio codec without video.
|
| TEST_F(End2EndTest, LoopNoLossOpus) {
|
| - SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 1);
|
| + Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 1);
|
| Create();
|
|
|
| - int i = 0;
|
| - for (; i < 10; ++i) {
|
| - int num_10ms_blocks = 3;
|
| - base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| -
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
|
| -
|
| - if (i != 0) {
|
| - test_receiver_audio_callback_->AddExpectedResult(
|
| - ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
|
| - num_10ms_blocks,
|
| - send_time);
|
| - }
|
| -
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| -
|
| - RunTasks(30);
|
| -
|
| - if (i == 0) {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - } else {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - }
|
| + const int kNumIterations = 300;
|
| + for (int i = 0; i < kNumIterations; ++i) {
|
| + // Opus introduces a tiny delay before the sinewave starts, so don't examine
|
| + // the first frame.
|
| + const bool examine_audio_data = i > 0;
|
| + FeedAudioFrames(1, examine_audio_data);
|
| + RunTasks(kAudioFrameDurationMs);
|
| + RequestAudioFrames(1, examine_audio_data);
|
| }
|
| RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
|
| - EXPECT_EQ(i - 1, test_receiver_audio_callback_->number_times_called());
|
| + EXPECT_EQ(kNumIterations,
|
| + test_receiver_audio_callback_->number_times_called());
|
| }
|
|
|
| // This test starts sending audio and video at start-up time, before the
|
| // receiver is ready; it sends 2 frames before the receiver comes online.
|
| -TEST_F(End2EndTest, StartSenderBeforeReceiver) {
|
| - SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 1);
|
| +//
|
| +// Test disabled due to flakiness: It appears that the RTCP synchronization
|
| +// sometimes kicks in, and sometimes doesn't. When it does, there's a sharp
|
| +// discontinuity in the timeline, throwing off the test expectations. See TODOs
|
| +// in audio_receiver.cc for likely cause(s) of this bug.
|
| +// http://crbug.com/356942
|
| +TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
|
| + Configure(transport::kPcm16, kDefaultAudioSamplingRate, false, 1);
|
| Create();
|
|
|
| int video_start = kVideoStart;
|
| @@ -769,18 +756,16 @@ TEST_F(End2EndTest, StartSenderBeforeReceiver) {
|
|
|
| const int test_delay_ms = 100;
|
|
|
| - base::TimeTicks initial_send_time;
|
| - for (int i = 0; i < 2; ++i) {
|
| - int num_10ms_blocks = audio_diff / 10;
|
| - audio_diff -= num_10ms_blocks * 10;
|
| -
|
| - base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| - if (initial_send_time.is_null())
|
| - initial_send_time = send_time;
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
|
| + const int kNumVideoFramesBeforeReceiverStarted = 2;
|
| + const base::TimeTicks initial_send_time = testing_clock_sender_->NowTicks();
|
| + const base::TimeDelta expected_delay =
|
| + base::TimeDelta::FromMilliseconds(test_delay_ms + kFrameTimerMs);
|
| + for (int i = 0; i < kNumVideoFramesBeforeReceiverStarted; ++i) {
|
| + const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
|
| + audio_diff -= num_audio_frames * kAudioFrameDurationMs;
|
|
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| + if (num_audio_frames > 0)
|
| + FeedAudioFramesWithExpectedDelay(1, expected_delay);
|
|
|
| // Frame will be rendered with a 100 ms delay, as the transmission is delayed.
|
| // The receiver at this point cannot be synced to the sender's clock, as no
|
| @@ -789,11 +774,15 @@ TEST_F(End2EndTest, StartSenderBeforeReceiver) {
|
| video_start,
|
| video_sender_config_.width,
|
| video_sender_config_.height,
|
| - initial_send_time +
|
| - base::TimeDelta::FromMilliseconds(test_delay_ms + kFrameTimerMs));
|
| + initial_send_time + expected_delay);
|
| + SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
|
|
|
| - SendVideoFrame(video_start, send_time);
|
| - RunTasks(kFrameTimerMs);
|
| + if (num_audio_frames > 0)
|
| + RunTasks(kAudioFrameDurationMs); // Advance clock forward.
|
| + if (num_audio_frames > 1)
|
| + FeedAudioFramesWithExpectedDelay(num_audio_frames - 1, expected_delay);
|
| +
|
| + RunTasks(kFrameTimerMs - kAudioFrameDurationMs);
|
| audio_diff += kFrameTimerMs;
|
| video_start++;
|
| }
|
| @@ -801,63 +790,47 @@ TEST_F(End2EndTest, StartSenderBeforeReceiver) {
|
| RunTasks(test_delay_ms);
|
| sender_to_receiver_.SetSendPackets(true);
|
|
|
| - int j = 0;
|
| - const int number_of_audio_frames_to_ignore = 2;
|
| - for (; j < 10; ++j) {
|
| - int num_10ms_blocks = audio_diff / 10;
|
| - audio_diff -= num_10ms_blocks * 10;
|
| - base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| -
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
|
| -
|
| - if (j >= number_of_audio_frames_to_ignore) {
|
| - test_receiver_audio_callback_->AddExpectedResult(
|
| - ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
|
| - num_10ms_blocks,
|
| - send_time);
|
| - }
|
| + int num_audio_frames_requested = 0;
|
| + for (int j = 0; j < 10; ++j) {
|
| + const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
|
| + audio_diff -= num_audio_frames * kAudioFrameDurationMs;
|
|
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| + if (num_audio_frames > 0)
|
| + FeedAudioFrames(1, true);
|
|
|
| test_receiver_video_callback_->AddExpectedResult(
|
| video_start,
|
| video_sender_config_.width,
|
| video_sender_config_.height,
|
| - send_time);
|
| + testing_clock_sender_->NowTicks());
|
| + SendVideoFrame(video_start, testing_clock_sender_->NowTicks());
|
|
|
| - SendVideoFrame(video_start, send_time);
|
| - RunTasks(kFrameTimerMs);
|
| - audio_diff += kFrameTimerMs;
|
| + if (num_audio_frames > 0)
|
| + RunTasks(kAudioFrameDurationMs); // Advance clock forward.
|
| + if (num_audio_frames > 1)
|
| + FeedAudioFrames(num_audio_frames - 1, true);
|
| +
|
| + RequestAudioFrames(num_audio_frames, true);
|
| + num_audio_frames_requested += num_audio_frames;
|
|
|
| - if (j < number_of_audio_frames_to_ignore) {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - } else {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - }
|
| frame_receiver_->GetRawVideoFrame(
|
| base::Bind(&TestReceiverVideoCallback::CheckVideoFrame,
|
| test_receiver_video_callback_));
|
| +
|
| + RunTasks(kFrameTimerMs - kAudioFrameDurationMs);
|
| + audio_diff += kFrameTimerMs;
|
| video_start++;
|
| }
|
| RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
|
| - EXPECT_EQ(j - number_of_audio_frames_to_ignore,
|
| + EXPECT_EQ(num_audio_frames_requested,
|
| test_receiver_audio_callback_->number_times_called());
|
| - EXPECT_EQ(j, test_receiver_video_callback_->number_times_called());
|
| + EXPECT_EQ(10, test_receiver_video_callback_->number_times_called());
|
| }
|
|
|
| // This tests a network glitch lasting for 10 video frames.
|
| // Flaky. See crbug.com/351596.
|
| TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
|
| - SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
|
| + Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
|
| video_sender_config_.rtp_config.max_delay_ms = 67;
|
| video_receiver_config_.rtp_max_delay_ms = 67;
|
| Create();
|
| @@ -916,7 +889,7 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
|
| }
|
|
|
| TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
|
| - SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
|
| + Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
|
| video_sender_config_.rtp_config.max_delay_ms = 67;
|
| video_receiver_config_.rtp_max_delay_ms = 67;
|
| Create();
|
| @@ -952,7 +925,7 @@ TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
|
| }
|
|
|
| TEST_F(End2EndTest, ResetReferenceFrameId) {
|
| - SetupConfig(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
|
| + Configure(transport::kOpus, kDefaultAudioSamplingRate, false, 3);
|
| video_sender_config_.rtp_config.max_delay_ms = 67;
|
| video_receiver_config_.rtp_max_delay_ms = 67;
|
| Create();
|
| @@ -982,7 +955,7 @@ TEST_F(End2EndTest, ResetReferenceFrameId) {
|
| }
|
|
|
| TEST_F(End2EndTest, CryptoVideo) {
|
| - SetupConfig(transport::kPcm16, 32000, false, 1);
|
| + Configure(transport::kPcm16, 32000, false, 1);
|
|
|
| transport_video_config_.base.aes_iv_mask =
|
| ConvertFromBase16String("1234567890abcdeffedcba0987654321");
|
| @@ -1019,7 +992,7 @@ TEST_F(End2EndTest, CryptoVideo) {
|
| }
|
|
|
| TEST_F(End2EndTest, CryptoAudio) {
|
| - SetupConfig(transport::kPcm16, 32000, false, 1);
|
| + Configure(transport::kPcm16, 32000, false, 1);
|
|
|
| transport_audio_config_.base.aes_iv_mask =
|
| ConvertFromBase16String("abcdeffedcba12345678900987654321");
|
| @@ -1031,52 +1004,22 @@ TEST_F(End2EndTest, CryptoAudio) {
|
|
|
| Create();
|
|
|
| - int frames_counter = 0;
|
| - for (; frames_counter < 3; ++frames_counter) {
|
| - int num_10ms_blocks = 2;
|
| -
|
| - const base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| -
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
|
| -
|
| - if (frames_counter != 0) {
|
| - // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
|
| - // first samples will be 0 and then slowly ramp up to its real
|
| - // amplitude;
|
| - // ignore the first frame.
|
| - test_receiver_audio_callback_->AddExpectedResult(
|
| - ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
|
| - num_10ms_blocks,
|
| - send_time);
|
| - }
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| -
|
| - RunTasks(num_10ms_blocks * 10);
|
| -
|
| - if (frames_counter == 0) {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - 32000,
|
| - base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - } else {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - 32000,
|
| - base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - }
|
| + const int kNumIterations = 3;
|
| + const int kNumAudioFramesPerIteration = 2;
|
| + for (int i = 0; i < kNumIterations; ++i) {
|
| + FeedAudioFrames(kNumAudioFramesPerIteration, true);
|
| + RunTasks(kNumAudioFramesPerIteration * kAudioFrameDurationMs);
|
| + RequestAudioFrames(kNumAudioFramesPerIteration, true);
|
| }
|
| RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline.
|
| - EXPECT_EQ(frames_counter - 1,
|
| + EXPECT_EQ(kNumIterations * kNumAudioFramesPerIteration,
|
| test_receiver_audio_callback_->number_times_called());
|
| }
|
|
|
| // Video test without packet loss - tests the logging aspects of the end2end,
|
| // but is basically equivalent to LoopNoLossPcm16.
|
| TEST_F(End2EndTest, VideoLogging) {
|
| - SetupConfig(transport::kPcm16, 32000, false, 1);
|
| + Configure(transport::kPcm16, 32000, false, 1);
|
| Create();
|
|
|
| int video_start = kVideoStart;
|
| @@ -1197,58 +1140,30 @@ TEST_F(End2EndTest, VideoLogging) {
|
| // Audio test without packet loss - tests the logging aspects of the end2end,
|
| // but is basically equivalent to LoopNoLossPcm16.
|
| TEST_F(End2EndTest, AudioLogging) {
|
| - SetupConfig(transport::kPcm16, 32000, false, 1);
|
| + Configure(transport::kPcm16, 32000, false, 1);
|
| Create();
|
|
|
| int audio_diff = kFrameTimerMs;
|
| - const int num_audio_buses = 10;
|
| - int num_frames = 0;
|
| - for (int i = 0; i < num_audio_buses; ++i) {
|
| - int num_10ms_blocks = audio_diff / 10;
|
| - audio_diff -= num_10ms_blocks * 10;
|
| - base::TimeTicks send_time = testing_clock_sender_->NowTicks();
|
| -
|
| - // Each audio bus can contain more than one frame.
|
| - scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
|
| - base::TimeDelta::FromMilliseconds(10) * num_10ms_blocks));
|
| - num_frames += num_10ms_blocks;
|
| -
|
| - if (i != 0) {
|
| - // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the
|
| - // first samples will be 0 and then slowly ramp up to its real
|
| - // amplitude;
|
| - // ignore the first frame.
|
| - test_receiver_audio_callback_->AddExpectedResult(
|
| - ToPcmAudioFrame(*audio_bus, audio_sender_config_.frequency),
|
| - num_10ms_blocks,
|
| - send_time);
|
| - }
|
| + const int kNumVideoFrames = 10;
|
| + int num_audio_frames_requested = 0;
|
| + for (int i = 0; i < kNumVideoFrames; ++i) {
|
| + const int num_audio_frames = audio_diff / kAudioFrameDurationMs;
|
| + audio_diff -= num_audio_frames * kAudioFrameDurationMs;
|
|
|
| - audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time);
|
| + FeedAudioFrames(num_audio_frames, true);
|
|
|
| RunTasks(kFrameTimerMs);
|
| audio_diff += kFrameTimerMs;
|
|
|
| - if (i == 0) {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - } else {
|
| - frame_receiver_->GetRawAudioFrame(
|
| - num_10ms_blocks,
|
| - audio_sender_config_.frequency,
|
| - base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame,
|
| - test_receiver_audio_callback_));
|
| - }
|
| + RequestAudioFrames(num_audio_frames, true);
|
| + num_audio_frames_requested += num_audio_frames;
|
| }
|
|
|
| // Basic tests.
|
| RunTasks(2 * kFrameTimerMs + 1); // Empty the receiver pipeline.
|
|
|
| - int num_times_called = test_receiver_audio_callback_->number_times_called();
|
| - EXPECT_EQ(num_audio_buses - 1, num_times_called);
|
| + EXPECT_EQ(num_audio_frames_requested,
|
| + test_receiver_audio_callback_->number_times_called());
|
|
|
| // Logging tests.
|
| // Verify that all frames and all required events were logged.
|
| @@ -1271,8 +1186,8 @@ TEST_F(End2EndTest, AudioLogging) {
|
| encoded_count += it->second.counter[kAudioFrameEncoded];
|
| }
|
|
|
| - EXPECT_EQ(num_frames, received_count);
|
| - EXPECT_EQ(num_frames, encoded_count);
|
| + EXPECT_EQ(num_audio_frames_requested, received_count);
|
| + EXPECT_EQ(num_audio_frames_requested, encoded_count);
|
|
|
| std::map<RtpTimestamp, LoggingEventCounts>::iterator map_it =
|
| event_counter_for_frame.begin();
|
|
|
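For reference, below is a minimal, standalone sketch of two techniques the patched test relies on. It is illustrative only: it uses plain standard-library types instead of media::AudioBus, the helper names (DecodePcm16, and this CountZeroCrossings) are hypothetical, and the zero-crossing helper is an assumed implementation (a simple sign-change count), not the actual test utility. The "decode" mirrors the byte swizzle that CheckCodedAudioFrame() performs with base::NetToHost16().

// Illustrative sketch; not part of the patch.
#include <cstdint>
#include <string>
#include <vector>

// Assumed behavior of the CountZeroCrossings() test helper: count sign
// changes. Two renditions of the same sinewave that differ only in amplitude
// or codec ramp-up should yield nearly identical counts (the test allows a
// difference of 1 via EXPECT_NEAR).
int CountZeroCrossings(const std::vector<int16_t>& samples) {
  int crossings = 0;
  for (size_t i = 1; i < samples.size(); ++i) {
    if ((samples[i - 1] < 0) != (samples[i] < 0))
      ++crossings;
  }
  return crossings;
}

// "Decode" a PCM16 payload: the encoded frame is big-endian (network-order)
// interleaved int16 samples, so swap each sample back to host order.
std::vector<int16_t> DecodePcm16(const std::string& payload) {
  std::vector<int16_t> samples(payload.size() / sizeof(int16_t));
  for (size_t i = 0; i < samples.size(); ++i) {
    const uint8_t hi = static_cast<uint8_t>(payload[2 * i]);
    const uint8_t lo = static_cast<uint8_t>(payload[2 * i + 1]);
    samples[i] = static_cast<int16_t>((hi << 8) | lo);
  }
  return samples;
}

On the cadence used by the new loops: FeedAudioFrames()/RequestAudioFrames() dole out kAudioFrameDurationMs (10 ms) audio frames against the longer video frame timer. Each iteration inserts audio_diff / kAudioFrameDurationMs frames and carries the remainder of audio_diff forward, so the audio and video pipelines stay in step even if the video frame period is not a multiple of the audio frame duration.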