Index: media/cast/test/end2end_unittest.cc |
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc |
index 6ee722e3f859bbc4c715b4c72d583a2a9671a995..eb3821b89f3a52f9f2cc708bb00d83debeb51437 100644 |
--- a/media/cast/test/end2end_unittest.cc |
+++ b/media/cast/test/end2end_unittest.cc |
@@ -62,16 +62,11 @@ static const double kVideoAcceptedPSNR = 38.0; |
// The tests are commonly implemented with |kFrameTimerMs| RunTasks() calls; |
// a normal video is 30 fps, hence the 33 ms between frames. |
+// |
+// TODO(miu): The errors in timing will add up significantly. Find an |
+// alternative approach that eliminates use of this constant. |
static const int kFrameTimerMs = 33; |
-// The packets pass through the pacer which can delay the beginning of the |
-// frame by 10 ms if there is packets belonging to the previous frame being |
-// retransmitted. |
-// In addition, audio packets are sent in 10mS intervals in audio_encoder.cc, |
-// although we send an audio frame every 33mS, which adds an extra delay. |
-// A TODO was added in the code to resolve this. |
-static const int kTimerErrorMs = 20; |
- |
// Start the video synthetic start value at a medium-range value, to avoid edge |
// effects caused by encoding and quantization. |
static const int kVideoStart = 100; |
@@ -80,6 +75,17 @@ static const int kVideoStart = 100; |
// chunks of this size. |
static const int kAudioFrameDurationMs = 10; |
+// The amount of time between frame capture on the sender and playout on the |
+// receiver. |
+static const int kTargetPlayoutDelayMs = 100; |
+ |
+// The maximum amount of deviation expected in the playout times emitted by the |
+// receiver. |
+// |
+// TODO(miu): This is a "fuzzy" way to check the timestamps. We should be able |
+// to compute exact offsets with "omnipotent" knowledge of the system. |
+static const int kMaxAllowedPlayoutErrorMs = 30; |
+ |
std::string ConvertFromBase16String(const std::string base_16) { |
std::string compressed; |
DCHECK_EQ(base_16.size() % 2, 0u) << "Must be a multiple of 2"; |
@@ -244,7 +250,7 @@ class TestReceiverAudioCallback |
public: |
struct ExpectedAudioFrame { |
scoped_ptr<AudioBus> audio_bus; |
- base::TimeTicks record_time; |
+ base::TimeTicks playout_time; |
}; |
TestReceiverAudioCallback() : num_called_(0) {} |
@@ -254,13 +260,13 @@ class TestReceiverAudioCallback |
} |
void AddExpectedResult(const AudioBus& audio_bus, |
- const base::TimeTicks& record_time) { |
+ const base::TimeTicks& playout_time) { |
scoped_ptr<ExpectedAudioFrame> expected_audio_frame( |
new ExpectedAudioFrame()); |
expected_audio_frame->audio_bus = |
AudioBus::Create(audio_bus.channels(), audio_bus.frames()).Pass(); |
audio_bus.CopyTo(expected_audio_frame->audio_bus.get()); |
- expected_audio_frame->record_time = record_time; |
+ expected_audio_frame->playout_time = playout_time; |
expected_frames_.push_back(expected_audio_frame.release()); |
} |
@@ -292,16 +298,10 @@ class TestReceiverAudioCallback |
1); |
} |
- // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be |
- // able to compute exact offsets with "omnipotent" knowledge of the system. |
- const base::TimeTicks upper_bound = |
- expected_audio_frame->record_time + |
- base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs + |
- kTimerErrorMs); |
- EXPECT_GE(upper_bound, playout_time) |
- << "playout_time - upper_bound == " |
- << (playout_time - upper_bound).InMicroseconds() << " usec"; |
- |
+ EXPECT_NEAR( |
+ (playout_time - expected_audio_frame->playout_time).InMillisecondsF(), |
+ 0.0, |
+ kMaxAllowedPlayoutErrorMs); |
EXPECT_TRUE(is_continuous); |
} |
@@ -357,7 +357,7 @@ class TestReceiverVideoCallback |
int start_value; |
int width; |
int height; |
- base::TimeTicks capture_time; |
+ base::TimeTicks playout_time; |
bool should_be_continuous; |
}; |
@@ -366,19 +366,19 @@ class TestReceiverVideoCallback |
void AddExpectedResult(int start_value, |
int width, |
int height, |
- const base::TimeTicks& capture_time, |
+ const base::TimeTicks& playout_time, |
bool should_be_continuous) { |
ExpectedVideoFrame expected_video_frame; |
expected_video_frame.start_value = start_value; |
expected_video_frame.width = width; |
expected_video_frame.height = height; |
- expected_video_frame.capture_time = capture_time; |
+ expected_video_frame.playout_time = playout_time; |
expected_video_frame.should_be_continuous = should_be_continuous; |
expected_frame_.push_back(expected_video_frame); |
} |
void CheckVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame, |
- const base::TimeTicks& render_time, |
+ const base::TimeTicks& playout_time, |
bool is_continuous) { |
++num_called_; |
@@ -387,21 +387,6 @@ class TestReceiverVideoCallback |
ExpectedVideoFrame expected_video_frame = expected_frame_.front(); |
expected_frame_.pop_front(); |
- base::TimeDelta time_since_capture = |
- render_time - expected_video_frame.capture_time; |
- const base::TimeDelta upper_bound = base::TimeDelta::FromMilliseconds( |
- kDefaultRtpMaxDelayMs + kTimerErrorMs); |
- |
- // TODO(miu): This is a "fuzzy" way to check the timestamps. We should be |
- // able to compute exact offsets with "omnipotent" knowledge of the system. |
- EXPECT_GE(upper_bound, time_since_capture) |
- << "time_since_capture - upper_bound == " |
- << (time_since_capture - upper_bound).InMicroseconds() << " usec"; |
- // TODO(miu): I broke the concept of 100 ms target delay timing on the |
- // receiver side, but the logic for computing playout time really isn't any |
- // more broken than it was. This only affects the receiver, and is to be |
- // rectified in an soon-upcoming change. http://crbug.com/356942 |
- // EXPECT_LE(expected_video_frame.capture_time, render_time); |
EXPECT_EQ(expected_video_frame.width, video_frame->visible_rect().width()); |
EXPECT_EQ(expected_video_frame.height, |
video_frame->visible_rect().height()); |
@@ -414,6 +399,11 @@ class TestReceiverVideoCallback |
EXPECT_GE(I420PSNR(expected_I420_frame, video_frame), kVideoAcceptedPSNR); |
+ EXPECT_NEAR( |
+ (playout_time - expected_video_frame.playout_time).InMillisecondsF(), |
+ 0.0, |
+ kMaxAllowedPlayoutErrorMs); |
+ |
EXPECT_EQ(expected_video_frame.should_be_continuous, is_continuous); |
} |
@@ -467,6 +457,7 @@ class End2EndTest : public ::testing::Test { |
bool external_audio_decoder, |
int max_number_of_video_buffers_used) { |
audio_sender_config_.rtp_config.ssrc = 1; |
+ audio_sender_config_.rtp_config.max_delay_ms = kTargetPlayoutDelayMs; |
audio_sender_config_.incoming_feedback_ssrc = 2; |
audio_sender_config_.rtp_config.payload_type = 96; |
audio_sender_config_.use_external_encoder = false; |
@@ -478,6 +469,7 @@ class End2EndTest : public ::testing::Test { |
audio_receiver_config_.feedback_ssrc = |
audio_sender_config_.incoming_feedback_ssrc; |
audio_receiver_config_.incoming_ssrc = audio_sender_config_.rtp_config.ssrc; |
+ audio_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs; |
audio_receiver_config_.rtp_payload_type = |
audio_sender_config_.rtp_config.payload_type; |
audio_receiver_config_.use_external_decoder = external_audio_decoder; |
@@ -489,6 +481,7 @@ class End2EndTest : public ::testing::Test { |
audio_receiver_config_.frequency); |
video_sender_config_.rtp_config.ssrc = 3; |
+ video_sender_config_.rtp_config.max_delay_ms = kTargetPlayoutDelayMs; |
video_sender_config_.incoming_feedback_ssrc = 4; |
video_sender_config_.rtp_config.payload_type = 97; |
video_sender_config_.use_external_encoder = false; |
@@ -507,6 +500,7 @@ class End2EndTest : public ::testing::Test { |
video_receiver_config_.feedback_ssrc = |
video_sender_config_.incoming_feedback_ssrc; |
video_receiver_config_.incoming_ssrc = video_sender_config_.rtp_config.ssrc; |
+ video_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs; |
video_receiver_config_.rtp_payload_type = |
video_sender_config_.rtp_config.payload_type; |
video_receiver_config_.use_external_decoder = false; |
@@ -517,12 +511,16 @@ class End2EndTest : public ::testing::Test { |
for (int i = 0; i < count; ++i) { |
scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus( |
base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs))); |
- const base::TimeTicks send_time = |
+ const base::TimeTicks capture_time = |
testing_clock_sender_->NowTicks() + |
i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs); |
- if (will_be_checked) |
- test_receiver_audio_callback_->AddExpectedResult(*audio_bus, send_time); |
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time); |
+ if (will_be_checked) { |
+ test_receiver_audio_callback_->AddExpectedResult( |
+ *audio_bus, |
+ capture_time + |
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs)); |
+ } |
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), capture_time); |
} |
} |
@@ -531,12 +529,14 @@ class End2EndTest : public ::testing::Test { |
for (int i = 0; i < count; ++i) { |
scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus( |
base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs))); |
- const base::TimeTicks send_time = |
+ const base::TimeTicks capture_time = |
testing_clock_sender_->NowTicks() + |
i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs); |
- test_receiver_audio_callback_->AddExpectedResult(*audio_bus, |
- send_time + delay); |
- audio_frame_input_->InsertAudio(audio_bus.Pass(), send_time); |
+ test_receiver_audio_callback_->AddExpectedResult( |
+ *audio_bus, |
+ capture_time + delay + |
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs)); |
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), capture_time); |
} |
} |
@@ -554,6 +554,7 @@ class End2EndTest : public ::testing::Test { |
audio_receiver_config_, |
video_receiver_config_, |
&receiver_to_sender_); |
+ |
net::IPEndPoint dummy_endpoint; |
transport_sender_.reset(new transport::CastTransportSenderImpl( |
NULL, |
@@ -704,7 +705,8 @@ TEST_F(End2EndTest, LoopNoLossPcm16) { |
video_start, |
video_sender_config_.width, |
video_sender_config_.height, |
- testing_clock_sender_->NowTicks(), |
+ testing_clock_sender_->NowTicks() + |
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
true); |
SendVideoFrame(video_start, testing_clock_sender_->NowTicks()); |
@@ -809,7 +811,8 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) { |
video_start, |
video_sender_config_.width, |
video_sender_config_.height, |
- initial_send_time + expected_delay, |
+ initial_send_time + expected_delay + |
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
true); |
SendVideoFrame(video_start, testing_clock_sender_->NowTicks()); |
@@ -838,7 +841,8 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) { |
video_start, |
video_sender_config_.width, |
video_sender_config_.height, |
- testing_clock_sender_->NowTicks(), |
+ testing_clock_sender_->NowTicks() + |
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
true); |
SendVideoFrame(video_start, testing_clock_sender_->NowTicks()); |
@@ -874,18 +878,18 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) { |
Create(); |
int video_start = kVideoStart; |
- base::TimeTicks send_time; |
+ base::TimeTicks capture_time; |
// Frames will be rendered on completion until the render time stabilizes, i.e. |
// until we have enough data. |
const int frames_before_glitch = 20; |
for (int i = 0; i < frames_before_glitch; ++i) { |
- send_time = testing_clock_sender_->NowTicks(); |
- SendVideoFrame(video_start, send_time); |
+ capture_time = testing_clock_sender_->NowTicks(); |
+ SendVideoFrame(video_start, capture_time); |
test_receiver_video_callback_->AddExpectedResult( |
video_start, |
video_sender_config_.width, |
video_sender_config_.height, |
- send_time, |
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
true); |
frame_receiver_->GetRawVideoFrame( |
base::Bind(&TestReceiverVideoCallback::CheckVideoFrame, |
@@ -897,27 +901,28 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) { |
// Introduce a glitch lasting for 10 frames. |
sender_to_receiver_.SetSendPackets(false); |
for (int i = 0; i < 10; ++i) { |
- send_time = testing_clock_sender_->NowTicks(); |
+ capture_time = testing_clock_sender_->NowTicks(); |
// First 3 will be sent and lost. |
- SendVideoFrame(video_start, send_time); |
+ SendVideoFrame(video_start, capture_time); |
RunTasks(kFrameTimerMs); |
video_start++; |
} |
sender_to_receiver_.SetSendPackets(true); |
RunTasks(100); |
- send_time = testing_clock_sender_->NowTicks(); |
+ capture_time = testing_clock_sender_->NowTicks(); |
// Frame 1 should be acked by now and we should have an opening to send 4. |
- SendVideoFrame(video_start, send_time); |
+ SendVideoFrame(video_start, capture_time); |
RunTasks(kFrameTimerMs); |
// Frames 1-3 are old frames by now, and therefore should be decoded, but |
// not rendered. The next frame we expect to render is frame #4. |
- test_receiver_video_callback_->AddExpectedResult(video_start, |
- video_sender_config_.width, |
- video_sender_config_.height, |
- send_time, |
- true); |
+ test_receiver_video_callback_->AddExpectedResult( |
+ video_start, |
+ video_sender_config_.width, |
+ video_sender_config_.height, |
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
+ true); |
frame_receiver_->GetRawVideoFrame( |
base::Bind(&TestReceiverVideoCallback::CheckVideoFrame, |
@@ -938,19 +943,20 @@ TEST_F(End2EndTest, DISABLED_DropEveryOtherFrame3Buffers) { |
sender_to_receiver_.DropAllPacketsBelongingToOddFrames(); |
int video_start = kVideoStart; |
- base::TimeTicks send_time; |
+ base::TimeTicks capture_time; |
int i = 0; |
for (; i < 20; ++i) { |
- send_time = testing_clock_sender_->NowTicks(); |
- SendVideoFrame(video_start, send_time); |
+ capture_time = testing_clock_sender_->NowTicks(); |
+ SendVideoFrame(video_start, capture_time); |
if (i % 2 == 0) { |
test_receiver_video_callback_->AddExpectedResult( |
video_start, |
video_sender_config_.width, |
video_sender_config_.height, |
- send_time, |
+ capture_time + |
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
i == 0); |
// GetRawVideoFrame will not return the frame until we are close in |
@@ -984,14 +990,14 @@ TEST_F(End2EndTest, CryptoVideo) { |
int frames_counter = 0; |
for (; frames_counter < 3; ++frames_counter) { |
- const base::TimeTicks send_time = testing_clock_sender_->NowTicks(); |
- SendVideoFrame(frames_counter, send_time); |
+ const base::TimeTicks capture_time = testing_clock_sender_->NowTicks(); |
+ SendVideoFrame(frames_counter, capture_time); |
test_receiver_video_callback_->AddExpectedResult( |
frames_counter, |
video_sender_config_.width, |
video_sender_config_.height, |
- send_time, |
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
true); |
RunTasks(kFrameTimerMs); |
@@ -1041,15 +1047,15 @@ TEST_F(End2EndTest, VideoLogging) { |
int video_start = kVideoStart; |
const int num_frames = 5; |
for (int i = 0; i < num_frames; ++i) { |
- base::TimeTicks send_time = testing_clock_sender_->NowTicks(); |
+ base::TimeTicks capture_time = testing_clock_sender_->NowTicks(); |
test_receiver_video_callback_->AddExpectedResult( |
video_start, |
video_sender_config_.width, |
video_sender_config_.height, |
- send_time, |
+ capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs), |
true); |
- SendVideoFrame(video_start, send_time); |
+ SendVideoFrame(video_start, capture_time); |
RunTasks(kFrameTimerMs); |
frame_receiver_->GetRawVideoFrame( |