Index: media/cast/test/end2end_unittest.cc |
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc |
index e9cefaad1522e30486e71682195d8ce8f95c577e..d82b765d399e8b94d7771640aa6aba2800499d94 100644 |
--- a/media/cast/test/end2end_unittest.cc |
+++ b/media/cast/test/end2end_unittest.cc |
@@ -199,6 +199,7 @@ class TestReceiverAudioCallback : |
// Check all out allowed delays. |
double square_error = 0; |
double variance = 0; |
+ |
Alpha Left Google
2013/11/07 01:10:11
nit: please don't fix style in a functional CL.
pwestin
2013/11/07 17:16:04
Done.
|
for (size_t i = 0; i < reference_audio_frame.samples.size() - delay; ++i) { |
size_t error = reference_audio_frame.samples[i] - |
output_audio_samples[i + delay]; |
@@ -247,6 +248,7 @@ class TestReceiverAudioCallback : |
CheckBasicAudioFrame(audio_frame, playout_time); |
ExpectedAudioFrame expected_audio_frame = expected_frame_.front(); |
expected_frame_.pop_front(); |
+ |
if (audio_frame->samples.size() == 0) return; // No more checks needed. |
size_t max_delay = CalculateMaxResamplingDelay(48000, 32000, |
@@ -284,8 +286,9 @@ class TestReceiverAudioCallback : |
size_t number_of_samples = audio_frame->data.size() / 2; |
for (size_t i = 0; i < number_of_samples; ++i) { |
- uint16 sample = (audio_frame->data[1 + i * sizeof(uint16)]) + |
- (static_cast<uint16>(audio_frame->data[i * sizeof(uint16)]) << 8); |
+ uint16 sample = |
+ static_cast<uint8>(audio_frame->data[1 + i * sizeof(uint16)]) + |
+ (static_cast<uint16>(audio_frame->data[i * sizeof(uint16)]) << 8); |
output_audio_samples.push_back(static_cast<int16>(sample)); |
} |
EXPECT_GE(ComputeBestSNR(expected_audio_frame.audio_frame, |
@@ -541,7 +544,7 @@ TEST_F(End2EndTest, LoopNoLossPcm16) { |
SetupConfig(kPcm16, 32000, false, 1); |
Create(); |
test_receiver_audio_callback_->SetExpectedResult(kAudioSamplingFrequency, 20, |
- 25); |
+ 20); |
int video_start = 1; |
int audio_diff = kFrameTimerMs; |
@@ -862,6 +865,91 @@ TEST_F(End2EndTest, ResetReferenceFrameId) { |
test_receiver_video_callback_->number_times_called()); |
} |
+TEST_F(End2EndTest, CryptoVideo) { |
+ SetupConfig(kPcm16, 32000, false, 1); |
+ |
+ video_sender_config_.aes_iv_mask = "1234567890abcdeffedcba0987654321"; |
+ video_sender_config_.aes_key = "deadbeefcafeb0b0b0b0cafedeadbeef"; |
+ |
+ video_receiver_config_.aes_iv_mask = video_sender_config_.aes_iv_mask; |
+ video_receiver_config_.aes_key = video_sender_config_.aes_key; |
+ |
+ Create(); |
+ |
+ int frames_counter = 0; |
+ for (; frames_counter < 20; ++frames_counter) { |
+ const base::TimeTicks send_time = testing_clock_.NowTicks(); |
+ |
+ SendVideoFrame(frames_counter, send_time); |
+ |
+ test_receiver_video_callback_->AddExpectedResult(frames_counter, |
+ video_sender_config_.width, video_sender_config_.height, send_time); |
+ |
+ // GetRawVideoFrame will not return the frame until we are close to the |
+ // time in which we should render the frame. |
+ frame_receiver_->GetRawVideoFrame( |
+ base::Bind(&TestReceiverVideoCallback::CheckVideoFrame, |
+ test_receiver_video_callback_)); |
+ RunTasks(kFrameTimerMs); |
+ } |
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline. |
+ EXPECT_EQ(frames_counter, |
+ test_receiver_video_callback_->number_times_called()); |
+} |
+ |
+ |
+TEST_F(End2EndTest, CryptoAudio) { |
+ SetupConfig(kPcm16, 32000, false, 1); |
+ |
+ audio_sender_config_.aes_iv_mask = "abcdeffedcba12345678900987654321"; |
+ audio_sender_config_.aes_key = "deadbeefcafecafedeadbeefb0b0b0b0"; |
+ |
+ audio_receiver_config_.aes_iv_mask = audio_sender_config_.aes_iv_mask; |
+ audio_receiver_config_.aes_key = audio_sender_config_.aes_key; |
+ |
+ Create(); |
+ test_receiver_audio_callback_->SetExpectedResult(32000, 18, 20); |
+ |
+ int frames_counter = 0; |
+ for (; frames_counter < 20; ++frames_counter) { |
+ int num_10ms_blocks = 2; |
+ |
+ const base::TimeTicks send_time = testing_clock_.NowTicks(); |
+ |
+ PcmAudioFrame* audio_frame = CreateAudioFrame(num_10ms_blocks, |
+ kSoundFrequency, 32000); |
+ |
+ if (frames_counter != 0) { |
+ // Due to the re-sampler and NetEq in the webrtc AudioCodingModule the |
+ // first samples will be 0 and then slowly ramp up to its real amplitude; |
+ // ignore the first frame. |
+ test_receiver_audio_callback_->AddExpectedResult(audio_frame, |
+ num_10ms_blocks, send_time); |
+ } |
+ frame_input_->InsertRawAudioFrame(audio_frame, send_time, |
+ base::Bind(FrameInput::DeleteAudioFrame, audio_frame)); |
+ |
+ RunTasks(num_10ms_blocks * 10); |
+ |
+ if (frames_counter == 0) { |
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks, |
+ 32000, |
+ base::Bind(&TestReceiverAudioCallback::IgnoreAudioFrame, |
+ test_receiver_audio_callback_)); |
+ } else { |
+ frame_receiver_->GetRawAudioFrame(num_10ms_blocks, |
+ 32000, |
+ base::Bind(&TestReceiverAudioCallback::CheckPcmAudioFrame, |
+ test_receiver_audio_callback_)); |
+ } |
+ } |
+ RunTasks(2 * kFrameTimerMs + 1); // Empty the pipeline. |
+ EXPECT_EQ(frames_counter - 1, |
+ test_receiver_audio_callback_->number_times_called()); |
+} |
+ |
+ |
+ |
// TODO(pwestin): Add repeatable packet loss test. |
// TODO(pwestin): Add test for misaligned send get calls. |
// TODO(pwestin): Add more tests that does not resample. |