Chromium Code Reviews| Index: media/base/stream_parser_unittest.cc |
| diff --git a/media/base/stream_parser_unittest.cc b/media/base/stream_parser_unittest.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..53eeff3b26f20147e03f1e5c7b0e9c3e08e31d9b |
| --- /dev/null |
| +++ b/media/base/stream_parser_unittest.cc |
| @@ -0,0 +1,391 @@ |
| +// Copyright 2014 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/basictypes.h" |
| +#include "media/base/stream_parser.h" |
| +#include "media/base/stream_parser_buffer.h" |
| +#include "testing/gtest/include/gtest/gtest.h" |
| + |
| +namespace media { |
| + |
| +const int kEnd = -1; |
| +const uint8 kFakeData[] = { 0xFF }; |
| + |
| +bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) { |
|
xhwang
2014/01/29 08:04:50
should these functions be static?
wolenetz
2014/02/05 02:49:53
Done.
|
| + if (buffer->type() == StreamParserBuffer::kAudio) |
| + return true; |
| + return false; |
|
xhwang
2014/01/29 08:04:50
return buffer->type() == StreamParserBuffer::kAudio;
wolenetz
2014/02/05 02:49:53
Done.
|
| +} |
| + |
| +bool IsVideo(scoped_refptr<StreamParserBuffer> buffer) { |
| + if (buffer->type() == StreamParserBuffer::kVideo) |
| + return true; |
| + return false; |
|
xhwang
2014/01/29 08:04:50
ditto
wolenetz
2014/02/05 02:49:53
Done.
|
| +} |
| + |
| +bool IsText(scoped_refptr<StreamParserBuffer> buffer) { |
| + if (buffer->type() == StreamParserBuffer::kText) |
| + return true; |
| + return false; |
|
xhwang
2014/01/29 08:04:50
ditto
wolenetz
2014/02/05 02:49:53
Done.
|
| +} |
| + |
| +class StreamParserTest : public testing::Test { |
| + protected: |
| + StreamParserTest() {} |
| + |
|
xhwang
2014/01/29 08:04:50
remove extra empty line
wolenetz
2014/02/05 02:49:53
Done.
|
| + |
| + void GenerateBuffers(StreamParser::BufferQueue* queue, |
|
xhwang
2014/01/29 08:04:50
add doc about what this function does
wolenetz
2014/02/05 02:49:53
Done, and also made it a static nonmember.
|
| + int decode_timestamps[], |
| + StreamParserBuffer::Type type, |
| + int text_track_number) { |
|
xhwang
2014/01/29 08:04:50
reorder parameters: we usually put input before output
wolenetz
2014/02/05 02:49:53
Done.
|
| + DCHECK(queue); |
| + for (int i = 0; decode_timestamps[i] != kEnd; i++) { |
|
xhwang
2014/01/29 08:04:50
nit: ++i
wolenetz
2014/02/05 02:49:53
Done.
|
| + scoped_refptr<StreamParserBuffer> buffer = |
| + StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData), |
| + true, type); |
| + if (type == StreamParserBuffer::kText) |
| + buffer->set_text_track_number(text_track_number); |
| + buffer->SetDecodeTimestamp( |
| + base::TimeDelta::FromMicroseconds(decode_timestamps[i])); |
| + queue->push_back(buffer); |
| + } |
| + } |
| + |
| + void GenerateBuffers(StreamParser::BufferQueue* queue, |
|
xhwang
2014/01/29 08:04:50
Usually we don't like function overloading. How about giving these helpers distinct names?
wolenetz
2014/02/05 02:49:53
Done, also saving the |queue| parameter by using well-defined members.
|
| + int decode_timestamps[], |
| + StreamParserBuffer::Type type) { |
| + DCHECK_NE(type, StreamParserBuffer::kText); // Needs a text track number. |
| + GenerateBuffers(queue, decode_timestamps, type, -1); |
| + } |
| + |
| + void GenerateTextBuffers(StreamParser::BufferQueue* queue, |
| + int decode_timestamps[], |
| + int text_track_number) { |
| + GenerateBuffers(queue, decode_timestamps, StreamParserBuffer::kText, |
| + text_track_number); |
| + } |
| + |
| + std::string BufferQueueToString(const StreamParser::BufferQueue* queue, |
|
xhwang
2014/01/29 08:04:50
pass as const-ref?
xhwang
2014/01/29 08:04:50
We use StreamParser::BufferQueue a lot in this test; consider a shorter typedef.
xhwang
2014/01/29 08:04:50
doc about the output string format
wolenetz
2014/02/05 02:49:53
Changed to just use well-defined member |merged_buffers|.
wolenetz
2014/02/05 02:49:53
Done and ditto for TextBufferQueueMap.
wolenetz
2014/02/05 02:49:53
Done.
|
| + bool include_type_and_text_track) { |
| + std::stringstream results_stream; |
|
xhwang
2014/01/29 08:04:50
add #include <sstream> for this
wolenetz
2014/02/05 02:49:53
Done.
|
| + for (StreamParser::BufferQueue::const_iterator itr = queue->begin(); |
| + itr != queue->end(); |
| + ++itr) { |
| + if (itr != queue->begin()) |
| + results_stream << " "; |
| + scoped_refptr<StreamParserBuffer> buffer = *itr; |
|
xhwang
2014/01/29 08:04:50
nit: you can avoid increment/decrement the ref count here.
wolenetz
2014/02/05 02:49:53
Nice :) Done.
|
| + if (include_type_and_text_track) { |
| + switch (buffer->type()) { |
| + case StreamParserBuffer::kAudio: |
| + results_stream << "A"; |
| + break; |
| + case StreamParserBuffer::kVideo: |
| + results_stream << "V"; |
| + break; |
| + case StreamParserBuffer::kText: |
| + results_stream << "T"; |
| + results_stream << buffer->text_track_number() << ":"; |
| + break; |
| + } |
| + } |
| + results_stream << buffer->GetDecodeTimestamp().InMicroseconds(); |
| + } |
| + |
| + return results_stream.str(); |
| + } |
| + |
| + |
| + void VerifySuccessfulMerge(const StreamParser::BufferQueue& audio_buffers, |
| + const StreamParser::BufferQueue& video_buffers, |
| + const StreamParser::TextBufferQueueMap& text_map, |
| + const std::string expected, |
|
xhwang
2014/01/29 08:04:50
pass by const-ref
wolenetz
2014/02/05 02:49:53
Changed to just use well-defined members |audio_buffers|, |video_buffers| and |text_map|.
|
| + bool verify_type_and_text_track_sequence, |
| + StreamParser::BufferQueue* merged_buffers) { |
|
xhwang
2014/01/29 08:04:50
Add a doc. It's not obvious that |merged_buffers| is both an input and an output.
wolenetz
2014/02/05 02:49:53
Done.
|
| + // |merged_buffers| may already have some buffers. Count them by type for |
| + // later inclusion in verification. |
| + size_t original_audio_in_merged = |
| + static_cast<size_t>(count_if(merged_buffers->begin(), |
|
xhwang
2014/01/29 08:04:50
s/count_if/std::count_if
add #include <algorithm>
wolenetz
2014/02/05 02:49:53
Done.
|
| + merged_buffers->end(), IsAudio)); |
| + size_t original_video_in_merged = |
| + static_cast<size_t>(count_if(merged_buffers->begin(), |
| + merged_buffers->end(), IsVideo)); |
| + size_t original_text_in_merged = |
| + static_cast<size_t>(count_if(merged_buffers->begin(), |
| + merged_buffers->end(), IsText)); |
| + |
| + EXPECT_TRUE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, |
| + text_map, merged_buffers)); |
| + |
| + // Verify resulting contents of |merged_buffers| matches |expected|. |
| + EXPECT_EQ(expected, |
| + BufferQueueToString(merged_buffers, |
| + verify_type_and_text_track_sequence)); |
| + |
| + // If not verifying the sequence of types, at least still verify that the |
| + // correct number of each type of buffer is in the merge result. |
| + size_t audio_in_merged = |
| + static_cast<size_t>(count_if(merged_buffers->begin(), |
| + merged_buffers->end(), IsAudio)); |
| + size_t video_in_merged = |
| + static_cast<size_t>(count_if(merged_buffers->begin(), |
| + merged_buffers->end(), IsVideo)); |
| + size_t text_in_merged = |
| + static_cast<size_t>(count_if(merged_buffers->begin(), |
| + merged_buffers->end(), IsText)); |
| + EXPECT_GE(audio_in_merged, original_audio_in_merged); |
| + EXPECT_GE(video_in_merged, original_video_in_merged); |
| + EXPECT_GE(text_in_merged, original_text_in_merged); |
| + |
| + EXPECT_EQ(audio_buffers.size(), audio_in_merged - original_audio_in_merged); |
| + EXPECT_EQ(video_buffers.size(), video_in_merged - original_video_in_merged); |
| + |
| + size_t expected_text_buffer_count = 0; |
| + for (StreamParser::TextBufferQueueMap::const_iterator itr = |
| + text_map.begin(); |
| + itr != text_map.end(); |
| + ++itr) { |
| + expected_text_buffer_count += itr->second.size(); |
| + } |
| + EXPECT_EQ(expected_text_buffer_count, |
| + text_in_merged - original_text_in_merged); |
| + } |
| + |
| + void VerifySuccessfulMerge(const StreamParser::BufferQueue& audio_buffers, |
| + const StreamParser::BufferQueue& video_buffers, |
| + const StreamParser::TextBufferQueueMap& text_map, |
| + const std::string expected, |
| + bool verify_type_and_text_track_sequence) { |
| + StreamParser::BufferQueue merged_buffers; |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, |
| + verify_type_and_text_track_sequence, &merged_buffers); |
| + } |
| + |
| + private: |
| + DISALLOW_COPY_AND_ASSIGN(StreamParserTest); |
| +}; |
| + |
| +TEST_F(StreamParserTest, MergeBufferQueues_AllEmpty) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
|
xhwang
2014/01/29 08:04:50
these are used in all tests, maybe you can just declare them as members of the test fixture.
wolenetz
2014/02/05 02:49:53
Done. Saves spending a lot of lines: thank you!
|
| + |
| + std::string expected = ""; |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// Merging a single audio buffer with empty video and text queues should |
| +// yield exactly that one audio buffer. |
| +TEST_F(StreamParserTest, MergeBufferQueues_SingleAudioBuffer) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "A100"; |
| + int audio_timestamps[] = { 100, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// Merging a single video buffer with empty audio and text queues should |
| +// yield exactly that one video buffer. |
| +TEST_F(StreamParserTest, MergeBufferQueues_SingleVideoBuffer) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "V100"; |
| + int video_timestamps[] = { 100, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// Merging a single text buffer (track number 12) should yield that buffer; |
| +// the text map's key is not part of the merge result. |
| +TEST_F(StreamParserTest, MergeBufferQueues_SingleTextBuffer) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers, text_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "T12:100"; |
| + int text_timestamps[] = { 100, kEnd }; |
| + GenerateTextBuffers(&text_buffers, text_timestamps, 12); |
| + text_map.insert(std::make_pair(3141593, text_buffers)); // Immaterial index. |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// Audio and video buffers with interleaved decode timestamps should merge |
| +// into a single nondecreasing sequence. |
| +TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideo) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "A100 V101 V102 A103 A104 V105"; |
| + int audio_timestamps[] = { 100, 103, 104, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 101, 102, 105, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +TEST_F(StreamParserTest, MergeBufferQueues_OverlappingMultipleText) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "T1:100 T1:101 T2:103 T1:104 T2:105 T2:106"; |
|
xhwang
2014/01/29 08:04:50
We are using ":" for text because it has a text track number. Maybe use ":" for audio/video too for consistency?
wolenetz
2014/02/05 02:49:53
Done (T1:100 A:100).
|
| + int text_timestamps_1[] = { 100, 101, 104, kEnd }; |
| + GenerateTextBuffers(&text_1, text_timestamps_1, 1); |
| + int text_timestamps_2[] = { 103, 105, 106, kEnd }; |
| + GenerateTextBuffers(&text_2, text_timestamps_2, 2); |
| + text_map.insert(std::make_pair(50, text_1)); // Immaterial index. |
| + text_map.insert(std::make_pair(51, text_2)); // Immaterial index. |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// Audio, video, and two text tracks with interleaved timestamps should merge |
| +// into a single nondecreasing sequence. |
| +TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideoText) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107"; |
| + int audio_timestamps[] = { 100, 105, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 101, 103, 106, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + int text_timestamps_1[] = { 102, 107, kEnd }; |
| + GenerateTextBuffers(&text_1, text_timestamps_1, 1); |
| + int text_timestamps_2[] = { 104, kEnd }; |
| + GenerateTextBuffers(&text_2, text_timestamps_2, 2); |
| + text_map.insert(std::make_pair(50, text_1)); // Immaterial index. |
| + text_map.insert(std::make_pair(51, text_2)); // Immaterial index. |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// Duplicate timestamps within a single stream are valid; the merge should |
| +// keep them all, in nondecreasing order. |
| +TEST_F(StreamParserTest, MergeBufferQueues_NonDecreasingNoCrossMediaDuplicate) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "A100 A100 A100 V101 V101 V101 A102 V103 V103"; |
| + int audio_timestamps[] = { 100, 100, 100, 102, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 101, 101, 101, 103, 103, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); |
| +} |
| + |
| +// |expected| omits type/track tags and the final argument is false, because |
| +// tie-break order across streams is unspecified (see comment below). |
| +TEST_F(StreamParserTest, MergeBufferQueues_CrossStreamDuplicates) { |
| + // Interface keeps the choice undefined of which stream's buffer wins the |
| + // selection when timestamps are tied. Verify at least the right number of |
| + // each kind of buffer results, and that buffers are in nondecreasing order. |
| + StreamParser::BufferQueue audio_buffers, video_buffers, text_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "100 100 100 100 100 100 102 102 102 102 102 102 102"; |
| + int audio_timestamps[] = { 100, 100, 100, 102, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 100, 100, 102, 102, 102, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + int text_timestamps[] = { 100, 102, 102, 102, kEnd }; |
| + GenerateTextBuffers(&text_buffers, text_timestamps, 1); |
| + text_map.insert(std::make_pair(50, text_buffers)); // Immaterial index. |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, |
| + false); |
| +} |
| + |
| +// A decreasing decode timestamp within one stream (102 followed by 100) must |
| +// cause MergeBufferQueues() to fail. |
| +TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingSingleStream) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + int audio_timestamps[] = { 101, 102, 100, 103, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + |
| + StreamParser::BufferQueue merged_buffers; |
| + EXPECT_FALSE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, |
| + text_map, &merged_buffers)); |
| +} |
| + |
| +// Decreasing decode timestamps in multiple input streams must also cause |
| +// MergeBufferQueues() to fail. |
| +TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingMultipleStreams) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + int audio_timestamps[] = { 101, 102, 100, 103, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 104, 100, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + |
| + StreamParser::BufferQueue merged_buffers; |
| + EXPECT_FALSE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, |
| + text_map, &merged_buffers)); |
| +} |
| + |
| +// Merging a second batch of queues into the same |merged_buffers| should |
| +// append the second merge result after the first. |
| +TEST_F(StreamParserTest, MergeBufferQueues_ValidAppendToExistingMerge) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107"; |
| + int audio_timestamps[] = { 100, 105, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 101, 103, 106, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + int text_timestamps_1[] = { 102, 107, kEnd }; |
| + GenerateTextBuffers(&text_1, text_timestamps_1, 1); |
| + int text_timestamps_2[] = { 104, kEnd }; |
| + GenerateTextBuffers(&text_2, text_timestamps_2, 2); |
| + text_map.insert(std::make_pair(50, text_1)); // Immaterial index. |
| + text_map.insert(std::make_pair(51, text_2)); // Immaterial index. |
| + StreamParser::BufferQueue merged_buffers; |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, |
| + &merged_buffers); |
| + |
| + // Reuse the same queues for a second batch whose timestamps are no earlier |
| + // than the end of the first merge result. |
| + audio_buffers.clear(); |
| + video_buffers.clear(); |
| + text_1.clear(); |
| + text_2.clear(); |
| + text_map.clear(); |
| + |
| + expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107 " |
| + "A107 V111 T1:112 V113 T2:114 A115 V116 T1:117"; |
| + int more_audio_timestamps[] = { 107, 115, kEnd }; |
| + GenerateBuffers(&audio_buffers, more_audio_timestamps, |
| + StreamParserBuffer::kAudio); |
| + int more_video_timestamps[] = { 111, 113, 116, kEnd }; |
| + GenerateBuffers(&video_buffers, more_video_timestamps, |
| + StreamParserBuffer::kVideo); |
| + int more_text_timestamps_1[] = { 112, 117, kEnd }; |
| + GenerateTextBuffers(&text_1, more_text_timestamps_1, 1); |
| + int more_text_timestamps_2[] = { 114, kEnd }; |
| + GenerateTextBuffers(&text_2, more_text_timestamps_2, 2); |
| + text_map.insert(std::make_pair(50, text_1)); // Immaterial index. |
| + text_map.insert(std::make_pair(51, text_2)); // Immaterial index. |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, |
| + &merged_buffers); |
| +} |
| + |
| +// Appending an empty merge to an existing result succeeds without changing |
| +// it, but appending buffers older than the existing result's last timestamp |
| +// must fail. |
| +TEST_F(StreamParserTest, MergeBufferQueues_InvalidAppendToExistingMerge) { |
| + StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; |
| + StreamParser::TextBufferQueueMap text_map; |
| + |
| + std::string expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107"; |
| + int audio_timestamps[] = { 100, 105, kEnd }; |
| + GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); |
| + int video_timestamps[] = { 101, 103, 106, kEnd }; |
| + GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); |
| + int text_timestamps_1[] = { 102, 107, kEnd }; |
| + GenerateTextBuffers(&text_1, text_timestamps_1, 1); |
| + int text_timestamps_2[] = { 104, kEnd }; |
| + GenerateTextBuffers(&text_2, text_timestamps_2, 2); |
| + text_map.insert(std::make_pair(50, text_1)); // Immaterial index. |
| + text_map.insert(std::make_pair(51, text_2)); // Immaterial index. |
| + StreamParser::BufferQueue merged_buffers; |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, |
| + &merged_buffers); |
| + |
| + audio_buffers.clear(); |
| + video_buffers.clear(); |
| + text_1.clear(); |
| + text_2.clear(); |
| + text_map.clear(); |
| + |
| + // Appending empty buffers to pre-existing merge result should succeed and not |
| + // change the existing result. |
| + VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, |
| + &merged_buffers); |
| + |
| + // But appending something with a lower timestamp than the last timestamp |
| + // in the pre-existing merge result should fail. |
| + int more_audio_timestamps[] = { 106, kEnd }; |
| + GenerateBuffers(&audio_buffers, more_audio_timestamps, |
| + StreamParserBuffer::kAudio); |
| + EXPECT_FALSE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, |
| + text_map, &merged_buffers)); |
| +} |
| + |
| +} // namespace media |
| + |