OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/basictypes.h" | |
6 #include "media/base/stream_parser.h" | |
7 #include "media/base/stream_parser_buffer.h" | |
8 #include "testing/gtest/include/gtest/gtest.h" | |
9 | |
10 namespace media { | |
11 | |
12 const int kEnd = -1; | |
13 const uint8 kFakeData[] = { 0xFF }; | |
14 | |
15 bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) { | |
xhwang
2014/01/29 08:04:50
should these functions be static?
wolenetz
2014/02/05 02:49:53
Done.
| |
16 if (buffer->type() == StreamParserBuffer::kAudio) | |
17 return true; | |
18 return false; | |
xhwang
2014/01/29 08:04:50
return buffer->type() == StreamParserBuffer::kAudio;
wolenetz
2014/02/05 02:49:53
Done.
| |
19 } | |
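A sketch of how this helper might read once both of xhwang's points above are applied (static linkage and returning the comparison directly); the same pattern would apply to IsVideo() and IsText() below. Illustrative only, not necessarily the revised patchset:

// Sketch only: static, file-local helper that returns the comparison directly.
static bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) {
  return buffer->type() == StreamParserBuffer::kAudio;
}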
20 | |
21 bool IsVideo(scoped_refptr<StreamParserBuffer> buffer) { | |
22 if (buffer->type() == StreamParserBuffer::kVideo) | |
23 return true; | |
24 return false; | |
xhwang
2014/01/29 08:04:50
ditto
wolenetz
2014/02/05 02:49:53
Done.
| |
25 } | |
26 | |
27 bool IsText(scoped_refptr<StreamParserBuffer> buffer) { | |
28 if (buffer->type() == StreamParserBuffer::kText) | |
29 return true; | |
30 return false; | |
xhwang
2014/01/29 08:04:50
ditto
wolenetz
2014/02/05 02:49:53
Done.
| |
31 } | |
32 | |
33 class StreamParserTest : public testing::Test { | |
34 protected: | |
35 StreamParserTest() {} | |
36 | |
xhwang
2014/01/29 08:04:50
remove extra empty line
wolenetz
2014/02/05 02:49:53
Done.
| |
37 | |
38 void GenerateBuffers(StreamParser::BufferQueue* queue, | |
xhwang
2014/01/29 08:04:50
add doc about what this function does
wolenetz
2014/02/05 02:49:53
Done, and also made it a static nonmember.
| |
39 int decode_timestamps[], | |
40 StreamParserBuffer::Type type, | |
41 int text_track_number) { | |
xhwang
2014/01/29 08:04:50
reorder parameters: we usually put input before output
wolenetz
2014/02/05 02:49:53
Done.
| |
42 DCHECK(queue); | |
43 for (int i = 0; decode_timestamps[i] != kEnd; i++) { | |
xhwang
2014/01/29 08:04:50
nit: ++i
wolenetz
2014/02/05 02:49:53
Done.
| |
44 scoped_refptr<StreamParserBuffer> buffer = | |
45 StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData), | |
46 true, type); | |
47 if (type == StreamParserBuffer::kText) | |
48 buffer->set_text_track_number(text_track_number); | |
49 buffer->SetDecodeTimestamp( | |
50 base::TimeDelta::FromMicroseconds(decode_timestamps[i])); | |
51 queue->push_back(buffer); | |
52 } | |
53 } | |
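Putting the review points on this helper together (documentation, input parameters before the output parameter, ++i, and the reply's note that it became a static non-member), a hedged sketch of one possible shape; the wording and exact parameter order are assumptions, not the landed change:

// Sketch only. Appends one StreamParserBuffer of |type| to |queue| for each
// entry in |decode_timestamps|, which must be terminated by kEnd. Text
// buffers are tagged with |text_track_number|.
static void GenerateBuffers(const int* decode_timestamps,
                            StreamParserBuffer::Type type,
                            int text_track_number,
                            StreamParser::BufferQueue* queue) {
  DCHECK(queue);
  for (int i = 0; decode_timestamps[i] != kEnd; ++i) {
    scoped_refptr<StreamParserBuffer> buffer =
        StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData), true, type);
    if (type == StreamParserBuffer::kText)
      buffer->set_text_track_number(text_track_number);
    buffer->SetDecodeTimestamp(
        base::TimeDelta::FromMicroseconds(decode_timestamps[i]));
    queue->push_back(buffer);
  }
}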
54 | |
55 void GenerateBuffers(StreamParser::BufferQueue* queue, | |
xhwang
2014/01/29 08:04:50
Usually we don't like function overloading. How ab
wolenetz
2014/02/05 02:49:53
Done, also saving the |queue| parameter by using w
| |
56 int decode_timestamps[], | |
57 StreamParserBuffer::Type type) { | |
58 DCHECK_NE(type, StreamParserBuffer::kText); // Needs a text track number. | |
59 GenerateBuffers(queue, decode_timestamps, type, -1); | |
60 } | |
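One way to address the reviewer's dislike of overloading is to give the wrappers distinct names. This is a sketch only; the names GenerateAudioBuffers/GenerateVideoBuffers are hypothetical and it assumes the reordered GenerateBuffers() sketched earlier, not the actual revision:

// Sketch only: distinctly named wrappers instead of an overload set.
// Assumes the reordered, static GenerateBuffers() sketched above.
static void GenerateAudioBuffers(const int* timestamps,
                                 StreamParser::BufferQueue* queue) {
  GenerateBuffers(timestamps, StreamParserBuffer::kAudio, -1, queue);
}

static void GenerateVideoBuffers(const int* timestamps,
                                 StreamParser::BufferQueue* queue) {
  GenerateBuffers(timestamps, StreamParserBuffer::kVideo, -1, queue);
}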
61 | |
62 void GenerateTextBuffers(StreamParser::BufferQueue* queue, | |
63 int decode_timestamps[], | |
64 int text_track_number) { | |
65 GenerateBuffers(queue, decode_timestamps, StreamParserBuffer::kText, | |
66 text_track_number); | |
67 } | |
68 | |
69 std::string BufferQueueToString(const StreamParser::BufferQueue* queue, | |
xhwang
2014/01/29 08:04:50
pass as const-ref?
xhwang
2014/01/29 08:04:50
We use StreamParser::BufferQueue a lot in this test; consider a shorter typedef for it.
xhwang
2014/01/29 08:04:50
doc about the output string format
wolenetz
2014/02/05 02:49:53
Changed to just use well-defined member |merged_bu
wolenetz
2014/02/05 02:49:53
Done and ditto for TextBufferQueueMap.
wolenetz
2014/02/05 02:49:53
Done.
| |
70 bool include_type_and_text_track) { | |
71 std::stringstream results_stream; | |
xhwang
2014/01/29 08:04:50
add #include <sstream> for this
wolenetz
2014/02/05 02:49:53
Done.
| |
72 for (StreamParser::BufferQueue::const_iterator itr = queue->begin(); | |
73 itr != queue->end(); | |
74 ++itr) { | |
75 if (itr != queue->begin()) | |
76 results_stream << " "; | |
77 scoped_refptr<StreamParserBuffer> buffer = *itr; | |
xhwang
2014/01/29 08:04:50
nit: you can avoid incrementing/decrementing the ref count here
wolenetz
2014/02/05 02:49:53
Nice :) Done.
| |
78 if (include_type_and_text_track) { | |
79 switch (buffer->type()) { | |
80 case StreamParserBuffer::kAudio: | |
81 results_stream << "A"; | |
82 break; | |
83 case StreamParserBuffer::kVideo: | |
84 results_stream << "V"; | |
85 break; | |
86 case StreamParserBuffer::kText: | |
87 results_stream << "T"; | |
88 results_stream << buffer->text_track_number() << ":"; | |
89 break; | |
90 } | |
91 } | |
92 results_stream << buffer->GetDecodeTimestamp().InMicroseconds(); | |
93 } | |
94 | |
95 return results_stream.str(); | |
96 } | |
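Taking the comments on this helper together (const-ref parameter, documenting the output format, the <sstream> include, and avoiding ref-count churn in the loop), a sketch of how the function might look. The reply indicates the landed revision instead switched to operating on a well-defined member, so treat this purely as an illustration of the individual suggestions:

// Sketch only. Output format: space-separated entries, one per buffer, in
// queue order. With |include_type_and_text_track| set, each entry is prefixed
// with "A", "V", or "T<track>:", followed by the decode timestamp in
// microseconds (e.g. "A100 V101 T1:102"). Requires #include <sstream>.
std::string BufferQueueToString(const StreamParser::BufferQueue& queue,
                                bool include_type_and_text_track) {
  std::stringstream results_stream;
  for (StreamParser::BufferQueue::const_iterator itr = queue.begin();
       itr != queue.end(); ++itr) {
    if (itr != queue.begin())
      results_stream << " ";
    StreamParserBuffer* buffer = itr->get();  // No ref-count churn.
    if (include_type_and_text_track) {
      switch (buffer->type()) {
        case StreamParserBuffer::kAudio:
          results_stream << "A";
          break;
        case StreamParserBuffer::kVideo:
          results_stream << "V";
          break;
        case StreamParserBuffer::kText:
          results_stream << "T" << buffer->text_track_number() << ":";
          break;
      }
    }
    results_stream << buffer->GetDecodeTimestamp().InMicroseconds();
  }
  return results_stream.str();
}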
97 | |
98 | |
99 void VerifySuccessfulMerge(const StreamParser::BufferQueue& audio_buffers, | |
100 const StreamParser::BufferQueue& video_buffers, | |
101 const StreamParser::TextBufferQueueMap& text_map, | |
102 const std::string expected, | |
xhwang
2014/01/29 08:04:50
pass by const-ref
wolenetz
2014/02/05 02:49:53
Changed to just use well-defined members |audio_bu
| |
103 bool verify_type_and_text_track_sequence, | |
104 StreamParser::BufferQueue* merged_buffers) { | |
xhwang
2014/01/29 08:04:50
Add a doc. It's not obvious that |merged_buffers| is both an input and an output (it may already contain buffers, and the merge result is appended to it).
wolenetz
2014/02/05 02:49:53
Done.
| |
105 // |merged_buffers| may already have some buffers. Count them by type for | |
106 // later inclusion in verification. | |
107 size_t original_audio_in_merged = | |
108 static_cast<size_t>(count_if(merged_buffers->begin(), | |
xhwang
2014/01/29 08:04:50
s/count_if/std::count_if
add #include <algorithm>
wolenetz
2014/02/05 02:49:53
Done.
| |
109 merged_buffers->end(), IsAudio)); | |
110 size_t original_video_in_merged = | |
111 static_cast<size_t>(count_if(merged_buffers->begin(), | |
112 merged_buffers->end(), IsVideo)); | |
113 size_t original_text_in_merged = | |
114 static_cast<size_t>(count_if(merged_buffers->begin(), | |
115 merged_buffers->end(), IsText)); | |
116 | |
117 EXPECT_TRUE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, | |
118 text_map, merged_buffers)); | |
119 | |
120 // Verify resulting contents of |merged_buffers| matches |expected|. | |
121 EXPECT_EQ(expected, | |
122 BufferQueueToString(merged_buffers, | |
123 verify_type_and_text_track_sequence)); | |
124 | |
125 // If not verifying the sequence of types, at least still verify that the | |
126 // correct number of each type of buffer is in the merge result. | |
127 size_t audio_in_merged = | |
128 static_cast<size_t>(count_if(merged_buffers->begin(), | |
129 merged_buffers->end(), IsAudio)); | |
130 size_t video_in_merged = | |
131 static_cast<size_t>(count_if(merged_buffers->begin(), | |
132 merged_buffers->end(), IsVideo)); | |
133 size_t text_in_merged = | |
134 static_cast<size_t>(count_if(merged_buffers->begin(), | |
135 merged_buffers->end(), IsText)); | |
136 EXPECT_GE(audio_in_merged, original_audio_in_merged); | |
137 EXPECT_GE(video_in_merged, original_video_in_merged); | |
138 EXPECT_GE(text_in_merged, original_text_in_merged); | |
139 | |
140 EXPECT_EQ(audio_buffers.size(), audio_in_merged - original_audio_in_merged); | |
141 EXPECT_EQ(video_buffers.size(), video_in_merged - original_video_in_merged); | |
142 | |
143 size_t expected_text_buffer_count = 0; | |
144 for (StreamParser::TextBufferQueueMap::const_iterator itr = | |
145 text_map.begin(); | |
146 itr != text_map.end(); | |
147 ++itr) { | |
148 expected_text_buffer_count += itr->second.size(); | |
149 } | |
150 EXPECT_EQ(expected_text_buffer_count, | |
151 text_in_merged - original_text_in_merged); | |
152 } | |
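A hedged sketch of the two points raised inside this helper: a function-level comment making the in/out role of |merged_buffers| explicit, and fully qualified std::count_if, which needs #include <algorithm> at the top of the file. The comment wording is an assumption, and only one of the three counting expressions is shown:

// Sketch only.
//
// Verifies that StreamParser::MergeBufferQueues() succeeds and that the
// resulting contents of |merged_buffers| match |expected|. Note that
// |merged_buffers| is both an input and an output: it may already contain
// buffers, and the merge result is appended to it.
//
// Inside the helper, each counter becomes fully qualified:
size_t original_audio_in_merged = static_cast<size_t>(std::count_if(
    merged_buffers->begin(), merged_buffers->end(), IsAudio));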
153 | |
154 void VerifySuccessfulMerge(const StreamParser::BufferQueue& audio_buffers, | |
155 const StreamParser::BufferQueue& video_buffers, | |
156 const StreamParser::TextBufferQueueMap& text_map, | |
157 const std::string expected, | |
158 bool verify_type_and_text_track_sequence) { | |
159 StreamParser::BufferQueue merged_buffers; | |
160 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, | |
161 verify_type_and_text_track_sequence, &merged_buffers); | |
162 } | |
163 | |
164 private: | |
165 DISALLOW_COPY_AND_ASSIGN(StreamParserTest); | |
166 }; | |
167 | |
168 TEST_F(StreamParserTest, MergeBufferQueues_AllEmpty) { | |
169 StreamParser::BufferQueue audio_buffers, video_buffers; | |
170 StreamParser::TextBufferQueueMap text_map; | |
xhwang
2014/01/29 08:04:50
these are used in all tests, maybe you can just define them as members of the test fixture
wolenetz
2014/02/05 02:49:53
Done. Saves spending a lot of lines: thank you!
| |
171 | |
172 std::string expected = ""; | |
173 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
174 } | |
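Following the suggestion above to hoist the queue and map declarations that every test repeats, one possibility is to make them protected fixture members. A sketch only; the member names and exact shape are assumptions, not the landed change:

// Sketch only: shared fixture members. gtest constructs a fresh fixture for
// every TEST_F, so each test starts with empty queues without explicit resets.
class StreamParserTest : public testing::Test {
 protected:
  StreamParser::BufferQueue audio_buffers_;
  StreamParser::BufferQueue video_buffers_;
  StreamParser::TextBufferQueueMap text_map_;
  // (Existing helpers and the private DISALLOW_COPY_AND_ASSIGN elided.)
};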
175 | |
176 TEST_F(StreamParserTest, MergeBufferQueues_SingleAudioBuffer) { | |
177 StreamParser::BufferQueue audio_buffers, video_buffers; | |
178 StreamParser::TextBufferQueueMap text_map; | |
179 | |
180 std::string expected = "A100"; | |
181 int audio_timestamps[] = { 100, kEnd }; | |
182 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
183 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
184 } | |
185 | |
186 TEST_F(StreamParserTest, MergeBufferQueues_SingleVideoBuffer) { | |
187 StreamParser::BufferQueue audio_buffers, video_buffers; | |
188 StreamParser::TextBufferQueueMap text_map; | |
189 | |
190 std::string expected = "V100"; | |
191 int video_timestamps[] = { 100, kEnd }; | |
192 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
193 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
194 } | |
195 | |
196 TEST_F(StreamParserTest, MergeBufferQueues_SingleTextBuffer) { | |
197 StreamParser::BufferQueue audio_buffers, video_buffers, text_buffers; | |
198 StreamParser::TextBufferQueueMap text_map; | |
199 | |
200 std::string expected = "T12:100"; | |
201 int text_timestamps[] = { 100, kEnd }; | |
202 GenerateTextBuffers(&text_buffers, text_timestamps, 12); | |
203 text_map.insert(std::make_pair(3141593, text_buffers)); // Immaterial index. | |
204 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
205 } | |
206 | |
207 TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideo) { | |
208 StreamParser::BufferQueue audio_buffers, video_buffers; | |
209 StreamParser::TextBufferQueueMap text_map; | |
210 | |
211 std::string expected = "A100 V101 V102 A103 A104 V105"; | |
212 int audio_timestamps[] = { 100, 103, 104, kEnd }; | |
213 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
214 int video_timestamps[] = { 101, 102, 105, kEnd }; | |
215 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
216 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
217 } | |
218 | |
219 TEST_F(StreamParserTest, MergeBufferQueues_OverlappingMultipleText) { | |
220 StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; | |
221 StreamParser::TextBufferQueueMap text_map; | |
222 | |
223 std::string expected = "T1:100 T1:101 T2:103 T1:104 T2:105 T2:106"; | |
xhwang
2014/01/29 08:04:50
We are using ":" for text because it has text trac
wolenetz
2014/02/05 02:49:53
Done (T1:100 A:100).
| |
224 int text_timestamps_1[] = { 100, 101, 104, kEnd }; | |
225 GenerateTextBuffers(&text_1, text_timestamps_1, 1); | |
226 int text_timestamps_2[] = { 103, 105, 106, kEnd }; | |
227 GenerateTextBuffers(&text_2, text_timestamps_2, 2); | |
228 text_map.insert(std::make_pair(50, text_1)); // Immaterial index. | |
229 text_map.insert(std::make_pair(51, text_2)); // Immaterial index. | |
230 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
231 } | |
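Based on the reply above ("Done (T1:100 A:100)."), the expected-string format apparently gained a ":" for audio and video as well. A sketch of what that change to the switch inside BufferQueueToString() might look like; the exact revision is not shown here:

// Sketch only: emit ":" for every type so expected strings read uniformly,
// e.g. "A:100 V:101 T1:102" rather than "A100 V101 T1:102".
case StreamParserBuffer::kAudio:
  results_stream << "A:";
  break;
case StreamParserBuffer::kVideo:
  results_stream << "V:";
  break;
case StreamParserBuffer::kText:
  results_stream << "T" << buffer->text_track_number() << ":";
  break;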
232 | |
233 TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideoText) { | |
234 StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; | |
235 StreamParser::TextBufferQueueMap text_map; | |
236 | |
237 std::string expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107"; | |
238 int audio_timestamps[] = { 100, 105, kEnd }; | |
239 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
240 int video_timestamps[] = { 101, 103, 106, kEnd }; | |
241 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
242 int text_timestamps_1[] = { 102, 107, kEnd }; | |
243 GenerateTextBuffers(&text_1, text_timestamps_1, 1); | |
244 int text_timestamps_2[] = { 104, kEnd }; | |
245 GenerateTextBuffers(&text_2, text_timestamps_2, 2); | |
246 text_map.insert(std::make_pair(50, text_1)); // Immaterial index. | |
247 text_map.insert(std::make_pair(51, text_2)); // Immaterial index. | |
248 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
249 } | |
250 | |
251 TEST_F(StreamParserTest, MergeBufferQueues_NonDecreasingNoCrossMediaDuplicate) { | |
252 StreamParser::BufferQueue audio_buffers, video_buffers; | |
253 StreamParser::TextBufferQueueMap text_map; | |
254 | |
255 std::string expected = "A100 A100 A100 V101 V101 V101 A102 V103 V103"; | |
256 int audio_timestamps[] = { 100, 100, 100, 102, kEnd }; | |
257 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
258 int video_timestamps[] = { 101, 101, 101, 103, 103, kEnd }; | |
259 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
260 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true); | |
261 } | |
262 | |
263 TEST_F(StreamParserTest, MergeBufferQueues_CrossStreamDuplicates) { | |
264 // Interface keeps the choice undefined of which stream's buffer wins the | |
265 // selection when timestamps are tied. Verify at least the right number of | |
266 // each kind of buffer results, and that buffers are in nondecreasing order. | |
267 StreamParser::BufferQueue audio_buffers, video_buffers, text_buffers; | |
268 StreamParser::TextBufferQueueMap text_map; | |
269 | |
270 std::string expected = "100 100 100 100 100 100 102 102 102 102 102 102 102"; | |
271 int audio_timestamps[] = { 100, 100, 100, 102, kEnd }; | |
272 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
273 int video_timestamps[] = { 100, 100, 102, 102, 102, kEnd }; | |
274 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
275 int text_timestamps[] = { 100, 102, 102, 102, kEnd }; | |
276 GenerateTextBuffers(&text_buffers, text_timestamps, 1); | |
277 text_map.insert(std::make_pair(50, text_buffers)); // Immaterial index. | |
278 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, | |
279 false); | |
280 } | |
281 | |
282 TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingSingleStream) { | |
283 StreamParser::BufferQueue audio_buffers, video_buffers; | |
284 StreamParser::TextBufferQueueMap text_map; | |
285 | |
286 int audio_timestamps[] = { 101, 102, 100, 103, kEnd }; | |
287 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
288 | |
289 StreamParser::BufferQueue merged_buffers; | |
290 EXPECT_FALSE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, | |
291 text_map, &merged_buffers)); | |
292 } | |
293 | |
294 TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingMultipleStreams) { | |
295 StreamParser::BufferQueue audio_buffers, video_buffers; | |
296 StreamParser::TextBufferQueueMap text_map; | |
297 | |
298 int audio_timestamps[] = { 101, 102, 100, 103, kEnd }; | |
299 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
300 int video_timestamps[] = { 104, 100, kEnd }; | |
301 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
302 | |
303 StreamParser::BufferQueue merged_buffers; | |
304 EXPECT_FALSE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, | |
305 text_map, &merged_buffers)); | |
306 } | |
307 | |
308 TEST_F(StreamParserTest, MergeBufferQueues_ValidAppendToExistingMerge) { | |
309 StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; | |
310 StreamParser::TextBufferQueueMap text_map; | |
311 | |
312 std::string expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107"; | |
313 int audio_timestamps[] = { 100, 105, kEnd }; | |
314 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
315 int video_timestamps[] = { 101, 103, 106, kEnd }; | |
316 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
317 int text_timestamps_1[] = { 102, 107, kEnd }; | |
318 GenerateTextBuffers(&text_1, text_timestamps_1, 1); | |
319 int text_timestamps_2[] = { 104, kEnd }; | |
320 GenerateTextBuffers(&text_2, text_timestamps_2, 2); | |
321 text_map.insert(std::make_pair(50, text_1)); // Immaterial index. | |
322 text_map.insert(std::make_pair(51, text_2)); // Immaterial index. | |
323 StreamParser::BufferQueue merged_buffers; | |
324 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, | |
325 &merged_buffers); | |
326 | |
327 audio_buffers.clear(); | |
328 video_buffers.clear(); | |
329 text_1.clear(); | |
330 text_2.clear(); | |
331 text_map.clear(); | |
332 | |
333 expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107 " | |
334 "A107 V111 T1:112 V113 T2:114 A115 V116 T1:117"; | |
335 int more_audio_timestamps[] = { 107, 115, kEnd }; | |
336 GenerateBuffers(&audio_buffers, more_audio_timestamps, | |
337 StreamParserBuffer::kAudio); | |
338 int more_video_timestamps[] = { 111, 113, 116, kEnd }; | |
339 GenerateBuffers(&video_buffers, more_video_timestamps, | |
340 StreamParserBuffer::kVideo); | |
341 int more_text_timestamps_1[] = { 112, 117, kEnd }; | |
342 GenerateTextBuffers(&text_1, more_text_timestamps_1, 1); | |
343 int more_text_timestamps_2[] = { 114, kEnd }; | |
344 GenerateTextBuffers(&text_2, more_text_timestamps_2, 2); | |
345 text_map.insert(std::make_pair(50, text_1)); // Immaterial index. | |
346 text_map.insert(std::make_pair(51, text_2)); // Immaterial index. | |
347 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, | |
348 &merged_buffers); | |
349 } | |
350 | |
351 TEST_F(StreamParserTest, MergeBufferQueues_InvalidAppendToExistingMerge) { | |
352 StreamParser::BufferQueue audio_buffers, video_buffers, text_1, text_2; | |
353 StreamParser::TextBufferQueueMap text_map; | |
354 | |
355 std::string expected = "A100 V101 T1:102 V103 T2:104 A105 V106 T1:107"; | |
356 int audio_timestamps[] = { 100, 105, kEnd }; | |
357 GenerateBuffers(&audio_buffers, audio_timestamps, StreamParserBuffer::kAudio); | |
358 int video_timestamps[] = { 101, 103, 106, kEnd }; | |
359 GenerateBuffers(&video_buffers, video_timestamps, StreamParserBuffer::kVideo); | |
360 int text_timestamps_1[] = { 102, 107, kEnd }; | |
361 GenerateTextBuffers(&text_1, text_timestamps_1, 1); | |
362 int text_timestamps_2[] = { 104, kEnd }; | |
363 GenerateTextBuffers(&text_2, text_timestamps_2, 2); | |
364 text_map.insert(std::make_pair(50, text_1)); // Immaterial index. | |
365 text_map.insert(std::make_pair(51, text_2)); // Immaterial index. | |
366 StreamParser::BufferQueue merged_buffers; | |
367 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, | |
368 &merged_buffers); | |
369 | |
370 audio_buffers.clear(); | |
371 video_buffers.clear(); | |
372 text_1.clear(); | |
373 text_2.clear(); | |
374 text_map.clear(); | |
375 | |
376 // Appending empty buffers to pre-existing merge result should succeed and not | |
377 // change the existing result. | |
378 VerifySuccessfulMerge(audio_buffers, video_buffers, text_map, expected, true, | |
379 &merged_buffers); | |
380 | |
381 // But appending something with a lower timestamp than the last timestamp | |
382 // in the pre-existing merge result should fail. | |
383 int more_audio_timestamps[] = { 106, kEnd }; | |
384 GenerateBuffers(&audio_buffers, more_audio_timestamps, | |
385 StreamParserBuffer::kAudio); | |
386 EXPECT_FALSE(StreamParser::MergeBufferQueues(audio_buffers, video_buffers, | |
387 text_map, &merged_buffers)); | |
388 } | |
389 | |
390 } // namespace media | |
391 | |