Chromium Code Reviews

Unified Diff: media/base/stream_parser_unittest.cc

Issue 149153002: MSE: Add StreamParser buffer remuxing utility and tests (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebased and addressed comments from PS2 Created 6 years, 10 months ago
Index: media/base/stream_parser_unittest.cc
diff --git a/media/base/stream_parser_unittest.cc b/media/base/stream_parser_unittest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..dfe028cb374b91d99c410679c772f4332e8d5df0
--- /dev/null
+++ b/media/base/stream_parser_unittest.cc
@@ -0,0 +1,376 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <sstream>
+
+#include "base/basictypes.h"
+#include "media/base/stream_parser.h"
+#include "media/base/stream_parser_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+typedef StreamParser::BufferQueue BufferQueue;
+typedef StreamParser::TextBufferQueueMap TextBufferQueueMap;
+
+const int kEnd = -1;
+const uint8 kFakeData[] = { 0xFF };
+
+static bool IsAudio(scoped_refptr<StreamParserBuffer> buffer) {
+  return buffer->type() == DemuxerStream::AUDIO;
+}
+
+static bool IsVideo(scoped_refptr<StreamParserBuffer> buffer) {
+  return buffer->type() == DemuxerStream::VIDEO;
+}
+
+static bool IsText(scoped_refptr<StreamParserBuffer> buffer) {
+  return buffer->type() == DemuxerStream::TEXT;
+}
+
+// Creates and appends a sequence of StreamParserBuffers to the provided
+// |queue|. |decode_timestamps| determines the number of appended buffers and
+// their sequence of decode timestamps; a |kEnd| timestamp indicates the
+// end of the sequence and no buffer is appended for it. Each new buffer's
+// type will be |type|, and if text, its text track number will be set to
+// |text_track_number|.
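+// For example, decode_timestamps = { 100, 103, kEnd } appends two buffers to
+// |queue|, with decode timestamps of 100 and 103 microseconds respectively.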
+static void GenerateBuffers(const int* decode_timestamps,
+                            StreamParserBuffer::Type type,
+                            int text_track_number,
+                            BufferQueue* queue) {
+  DCHECK(decode_timestamps);
+  DCHECK(queue);
+  DCHECK_NE(type, DemuxerStream::UNKNOWN);
+  DCHECK_LT(type, DemuxerStream::NUM_TYPES);
+  for (int i = 0; decode_timestamps[i] != kEnd; ++i) {
+    scoped_refptr<StreamParserBuffer> buffer =
+        StreamParserBuffer::CopyFrom(kFakeData, sizeof(kFakeData),
+                                     true, type);
+    if (type == DemuxerStream::TEXT)
+      buffer->set_text_track_number(text_track_number);
+    buffer->SetDecodeTimestamp(
+        base::TimeDelta::FromMicroseconds(decode_timestamps[i]));
+    queue->push_back(buffer);
+  }
+}
+
+class StreamParserTest : public testing::Test {
+ protected:
+  StreamParserTest() {}
+
+  // Returns the number of buffers in |merged_buffers_| for which |predicate|
+  // returns true.
+  size_t CountMatchingMergedBuffers(
+      bool (*predicate)(scoped_refptr<StreamParserBuffer> buffer)) {
+    return static_cast<size_t>(std::count_if(merged_buffers_.begin(),
+                                             merged_buffers_.end(),
+                                             predicate));
+  }
+
+  // Appends test audio buffers in the sequence described by
+  // |decode_timestamps| to |audio_buffers_|. See GenerateBuffers() for
+  // |decode_timestamps| format.
+  void GenerateAudioBuffers(const int* decode_timestamps) {
+    GenerateBuffers(decode_timestamps, DemuxerStream::AUDIO, -1,
+                    &audio_buffers_);
+  }
+
+  // Appends test video buffers in the sequence described by
+  // |decode_timestamps| to |video_buffers_|. See GenerateBuffers() for
+  // |decode_timestamps| format.
+  void GenerateVideoBuffers(const int* decode_timestamps) {
+    GenerateBuffers(decode_timestamps, DemuxerStream::VIDEO, -1,
+                    &video_buffers_);
+  }
+
+  // Current tests only need up to two distinct text BufferQueues. This helper
+  // conditionally appends buffers to the underlying |text_buffers_a_| and
+  // |text_buffers_b_| and conditionally inserts these BufferQueues into
+  // |text_map_|, keyed by the respective |text_track_number_{a,b}|. If
+  // |decode_timestamps_{a,b}| is NULL, the corresponding BufferQueue is
+  // neither appended to nor inserted into |text_map_| (though it may
+  // previously have been inserted under the same or a different key). Note
+  // that a key collision on map insertion does not replace the previous value.
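+  // For example, GenerateTextBuffers(timestamps, 7, NULL, -1) appends only to
+  // |text_buffers_a_| and inserts it into |text_map_| under key 7.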
+  void GenerateTextBuffers(const int* decode_timestamps_a,
+                           int text_track_number_a,
+                           const int* decode_timestamps_b,
+                           int text_track_number_b) {
+    if (decode_timestamps_a) {
+      GenerateBuffers(decode_timestamps_a, DemuxerStream::TEXT,
+                      text_track_number_a, &text_buffers_a_);
+      text_map_.insert(std::make_pair(text_track_number_a, text_buffers_a_));
+    }
+
+    if (decode_timestamps_b) {
+      GenerateBuffers(decode_timestamps_b, DemuxerStream::TEXT,
+                      text_track_number_b, &text_buffers_b_);
+      text_map_.insert(std::make_pair(text_track_number_b, text_buffers_b_));
+    }
+  }
+
+  // Returns a string that describes the sequence of buffers in
+  // |merged_buffers_|. The string is a concatenation of space-delimited
+  // buffer descriptors in the same sequence as |merged_buffers_|. Each
+  // descriptor is the concatenation of
+  // 1) a single character that describes the buffer's type(), e.g. A, V, or T
+  //    for audio, video, or text, respectively,
+  // 2) if text, the buffer's text_track_number(), otherwise nothing,
+  // 3) ":", and
+  // 4) the buffer's decode timestamp.
+  // If |include_type_and_text_track| is false, items 1, 2, and 3 are omitted
+  // from the descriptors. This is useful when buffers with different media
+  // types but the same decode timestamp are expected, and the exact sequence
+  // of media types for the tied timestamps is not subject to verification.
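+  // For example, an audio buffer at 100us, a video buffer at 101us, and a
+  // track-2 text buffer at 104us yield "A:100 V:101 T2:104", or "100 101 104"
+  // when |include_type_and_text_track| is false.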
+  std::string MergedBufferQueueString(bool include_type_and_text_track) {
+    std::stringstream results_stream;
+    for (BufferQueue::const_iterator itr = merged_buffers_.begin();
+         itr != merged_buffers_.end();
+         ++itr) {
+      if (itr != merged_buffers_.begin())
+        results_stream << " ";
+      const StreamParserBuffer& buffer = *(*itr);
+      if (include_type_and_text_track) {
+        switch (buffer.type()) {
+          case DemuxerStream::AUDIO:
+            results_stream << "A";
+            break;
+          case DemuxerStream::VIDEO:
+            results_stream << "V";
+            break;
+          case DemuxerStream::TEXT:
+            results_stream << "T";
+            results_stream << buffer.text_track_number();
+            break;
+          default:
+            NOTREACHED();
+        }
+        results_stream << ":";
+      }
+      results_stream << buffer.GetDecodeTimestamp().InMicroseconds();
+    }
+
+    return results_stream.str();
+  }
+
+  // Verifies that MergeBufferQueues() of the current |audio_buffers_|,
+  // |video_buffers_|, |text_map_|, and |merged_buffers_| returns true and
+  // results in an updated |merged_buffers_| that matches expectation. The
+  // expectation, specified in |expected|, is compared to the string resulting
+  // from MergedBufferQueueString() (see comments for that method) with
+  // |verify_type_and_text_track_sequence| passed. |merged_buffers_| is
+  // appended to by the merge, and may be set up by the caller to have some
+  // pre-existing buffers; it is both an input and an output of this method.
+  // Regardless of |verify_type_and_text_track_sequence|, the number of buffers
+  // of each type (audio, video, text) added by the merge is also verified to
+  // match the number of buffers in |audio_buffers_|, |video_buffers_|, and
+  // |text_map_|, respectively.
+  void VerifyMergeSuccess(const std::string& expected,
+                          bool verify_type_and_text_track_sequence) {
+    // |merged_buffers_| may already have some buffers. Count them by type for
+    // later inclusion in verification.
+    size_t original_audio_in_merged = CountMatchingMergedBuffers(IsAudio);
+    size_t original_video_in_merged = CountMatchingMergedBuffers(IsVideo);
+    size_t original_text_in_merged = CountMatchingMergedBuffers(IsText);
+
+    EXPECT_TRUE(MergeBufferQueues(audio_buffers_, video_buffers_, text_map_,
+                                  &merged_buffers_));
+
+    // Verify that the resulting contents of |merged_buffers_| match
+    // |expected|.
+    EXPECT_EQ(expected,
+              MergedBufferQueueString(verify_type_and_text_track_sequence));
+
+    // Verify that the correct number of each type of buffer is in the merge
+    // result.
+    size_t audio_in_merged = CountMatchingMergedBuffers(IsAudio);
+    size_t video_in_merged = CountMatchingMergedBuffers(IsVideo);
+    size_t text_in_merged = CountMatchingMergedBuffers(IsText);
+
+    EXPECT_GE(audio_in_merged, original_audio_in_merged);
+    EXPECT_GE(video_in_merged, original_video_in_merged);
+    EXPECT_GE(text_in_merged, original_text_in_merged);
+
+    EXPECT_EQ(audio_buffers_.size(),
+              audio_in_merged - original_audio_in_merged);
+    EXPECT_EQ(video_buffers_.size(),
+              video_in_merged - original_video_in_merged);
+
+    size_t expected_text_buffer_count = 0;
+    for (TextBufferQueueMap::const_iterator itr = text_map_.begin();
+         itr != text_map_.end();
+         ++itr) {
+      expected_text_buffer_count += itr->second.size();
+    }
+    EXPECT_EQ(expected_text_buffer_count,
+              text_in_merged - original_text_in_merged);
+  }
+
+  // Verifies that MergeBufferQueues() of the current |audio_buffers_|,
+  // |video_buffers_|, |text_map_|, and |merged_buffers_| returns false.
+  void VerifyMergeFailure() {
+    EXPECT_FALSE(MergeBufferQueues(audio_buffers_, video_buffers_, text_map_,
+                                   &merged_buffers_));
+  }
+
+  // Helper to allow tests to clear all the input BufferQueues (except
+  // |merged_buffers_|) and the TextBufferQueueMap that are used in
+  // VerifyMerge{Success/Failure}().
+  void ClearQueuesAndTextMapButKeepAnyMergedBuffers() {
+    audio_buffers_.clear();
+    video_buffers_.clear();
+    text_buffers_a_.clear();
+    text_buffers_b_.clear();
+    text_map_.clear();
+  }
+
+ private:
+  BufferQueue audio_buffers_, video_buffers_, text_buffers_a_, text_buffers_b_;
xhwang 2014/02/06 00:36:32 I don't find a rule against this, but I rarely see
wolenetz 2014/02/06 23:56:03 Done.
+  BufferQueue merged_buffers_;
+  TextBufferQueueMap text_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(StreamParserTest);
+};
+
+TEST_F(StreamParserTest, MergeBufferQueues_AllEmpty) {
+  std::string expected = "";
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_SingleAudioBuffer) {
+  std::string expected = "A:100";
+  int audio_timestamps[] = { 100, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_SingleVideoBuffer) {
+  std::string expected = "V:100";
+  int video_timestamps[] = { 100, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_SingleTextBuffer) {
+  std::string expected = "T12:100";
+  int text_timestamps[] = { 100, kEnd };
+  GenerateTextBuffers(text_timestamps, 12, NULL, -1);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideo) {
+  std::string expected = "A:100 V:101 V:102 A:103 A:104 V:105";
+  int audio_timestamps[] = { 100, 103, 104, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 101, 102, 105, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_OverlappingMultipleText) {
+  std::string expected = "T1:100 T1:101 T2:103 T1:104 T2:105 T2:106";
+  int text_timestamps_1[] = { 100, 101, 104, kEnd };
+  int text_timestamps_2[] = { 103, 105, 106, kEnd };
+  GenerateTextBuffers(text_timestamps_1, 1, text_timestamps_2, 2);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_OverlappingAudioVideoText) {
+  std::string expected = "A:100 V:101 T1:102 V:103 T2:104 A:105 V:106 T1:107";
+  int audio_timestamps[] = { 100, 105, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 101, 103, 106, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  int text_timestamps_1[] = { 102, 107, kEnd };
+  int text_timestamps_2[] = { 104, kEnd };
+  GenerateTextBuffers(text_timestamps_1, 1, text_timestamps_2, 2);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_NonDecreasingNoCrossMediaDuplicate) {
+  std::string expected = "A:100 A:100 A:100 V:101 V:101 V:101 A:102 V:103 "
+                         "V:103";
+  int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 101, 101, 101, 103, 103, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_CrossStreamDuplicates) {
+  // The interface leaves undefined which stream's buffer wins the selection
+  // when decode timestamps are tied. Verify at least that the right number of
+  // each kind of buffer results, and that the buffers are in nondecreasing
+  // timestamp order.
+  std::string expected = "100 100 100 100 100 100 102 102 102 102 102 102 102";
+  int audio_timestamps[] = { 100, 100, 100, 102, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 100, 100, 102, 102, 102, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  int text_timestamps[] = { 100, 102, 102, 102, kEnd };
+  GenerateTextBuffers(text_timestamps, 1, NULL, -1);
+  VerifyMergeSuccess(expected, false);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingSingleStream) {
+  int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  VerifyMergeFailure();
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_InvalidDecreasingMultipleStreams) {
+  int audio_timestamps[] = { 101, 102, 100, 103, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 104, 100, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  VerifyMergeFailure();
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_ValidAppendToExistingMerge) {
+  std::string expected = "A:100 V:101 T1:102 V:103 T2:104 A:105 V:106 T1:107";
+  int audio_timestamps[] = { 100, 105, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 101, 103, 106, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  int text_timestamps_1[] = { 102, 107, kEnd };
+  int text_timestamps_2[] = { 104, kEnd };
+  GenerateTextBuffers(text_timestamps_1, 1, text_timestamps_2, 2);
+  VerifyMergeSuccess(expected, true);
+
+  ClearQueuesAndTextMapButKeepAnyMergedBuffers();
+
+  expected = "A:100 V:101 T1:102 V:103 T2:104 A:105 V:106 T1:107 "
+             "A:107 V:111 T1:112 V:113 T2:114 A:115 V:116 T1:117";
+  int more_audio_timestamps[] = { 107, 115, kEnd };
+  GenerateAudioBuffers(more_audio_timestamps);
+  int more_video_timestamps[] = { 111, 113, 116, kEnd };
+  GenerateVideoBuffers(more_video_timestamps);
+  int more_text_timestamps_1[] = { 112, 117, kEnd };
+  int more_text_timestamps_2[] = { 114, kEnd };
+  GenerateTextBuffers(more_text_timestamps_1, 1, more_text_timestamps_2, 2);
+  VerifyMergeSuccess(expected, true);
+}
+
+TEST_F(StreamParserTest, MergeBufferQueues_InvalidAppendToExistingMerge) {
+  std::string expected = "A:100 V:101 T1:102 V:103 T2:104 A:105 V:106 T1:107";
+  int audio_timestamps[] = { 100, 105, kEnd };
+  GenerateAudioBuffers(audio_timestamps);
+  int video_timestamps[] = { 101, 103, 106, kEnd };
+  GenerateVideoBuffers(video_timestamps);
+  int text_timestamps_1[] = { 102, 107, kEnd };
+  int text_timestamps_2[] = { 104, kEnd };
+  GenerateTextBuffers(text_timestamps_1, 1, text_timestamps_2, 2);
+  VerifyMergeSuccess(expected, true);
+
+  // Merging empty queues into the pre-existing merge result should succeed
+  // and leave the existing result unchanged.
+  ClearQueuesAndTextMapButKeepAnyMergedBuffers();
+  VerifyMergeSuccess(expected, true);
+
+  // However, appending a buffer with a lower decode timestamp than the last
+  // timestamp in the pre-existing merge result should fail.
+  int more_audio_timestamps[] = { 106, kEnd };
+  GenerateAudioBuffers(more_audio_timestamps);
+  VerifyMergeFailure();
+}
+
+}  // namespace media
+
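For context when reading the tests above, here is a minimal sketch of how a caller might invoke the utility under test. It assumes only what the fixture implies: that MergeBufferQueues() is reachable from the media namespace (the test calls it unqualified) and takes the audio queue, the video queue, the text-track map, and a pointer to the output queue, returning false when an input is invalid. The actual declaration lives in media/base/stream_parser.h and may differ; RemuxForDemo is a hypothetical helper used only for illustration.

// Sketch only: signature of MergeBufferQueues() is inferred from the unit
// test calls above, not taken from the header.
#include "media/base/stream_parser.h"

namespace media {

bool RemuxForDemo(const StreamParser::BufferQueue& audio,
                  const StreamParser::BufferQueue& video,
                  const StreamParser::TextBufferQueueMap& text_map,
                  StreamParser::BufferQueue* merged) {
  // Per the tests, the merge appends to |merged| in nondecreasing decode
  // timestamp order, and fails if any input queue is not monotonically
  // nondecreasing or starts before the last buffer already in |merged|.
  return MergeBufferQueues(audio, video, text_map, merged);
}

}  // namespace media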
