Index: media/base/seekable_audio_buffer_unittest.cc |
diff --git a/media/base/seekable_audio_buffer_unittest.cc b/media/base/seekable_audio_buffer_unittest.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..a9ba137c81b6740c94c575319d4721d4a3d89c07 |
--- /dev/null |
+++ b/media/base/seekable_audio_buffer_unittest.cc |
@@ -0,0 +1,503 @@ |
+// Copyright 2013 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/basictypes.h" |
+#include "base/logging.h" |
+#include "base/memory/scoped_ptr.h" |
+#include "base/strings/stringprintf.h" |
+#include "base/time.h" |
+#include "media/base/audio_buffer.h" |
+#include "media/base/audio_bus.h" |
+#include "media/base/buffers.h" |
+#include "media/base/seekable_audio_buffer.h" |
+#include "testing/gtest/include/gtest/gtest.h" |
+ |
+namespace media { |
+ |
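+// Creates an AudioBuffer of interleaved |format| data containing |frames| |
+// frames for |channels| channels, filled with an increasing sequence of |
+// values starting at |start| and stepping by |increment|. |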
+template <class T> |
+static scoped_refptr<AudioBuffer> MakeInterleavedBuffer( |
+ SampleFormat format, |
+ int channels, |
+ T start, |
+ T increment, |
+ int frames, |
+ const base::TimeDelta start_time) { |
+ DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 || |
+ format == kSampleFormatS32 || format == kSampleFormatF32); |
+ |
+ // Create a block of memory with values: |
+ // start |
+ // start + increment |
+ // start + 2 * increment, ... |
+ // Since this is interleaved data, channel 0 data will be: |
+ // start |
+ // start + channels * increment |
+ // start + 2 * channels * increment, ... |
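+  // For example, with channels = 2, start = 1 and increment = 1 the memory |
+  // holds 1, 2, 3, 4, 5, 6, ..., so channel 0 is 1, 3, 5, ... and channel 1 |
+  // is 2, 4, 6, ... |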
+ int buffer_size = frames * channels * sizeof(T); |
+ scoped_ptr<uint8[]> memory(new uint8[buffer_size]); |
+ uint8* data[] = { memory.get() }; |
+ T* buffer = reinterpret_cast<T*>(memory.get()); |
+ for (int i = 0; i < frames * channels; ++i) { |
+ buffer[i] = start; |
+ start += increment; |
+ } |
+ // Duration is 1 second per frame (for simplicity). |
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(frames); |
+ return AudioBuffer::CopyFrom( |
+ format, channels, frames, data, start_time, duration); |
+} |
+ |
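+// Creates an AudioBuffer of planar |format| data; each channel is a separate |
+// block of |frames| values continuing the sequence that starts at |start| |
+// and steps by |increment|. |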
+template <class T> |
+static scoped_refptr<AudioBuffer> MakePlanarBuffer( |
+ SampleFormat format, |
+ int channels, |
+ T start, |
+ T increment, |
+ int frames, |
+ const base::TimeDelta start_time) { |
+ DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16); |
+ |
+ // Create multiple blocks of data, one for each channel. |
+ // Values in channel 0 will be: |
+ // start |
+ // start + increment |
+ // start + 2 * increment, ... |
+ // Values in channel 1 will be: |
+ // start + frames * increment |
+ // start + (frames + 1) * increment |
+ // start + (frames + 2) * increment, ... |
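+  // For example, with 2 channels, 4 frames, start = 1 and increment = 1, |
+  // channel 0 holds 1, 2, 3, 4 and channel 1 holds 5, 6, 7, 8. |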
+ int buffer_size = frames * sizeof(T); |
+ scoped_ptr<uint8*[]> data(new uint8*[channels]); |
+ scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]); |
+ for (int i = 0; i < channels; ++i) { |
+ data.get()[i] = memory.get() + i * buffer_size; |
+ T* buffer = reinterpret_cast<T*>(data.get()[i]); |
+ for (int j = 0; j < frames; ++j) { |
+ buffer[j] = start; |
+ start += increment; |
+ } |
+ } |
+ // Duration is 1 second per frame (for simplicity). |
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(frames); |
+ return AudioBuffer::CopyFrom( |
+ format, channels, frames, data.get(), start_time, duration); |
+} |
+ |
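+// Verifies that the first |frames| samples of |channel_data| form the |
+// sequence |start|, |start| + |increment|, |start| + 2 * |increment|, ... |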
+static void VerifyResult(float* channel_data, |
+ int frames, |
+ float start, |
+ float increment) { |
+ for (int i = 0; i < frames; ++i) { |
+ SCOPED_TRACE(base::StringPrintf( |
+ "i=%d/%d start=%f, increment=%f", i, frames, start, increment)); |
+ ASSERT_EQ(channel_data[i], start); |
+ start += increment; |
+ } |
+} |
+ |
+TEST(SeekableAudioBufferTest, AppendAndClear) { |
+ const int channels = 1; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ EXPECT_EQ(buffer.forward_capacity(), 1000); |
+ EXPECT_EQ(buffer.forward_frames(), 0); |
+ buffer.set_forward_capacity(2000); |
+ EXPECT_EQ(buffer.forward_capacity(), 2000); |
+ EXPECT_EQ(buffer.forward_frames(), 0); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_GT(buffer.forward_frames(), 0); |
+ buffer.Clear(); |
+ EXPECT_EQ(buffer.forward_capacity(), 2000); |
+ EXPECT_EQ(buffer.forward_frames(), 0); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 20, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 8); |
+} |
+ |
+TEST(SeekableAudioBufferTest, MultipleAppend) { |
+ const int channels = 1; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Append 40 frames in 5 buffers. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 8); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 16); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 24); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 32); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 40); |
+} |
+ |
+TEST(SeekableAudioBufferTest, Seek) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 6 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 1.0f, 1.0f, 6, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 6); |
+ |
+ // Seek past 2 frames. |
+ EXPECT_TRUE(buffer.SeekFrames(2)); |
+ EXPECT_EQ(buffer.forward_frames(), 4); |
+ |
+ // Try to seek more frames than exist. |
+ EXPECT_FALSE(buffer.SeekFrames(20)); |
+ EXPECT_EQ(buffer.forward_frames(), 4); |
+ |
+ // Seek to end of data. |
+ EXPECT_TRUE(buffer.SeekFrames(4)); |
+ EXPECT_EQ(buffer.forward_frames(), 0); |
+ |
+  // At the end of the data, seeking fails unless 0 frames are requested. |
+ EXPECT_FALSE(buffer.SeekFrames(1)); |
+ EXPECT_FALSE(buffer.SeekFrames(100)); |
+ EXPECT_TRUE(buffer.SeekFrames(0)); |
+} |
+ |
+TEST(SeekableAudioBufferTest, BufferFull) { |
+ const int channels = 1; |
+ const base::TimeDelta start_time; |
+  SeekableAudioBuffer buffer(10);  // Hold up to 10 frames. |
+ |
+  // Add 24 frames of data, much more than the limit of 10. The first |
+  // Append() succeeds; later ones report failure once the limit is |
+  // exceeded, but the frames are still buffered. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 8); |
+ EXPECT_FALSE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 16); |
+ EXPECT_FALSE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 10, 1, 8, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 24); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadF32) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 76 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 1.0f, 1.0f, 6, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 13.0f, 1.0f, 10, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 33.0f, 1.0f, 60, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 76); |
+ |
+ // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be |
+ // 1, 3, 5, and ch[1] should be 2, 4, 6. |
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(3, bus.get()), 3); |
+ EXPECT_EQ(buffer.forward_frames(), 73); |
+ VerifyResult(bus->channel(0), 3, 1.0f, 2.0f); |
+ VerifyResult(bus->channel(1), 3, 2.0f, 2.0f); |
+ |
+ // Now read 5 frames, which will span buffers. |
+ EXPECT_EQ(buffer.ReadFrames(5, bus.get()), 5); |
+ EXPECT_EQ(buffer.forward_frames(), 68); |
+ VerifyResult(bus->channel(0), 5, 7.0f, 2.0f); |
+ VerifyResult(bus->channel(1), 5, 8.0f, 2.0f); |
+ |
+ // Now skip into the third buffer. |
+ EXPECT_TRUE(buffer.SeekFrames(20)); |
+ EXPECT_EQ(buffer.forward_frames(), 48); |
+ |
+ // Now read 2 frames, which are in the third buffer. |
+ EXPECT_EQ(buffer.ReadFrames(2, bus.get()), 2); |
+ VerifyResult(bus->channel(0), 2, 57.0f, 2.0f); |
+ VerifyResult(bus->channel(1), 2, 58.0f, 2.0f); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadU8) { |
+ const int channels = 4; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 4 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<uint8>( |
+ kSampleFormatU8, channels, 128, 1, 4, start_time))); |
+ |
+ // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be |
+ // 128, 132, 136, 140, other channels similar. However, values are converted |
+ // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer |
+ // value should be 0.0, then 1/127, 2/127, etc. |
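+  // For ch[0] that maps 128, 132, 136, 140 to 0.0, 4/127, 8/127, 12/127, |
+  // which is what the checks below expect. |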
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(4, bus.get()), 4); |
+ EXPECT_EQ(buffer.forward_frames(), 0); |
+ VerifyResult(bus->channel(0), 4, 0.0f, 4.0f / 127.0f); |
+ VerifyResult(bus->channel(1), 4, 1.0f / 127.0f, 4.0f / 127.0f); |
+ VerifyResult(bus->channel(2), 4, 2.0f / 127.0f, 4.0f / 127.0f); |
+ VerifyResult(bus->channel(3), 4, 3.0f / 127.0f, 4.0f / 127.0f); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadS16) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 24 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int16>( |
+ kSampleFormatS16, channels, 1, 1, 4, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int16>( |
+ kSampleFormatS16, channels, 9, 1, 20, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 24); |
+ |
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be |
+ // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. |
+ // Data is converted to float from -1.0 to 1.0 based on int16 range. |
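+  // That is, an int16 sample value v is expected to read back as |
+  // v / kint16max, so ch[0] is 1/kint16max, 3/kint16max, ..., 11/kint16max. |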
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(6, bus.get()), 6); |
+ EXPECT_EQ(buffer.forward_frames(), 18); |
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max); |
+ VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadS32) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 24 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int32>( |
+ kSampleFormatS32, channels, 1, 1, 4, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int32>( |
+ kSampleFormatS32, channels, 9, 1, 20, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 24); |
+ |
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be |
+  // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. |
+ // Data is converted to float from -1.0 to 1.0 based on int32 range. |
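+  // That is, an int32 sample value v is expected to read back as |
+  // v / kint32max. |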
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(6, bus.get()), 6); |
+ EXPECT_EQ(buffer.forward_frames(), 18); |
+ VerifyResult(bus->channel(0), 6, 1.0f / kint32max, 2.0f / kint32max); |
+ VerifyResult(bus->channel(1), 6, 2.0f / kint32max, 2.0f / kint32max); |
+ |
+ // Read the next 2 frames. |
+ EXPECT_EQ(buffer.ReadFrames(2, bus.get()), 2); |
+ EXPECT_EQ(buffer.forward_frames(), 16); |
+ VerifyResult(bus->channel(0), 2, 13.0f / kint32max, 2.0f / kint32max); |
+ VerifyResult(bus->channel(1), 2, 14.0f / kint32max, 2.0f / kint32max); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadF32Planar) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 14 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakePlanarBuffer<float>( |
+ kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakePlanarBuffer<float>( |
+ kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 14); |
+ |
+ // Read 6 frames from the buffer. F32 is planar, so ch[0] should be |
+ // 1, 2, 3, 4, 50, 51, and ch[1] should be 5, 6, 7, 8, 60, 61. |
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(6, bus.get()), 6); |
+ EXPECT_EQ(buffer.forward_frames(), 8); |
+ VerifyResult(bus->channel(0), 4, 1.0f, 1.0f); |
+ VerifyResult(bus->channel(0) + 4, 2, 50.0f, 1.0f); |
+ VerifyResult(bus->channel(1), 4, 5.0f, 1.0f); |
+ VerifyResult(bus->channel(1) + 4, 2, 60.0f, 1.0f); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadS16Planar) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 24 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakePlanarBuffer<int16>( |
+ kSampleFormatPlanarS16, channels, 1, 1, 4, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakePlanarBuffer<int16>( |
+ kSampleFormatPlanarS16, channels, 100, 5, 20, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 24); |
+ |
+ // Read 6 frames from the buffer. Data is planar, so ch[0] should be |
+ // 1, 2, 3, 4, 100, 105, and ch[1] should be 5, 6, 7, 8, 200, 205. |
+ // Data is converted to float from -1.0 to 1.0 based on int16 range. |
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(6, bus.get()), 6); |
+ EXPECT_EQ(buffer.forward_frames(), 18); |
+ VerifyResult(bus->channel(0), 4, 1.0f / kint16max, 1.0f / kint16max); |
+ VerifyResult(bus->channel(0) + 4, 2, 100.0f / kint16max, 5.0f / kint16max); |
+ VerifyResult(bus->channel(1), 4, 5.0f / kint16max, 1.0f / kint16max); |
+ VerifyResult(bus->channel(1) + 4, 2, 200.0f / kint16max, 5.0f / kint16max); |
+} |
+ |
+TEST(SeekableAudioBufferTest, ReadManyChannels) { |
+ const int channels = 16; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 76 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 0.0f, 1.0f, 6, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, start_time))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 16.0f * channels, 1.0f, 60, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 76); |
+ |
+  // Read 30 frames from the buffer. Data is interleaved, so ch[0] should be |
+  // 0, 16, 32, ..., and ch[1] should be 1, 17, 33, .... Check every channel. |
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(30, bus.get()), 30); |
+ EXPECT_EQ(buffer.forward_frames(), 46); |
+ for (int i = 0; i < channels; ++i) { |
+ VerifyResult(bus->channel(i), 30, static_cast<float>(i), 16.0f); |
+ } |
+} |
+ |
+TEST(SeekableAudioBufferTest, Peek) { |
+ const int channels = 4; |
+ const base::TimeDelta start_time; |
+ SeekableAudioBuffer buffer(1000); |
+ |
+ // Add 60 frames of data. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<float>( |
+ kSampleFormatF32, channels, 0.0f, 1.0f, 60, start_time))); |
+ EXPECT_EQ(buffer.forward_frames(), 60); |
+ |
+  // Peek at the data; peeking does not advance the read position. |
+ scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.forward_frames(), 60); |
+ EXPECT_EQ(buffer.PeekFrames(100, bus1.get()), 60); // only 60 in buffer. |
+ EXPECT_EQ(buffer.PeekFrames(30, bus1.get()), 30); // should get first 30. |
+ EXPECT_EQ(buffer.forward_frames(), 60); |
+ |
+  // Now read 30 frames, which should be the same frames that were just |
+  // peeked at, since peeking did not advance the read position. |
+ scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, 100); |
+ EXPECT_EQ(buffer.ReadFrames(30, bus2.get()), 30); |
+ for (int i = 0; i < channels; ++i) { |
+ VerifyResult(bus1->channel(i), |
+ 30, |
+ static_cast<float>(i), |
+ static_cast<float>(channels)); |
+ VerifyResult(bus2->channel(i), |
+ 30, |
+ static_cast<float>(i), |
+ static_cast<float>(channels)); |
+ } |
+ |
+  // Peek 5 frames, starting 10 frames ahead of the current read position. |
+ EXPECT_EQ(buffer.PeekFrames(5, 10, bus1.get()), 5); |
+ for (int i = 0; i < channels; ++i) { |
+ VerifyResult(bus1->channel(i), |
+ 5, |
+ static_cast<float>(i + 40 * channels), |
+ static_cast<float>(channels)); |
+ } |
+ |
+ // Peek to the end of the buffer. |
+ EXPECT_EQ(buffer.forward_frames(), 30); |
+ EXPECT_EQ(buffer.PeekFrames(100, bus1.get()), 30); |
+ EXPECT_EQ(buffer.PeekFrames(30, bus1.get()), 30); |
+} |
+ |
+TEST(SeekableAudioBufferTest, Time) { |
+ const int channels = 2; |
+ const base::TimeDelta start_time1; |
+ const base::TimeDelta start_time2 = base::TimeDelta::FromSeconds(30); |
+ SeekableAudioBuffer buffer(1000); |
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ |
+ // Add two buffers: |
+ // first: start=0s, duration=10s |
+ // second: start=30s, duration=10s |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int16>( |
+ kSampleFormatS16, channels, 1, 1, 10, start_time1))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int16>( |
+ kSampleFormatS16, channels, 1, 1, 10, start_time2))); |
+ EXPECT_EQ(buffer.forward_frames(), 20); |
+ |
+ // Check starting time. |
+ EXPECT_EQ(buffer.current_time(), start_time1); |
+ |
+  // Read 2 frames; time should be 2s in (since duration is 1s per frame). |
+ EXPECT_EQ(buffer.ReadFrames(2, bus.get()), 2); |
+ EXPECT_EQ(buffer.current_time(), |
+ start_time1 + base::TimeDelta::FromSeconds(2)); |
+ |
+ // Skip 2 frames. |
+ EXPECT_TRUE(buffer.SeekFrames(2)); |
+ EXPECT_EQ(buffer.current_time(), |
+ start_time1 + base::TimeDelta::FromSeconds(4)); |
+ |
+ // Read until almost the end of buffer1. |
+ EXPECT_EQ(buffer.ReadFrames(5, bus.get()), 5); |
+ EXPECT_EQ(buffer.current_time(), |
+ start_time1 + base::TimeDelta::FromSeconds(9)); |
+ |
+  // Read 1 more frame, so time moves to the start of buffer2. |
+ EXPECT_EQ(buffer.ReadFrames(1, bus.get()), 1); |
+ EXPECT_EQ(buffer.current_time(), start_time2); |
+ |
+  // Read all 10 frames in buffer2; timestamp should be the end of buffer2. |
+ EXPECT_EQ(buffer.ReadFrames(10, bus.get()), 10); |
+ EXPECT_EQ(buffer.current_time(), |
+ start_time2 + base::TimeDelta::FromSeconds(10)); |
+ |
+  // Try to read more frames (which don't exist); timestamp should not change. |
+ EXPECT_EQ(buffer.ReadFrames(5, bus.get()), 0); |
+ EXPECT_EQ(buffer.current_time(), |
+ start_time2 + base::TimeDelta::FromSeconds(10)); |
+} |
+ |
+TEST(SeekableAudioBufferTest, NoTime) { |
+ const int channels = 2; |
+ SeekableAudioBuffer buffer(1000); |
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100); |
+ |
+ // Add two buffers with no timestamps. Time should always be unknown. |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int16>( |
+ kSampleFormatS16, channels, 1, 1, 10, kNoTimestamp()))); |
+ EXPECT_TRUE(buffer.Append(MakeInterleavedBuffer<int16>( |
+ kSampleFormatS16, channels, 1, 1, 10, kNoTimestamp()))); |
+ EXPECT_EQ(buffer.forward_frames(), 20); |
+ |
+ // Check starting time. |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+ |
+ // Read 2 frames. |
+ EXPECT_EQ(buffer.ReadFrames(2, bus.get()), 2); |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+ |
+ // Skip 2 frames. |
+ EXPECT_TRUE(buffer.SeekFrames(2)); |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+ |
+ // Read until almost the end of buffer1. |
+ EXPECT_EQ(buffer.ReadFrames(5, bus.get()), 5); |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+ |
+  // Read 1 more frame, moving into buffer2. |
+ EXPECT_EQ(buffer.ReadFrames(1, bus.get()), 1); |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+ |
+ // Read all 10 frames in buffer2. |
+ EXPECT_EQ(buffer.ReadFrames(10, bus.get()), 10); |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+ |
+  // Try to read more frames (which don't exist); timestamp should not change. |
+ EXPECT_EQ(buffer.ReadFrames(5, bus.get()), 0); |
+ EXPECT_EQ(buffer.current_time(), kNoTimestamp()); |
+} |
+ |
+} // namespace media |