Index: content/common/gpu/media/video_encode_accelerator_unittest.cc |
diff --git a/content/common/gpu/media/video_encode_accelerator_unittest.cc b/content/common/gpu/media/video_encode_accelerator_unittest.cc |
index 9d1e3b69dc7c9edccf56b2fab6021bc0fb20f23b..0c17c664cd0929df7e8817216db8afe002190bf3 100644 |
--- a/content/common/gpu/media/video_encode_accelerator_unittest.cc |
+++ b/content/common/gpu/media/video_encode_accelerator_unittest.cc |
@@ -33,6 +33,8 @@ |
#error The VideoEncodeAcceleratorUnittest is not supported on this platform. |
#endif |
+#define ALIGN_64_BYTES(x) (((x) + 63) & ~63) |
+ |
using media::VideoEncodeAccelerator; |
namespace content { |
@@ -103,8 +105,10 @@ struct TestStream { |
~TestStream() {} |
gfx::Size size; |
+ std::string in_filename; |
base::MemoryMappedFile input_file; |
media::VideoCodecProfile requested_profile; |
+ base::FilePath temp_file; |
std::string out_filename; |
unsigned int requested_bitrate; |
unsigned int requested_framerate; |
@@ -112,6 +116,75 @@ struct TestStream { |
unsigned int requested_subsequent_framerate; |
}; |
+// Use |coded_size| to copy YUV data into memory from |test_stream| input file. |
+// Also calculate |input_buffer_size| frame size value. |
+static bool PrepareAlignedTempFile(const gfx::Size& coded_size, |
+ TestStream* test_stream, |
+ size_t* input_buffer_size) { |
+ size_t input_num_planes = media::VideoFrame::NumPlanes(kInputFormat); |
+ size_t* padding_size = new size_t[input_num_planes]; |
+ size_t visible_frame_size = 0; |
+ |
+ // YUV plane starting address should be 64 bytes alignment. |
+ // Calculate padding size for each plane, and frame size for visible size |
+ // and coded size. |
+ *input_buffer_size = 0; |
+ for (off_t i = 0; i < input_num_planes; i++) { |
+ size_t size = media::VideoFrame::PlaneAllocationSize( |
+ kInputFormat, i, coded_size); |
+ padding_size[i] = ALIGN_64_BYTES(size) - size; |
+ visible_frame_size += media::VideoFrame::PlaneAllocationSize( |
+ kInputFormat, i, test_stream->size); |
+ *input_buffer_size += ALIGN_64_BYTES(size); |
+ } |
+ |
+ // Test case may have many encoders and memory should be prepared once. |
+ if (!test_stream->input_file.IsValid()) { |
+ base::MemoryMappedFile input_file; |
+ CHECK(base::CreateTemporaryFile(&test_stream->temp_file)); |
+ CHECK(input_file.Initialize(base::FilePath(test_stream->in_filename))); |
+ |
+ size_t num_frames = input_file.length() / visible_frame_size; |
+ uint32 flags = base::File::FLAG_CREATE_ALWAYS | |
+ base::File::FLAG_WRITE | |
+ base::File::FLAG_READ; |
+ |
+ // Create a temporary file with coded_size length |
+ base::File file(base::FilePath(test_stream->temp_file), flags); |
+ file.Write(*input_buffer_size * num_frames - 1, ".", 1); |
+ CHECK(test_stream->input_file.Initialize(file.Pass())); |
+ |
+ off_t src_offset = 0, dest_offset = 0; |
+ while (src_offset < static_cast<off_t>(input_file.length())) { |
+ for (off_t i = 0; i < input_num_planes; i++) { |
+ size_t coded_bpl = |
+ media::VideoFrame::RowBytes(i, kInputFormat, coded_size.width()); |
+ size_t visible_bpl = |
+ media::VideoFrame::RowBytes(i, kInputFormat, |
+ test_stream->size.width()); |
+ off_t rows = media::VideoFrame::Rows(i, kInputFormat, |
+ test_stream->size.height()); |
+ for (off_t j = 0; j < rows; j++) { |
+ char *src = reinterpret_cast<char*>( |
+ const_cast<uint8*>(input_file.data() + src_offset)); |
+ char *dest = reinterpret_cast<char*>( |
+ const_cast<uint8*>(test_stream->input_file.data() + dest_offset)); |
+ memcpy(dest, src, visible_bpl); |
+ src_offset += visible_bpl; |
+ dest_offset += coded_bpl; |
+ } |
+ off_t padding_rows = |
+ media::VideoFrame::Rows(i, kInputFormat, coded_size.height()) - |
+ rows; |
+ dest_offset += padding_rows * coded_bpl + padding_size[i]; |
+ } |
+ } |
+ file.Close(); |
rvargas (doing something else)
2014/08/26 21:25:21
no need to explicitly close the file
henryhsu
2014/08/27 02:59:06
Done.
|
+ } |
+ delete[] padding_size; |
+ return true; |
+} |
+ |
// Parse |data| into its constituent parts, set the various output fields |
// accordingly, read in video stream, and store them to |test_streams|. |
static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
@@ -129,7 +202,7 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
CHECK_LE(fields.size(), 9U) << data; |
TestStream* test_stream = new TestStream(); |
- base::FilePath::StringType filename = fields[0]; |
+ test_stream->in_filename = fields[0]; |
int width, height; |
CHECK(base::StringToInt(fields[1], &width)); |
CHECK(base::StringToInt(fields[2], &height)); |
@@ -161,7 +234,6 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
&test_stream->requested_subsequent_framerate)); |
} |
- CHECK(test_stream->input_file.Initialize(base::FilePath(filename))); |
test_streams->push_back(test_stream); |
} |
} |
@@ -552,29 +624,6 @@ VEAClient::VEAClient(const TestStream& test_stream, |
EXPECT_EQ(0, base::WriteFile(out_filename, NULL, 0)); |
} |
- input_buffer_size_ = |
- media::VideoFrame::AllocationSize(kInputFormat, test_stream.size); |
- CHECK_GT(input_buffer_size_, 0UL); |
- |
- // Calculate the number of frames in the input stream by dividing its length |
- // in bytes by frame size in bytes. |
- CHECK_EQ(test_stream_.input_file.length() % input_buffer_size_, 0U) |
- << "Stream byte size is not a product of calculated frame byte size"; |
- num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_; |
- CHECK_GT(num_frames_in_stream_, 0UL); |
- CHECK_LE(num_frames_in_stream_, kMaxFrameNum); |
- |
- // We may need to loop over the stream more than once if more frames than |
- // provided is required for bitrate tests. |
- if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) { |
- DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_ |
- << " frames), will loop it to reach " << kMinFramesForBitrateTests |
- << " frames"; |
- num_frames_to_encode_ = kMinFramesForBitrateTests; |
- } else { |
- num_frames_to_encode_ = num_frames_in_stream_; |
- } |
- |
thread_checker_.DetachFromThread(); |
} |
@@ -629,28 +678,44 @@ void VEAClient::RequireBitstreamBuffers(unsigned int input_count, |
ASSERT_EQ(state_, CS_INITIALIZED); |
SetState(CS_ENCODING); |
- // TODO(posciak): For now we only support input streams that meet encoder |
- // size requirements exactly (i.e. coded size == visible size), so that we |
- // can simply mmap the stream file and feed the encoder directly with chunks |
- // of that, instead of memcpying from mmapped file into a separate set of |
- // input buffers that would meet the coded size and alignment requirements. |
- // If/when this is changed, the ARM-specific alignment check below should be |
- // redone as well. |
+ PrepareAlignedTempFile(input_coded_size, |
+ const_cast<TestStream*>(&test_stream_), |
+ &input_buffer_size_); |
+ CHECK_GT(input_buffer_size_, 0UL); |
+ |
+ // Calculate the number of frames in the input stream by dividing its length |
+ // in bytes by frame size in bytes. |
+ CHECK_EQ(test_stream_.input_file.length() % input_buffer_size_, 0U) |
+ << "Stream byte size is not a product of calculated frame byte size"; |
+ num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_; |
+ CHECK_GT(num_frames_in_stream_, 0UL); |
+ CHECK_LE(num_frames_in_stream_, kMaxFrameNum); |
+ |
+ // We may need to loop over the stream more than once if more frames than |
+ // provided is required for bitrate tests. |
+ if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) { |
+ DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_ |
+ << " frames), will loop it to reach " << kMinFramesForBitrateTests |
+ << " frames"; |
+ num_frames_to_encode_ = kMinFramesForBitrateTests; |
+ } else { |
+ num_frames_to_encode_ = num_frames_in_stream_; |
+ } |
+ |
input_coded_size_ = input_coded_size; |
- ASSERT_EQ(input_coded_size_, test_stream_.size); |
#if defined(ARCH_CPU_ARMEL) |
// ARM performs CPU cache management with CPU cache line granularity. We thus |
// need to ensure our buffers are CPU cache line-aligned (64 byte-aligned). |
// Otherwise newer kernels will refuse to accept them, and on older kernels |
// we'll be treating ourselves to random corruption. |
// Since we are just mmapping and passing chunks of the input file, to ensure |
- // alignment, if the starting virtual addresses of the frames in it were not |
- // 64 byte-aligned, we'd have to use a separate set of input buffers and copy |
- // the frames into them before sending to the encoder. It would have been an |
- // overkill here though, because, for now at least, we only test resolutions |
- // that result in proper alignment, and it would have also interfered with |
- // performance testing. So just assert that the frame size is a multiple of |
- // 64 bytes. This ensures all frames start at 64-byte boundary, because |
+ // alignment, if the starting virtual addresses of YUV planes of the frames |
+ // in it were not 64 byte-aligned, we'd have to use a separate set of input |
+ // buffers and copy the frames into them before sending to the encoder. |
+  // Now we also test resolutions that differ from the coded size, and we |
+  // prepare the aligned chunks before encoding starts so that the copy does |
+  // not affect the performance measurements. |
+ // So just assert that the frame size is a multiple of 64 bytes. |
+ // This ensures all frames start at 64-byte boundary, because |
// MemoryMappedFile should be mmapp()ed at virtual page start as well. |
ASSERT_EQ(input_buffer_size_ & 63, 0u) |
<< "Frame size has to be a multiple of 64 bytes"; |
@@ -743,8 +808,14 @@ void VEAClient::InputNoLongerNeededCallback(int32 input_id) { |
scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) { |
CHECK_LE(position + input_buffer_size_, test_stream_.input_file.length()); |
- uint8* frame_data = |
+ uint8* frame_data_y = |
const_cast<uint8*>(test_stream_.input_file.data() + position); |
+ uint8* frame_data_u = frame_data_y + |
+ ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize( |
+ kInputFormat, 0, input_coded_size_)); |
+ uint8* frame_data_v = frame_data_u + |
+ ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize( |
+ kInputFormat, 1, input_coded_size_)); |
CHECK_GT(current_framerate_, 0U); |
scoped_refptr<media::VideoFrame> frame = |
@@ -756,9 +827,9 @@ scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) { |
input_coded_size_.width(), |
input_coded_size_.width() / 2, |
input_coded_size_.width() / 2, |
- frame_data, |
- frame_data + input_coded_size_.GetArea(), |
- frame_data + (input_coded_size_.GetArea() * 5 / 4), |
+ frame_data_y, |
+ frame_data_u, |
+ frame_data_v, |
base::TimeDelta().FromMilliseconds( |
next_input_id_ * base::Time::kMillisecondsPerSecond / |
current_framerate_), |
@@ -973,6 +1044,10 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) { |
base::Bind(&VEAClient::DestroyEncoder, base::Unretained(clients[i]))); |
} |
+  // Delete the temporary files now that the test has finished. |
+ for (size_t i = 0; i < test_streams.size(); i++) |
+ base::DeleteFile(test_streams[i]->temp_file, false); |
+ |
// This ensures all tasks have finished. |
encoder_thread.Stop(); |
} |