Chromium Code Reviews

Unified Diff: content/common/gpu/media/video_encode_accelerator_unittest.cc

Issue 430583005: Make VEA test support videos with different coded size and visible size (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: address review comments of patch set 9 Created 6 years, 4 months ago
Index: content/common/gpu/media/video_encode_accelerator_unittest.cc
diff --git a/content/common/gpu/media/video_encode_accelerator_unittest.cc b/content/common/gpu/media/video_encode_accelerator_unittest.cc
index 9d1e3b69dc7c9edccf56b2fab6021bc0fb20f23b..f54c3cf610682b4fa146b0c9977d7ee5623e8a75 100644
--- a/content/common/gpu/media/video_encode_accelerator_unittest.cc
+++ b/content/common/gpu/media/video_encode_accelerator_unittest.cc
@@ -33,6 +33,8 @@
#error The VideoEncodeAcceleratorUnittest is not supported on this platform.
#endif
+#define ALIGN_64_BYTES(x) (((x) + 63) & ~63)
+
using media::VideoEncodeAccelerator;
namespace content {
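
The ALIGN_64_BYTES macro added above simply rounds a byte count up to the next multiple of 64. As a quick illustration (not part of the patch, and independent of the Chromium tree), the following stand-alone snippet shows the round-up behaviour:

#include <cassert>

#define ALIGN_64_BYTES(x) (((x) + 63) & ~63)

int main() {
  // Adding 63 and then clearing the low six bits rounds up to the next
  // multiple of 64; values that are already aligned are left unchanged.
  assert(ALIGN_64_BYTES(0) == 0);
  assert(ALIGN_64_BYTES(1) == 64);
  assert(ALIGN_64_BYTES(64) == 64);
  assert(ALIGN_64_BYTES(65) == 128);
  assert(ALIGN_64_BYTES(1280 * 720) == 1280 * 720);  // already a multiple of 64
  return 0;
}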
@@ -103,6 +105,7 @@ struct TestStream {
~TestStream() {}
gfx::Size size;
+ std::string in_filename;
base::MemoryMappedFile input_file;
media::VideoCodecProfile requested_profile;
std::string out_filename;
@@ -112,6 +115,96 @@ struct TestStream {
unsigned int requested_subsequent_framerate;
};
+// ARM performs CPU cache management with CPU cache line granularity. We thus
+// need to ensure our buffers are CPU cache line-aligned (64 byte-aligned).
+// Otherwise newer kernels will refuse to accept them, and on older kernels
+// we'll be treating ourselves to random corruption.
+// Since we are just mmapping and passing chunks of the input file, the
+// starting virtual address of every YUV plane of every frame must be 64
+// byte-aligned, which means the stream has to be copied into memory whose
+// per-frame plane addresses are 64 byte-aligned before it is handed to the
+// encoder.
+// Because we now also test resolutions whose visible size differs from the
+// coded size, this copy is done once before encoding starts so that it does
+// not affect performance measurements.
+// Copy the YUV data from |in_filename| into |input_file|, laying frames out
+// according to |visible_size| and |coded_size|. Also calculate the byte size
+// of an input frame and store it in |coded_buffer_size|.
+static void PrepareInputBuffers(const gfx::Size& visible_size,
+ const gfx::Size& coded_size,
+ const std::string in_filename,
+ base::MemoryMappedFile* input_file,
+ size_t* coded_buffer_size) {
+ base::FilePath temp_file;
+ size_t input_num_planes = media::VideoFrame::NumPlanes(kInputFormat);
+ std::vector<size_t> padding_sizes(input_num_planes);
+ std::vector<size_t> coded_bpl(input_num_planes);
+ std::vector<size_t> visible_bpl(input_num_planes);
+ std::vector<size_t> visible_plane_rows(input_num_planes);
+
+ // Each YUV plane's starting address should be 64 byte-aligned.
+ // Calculate padding size for each plane, and frame allocation size for
wuchengli 2014/08/28 10:17:48 80 char aligned. Move this with the previous line.
henryhsu 2014/08/29 06:36:41 Done.
+ // coded size. And also store bytes per line information of coded size and
wuchengli 2014/08/28 10:17:48 s/And also/Also/
henryhsu 2014/08/29 06:36:41 Done.
+ // visible size.
+ *coded_buffer_size = 0;
+ for (off_t i = 0; i < input_num_planes; i++) {
+ size_t size =
+ media::VideoFrame::PlaneAllocationSize(kInputFormat, i, coded_size);
+ size_t padding_bytes = ALIGN_64_BYTES(size) - size;
+
+ coded_bpl[i] =
+ media::VideoFrame::RowBytes(i, kInputFormat, coded_size.width());
+ visible_bpl[i] =
+ media::VideoFrame::RowBytes(i, kInputFormat, visible_size.width());
+ visible_plane_rows[i] =
+ media::VideoFrame::Rows(i, kInputFormat, visible_size.height());
+ size_t padding_rows =
+ media::VideoFrame::Rows(i, kInputFormat, coded_size.height()) -
+ visible_plane_rows[i];
+ padding_sizes[i] = padding_rows * coded_bpl[i] + padding_bytes;
+ *coded_buffer_size += ALIGN_64_BYTES(size);
wuchengli 2014/08/28 10:17:48 Move this after padding_bytes so the use of |size|
henryhsu 2014/08/29 06:36:41 Done.
+ }
+
+ // A test case may create several encoders, but the memory should only be prepared once.
+ if (input_file->IsValid())
+ return;
+
+ base::MemoryMappedFile src_file;
+ CHECK(base::CreateTemporaryFile(&temp_file));
+ CHECK(src_file.Initialize(base::FilePath(in_filename)));
+
+ size_t visible_buffer_size =
+ media::VideoFrame::AllocationSize(kInputFormat, visible_size);
+ size_t num_frames = src_file.length() / visible_buffer_size;
+ uint32 flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE |
+ base::File::FLAG_READ;
+
+ // Create a temporary file large enough to hold |num_frames| coded-size frames.
+ base::File file(base::FilePath(temp_file), flags);
+ file.Write(*coded_buffer_size * num_frames - 1, ".", 1);
wuchengli 2014/08/28 10:17:48 Can we use SetLength for this? Is SetLength fast?
henryhsu 2014/08/29 06:36:41 Done.
+ CHECK(input_file->Initialize(file.Pass(), true));
+
+ off_t src_offset = 0, dest_offset = 0;
+ while (src_offset < static_cast<off_t>(src_file.length())) {
+ for (off_t i = 0; i < input_num_planes; i++) {
+#if defined(ARCH_CPU_ARMEL)
+ // Assert that each plane of the frame starts at a 64-byte boundary.
+ const uint8* ptr = input_file->data() + dest_offset;
+ ASSERT_EQ(reinterpret_cast<off_t>(ptr) & 63, 0)
+ << "Planes of frame should be mapped at a 64 byte boundary";
+#endif
+ for (off_t j = 0; j < visible_plane_rows[i]; j++) {
+ const uint8* src = src_file.data() + src_offset;
+ uint8* dest = const_cast<uint8*>(input_file->data() + dest_offset);
wuchengli 2014/08/28 10:17:48 We should not return a const and access it. Maybe
henryhsu 2014/08/29 06:36:41 Done.
+ memcpy(dest, src, visible_bpl[i]);
+ src_offset += visible_bpl[i];
+ dest_offset += coded_bpl[i];
+ }
+ dest_offset += padding_sizes[i];
+ }
+ }
+ base::DeleteFile(temp_file, false);
+}
+
// Parse |data| into its constituent parts, set the various output fields
// accordingly, read in video stream, and store them to |test_streams|.
static void ParseAndReadTestStreamData(const base::FilePath::StringType& data,
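
To make the padding arithmetic in PrepareInputBuffers() above more concrete, here is a minimal stand-alone sketch of the same per-plane bookkeeping. It assumes I420 input (the test's kInputFormat) and a hypothetical stream whose 320x180 visible size is padded to a 320x192 coded size; the media::VideoFrame helpers are replaced with plain arithmetic:

#include <cstddef>
#include <cstdio>

#define ALIGN_64_BYTES(x) (((x) + 63) & ~63)

int main() {
  const int kVisibleW = 320, kVisibleH = 180;  // what the input file contains
  const int kCodedW = 320, kCodedH = 192;      // what the encoder requires
  // I420: plane 0 is full-resolution Y, planes 1 and 2 are half-resolution U/V.
  const int kPlanes = 3;
  size_t coded_buffer_size = 0;
  for (int i = 0; i < kPlanes; ++i) {
    const int div = (i == 0) ? 1 : 2;
    const size_t coded_bpl = kCodedW / div;
    const size_t visible_bpl = kVisibleW / div;
    const size_t coded_rows = kCodedH / div;
    const size_t visible_rows = kVisibleH / div;
    const size_t plane_size = coded_bpl * coded_rows;
    // Padding after each plane: the extra coded rows plus whatever is needed
    // to round the plane up to a 64-byte boundary.
    const size_t padding = (coded_rows - visible_rows) * coded_bpl +
                           (ALIGN_64_BYTES(plane_size) - plane_size);
    coded_buffer_size += ALIGN_64_BYTES(plane_size);
    std::printf("plane %d: %zu bytes, %zu visible bytes per line, %zu padding\n",
                i, plane_size, visible_bpl, padding);
  }
  std::printf("frame allocation: %zu bytes\n", coded_buffer_size);  // 92160
  return 0;
}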
@@ -129,7 +222,7 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data,
CHECK_LE(fields.size(), 9U) << data;
TestStream* test_stream = new TestStream();
- base::FilePath::StringType filename = fields[0];
+ test_stream->in_filename = fields[0];
int width, height;
CHECK(base::StringToInt(fields[1], &width));
CHECK(base::StringToInt(fields[2], &height));
@@ -161,7 +254,6 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data,
&test_stream->requested_subsequent_framerate));
}
- CHECK(test_stream->input_file.Initialize(base::FilePath(filename)));
test_streams->push_back(test_stream);
}
}
@@ -552,29 +644,6 @@ VEAClient::VEAClient(const TestStream& test_stream,
EXPECT_EQ(0, base::WriteFile(out_filename, NULL, 0));
}
- input_buffer_size_ =
- media::VideoFrame::AllocationSize(kInputFormat, test_stream.size);
- CHECK_GT(input_buffer_size_, 0UL);
-
- // Calculate the number of frames in the input stream by dividing its length
- // in bytes by frame size in bytes.
- CHECK_EQ(test_stream_.input_file.length() % input_buffer_size_, 0U)
- << "Stream byte size is not a product of calculated frame byte size";
- num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_;
- CHECK_GT(num_frames_in_stream_, 0UL);
- CHECK_LE(num_frames_in_stream_, kMaxFrameNum);
-
- // We may need to loop over the stream more than once if more frames than
- // provided is required for bitrate tests.
- if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) {
- DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_
- << " frames), will loop it to reach " << kMinFramesForBitrateTests
- << " frames";
- num_frames_to_encode_ = kMinFramesForBitrateTests;
- } else {
- num_frames_to_encode_ = num_frames_in_stream_;
- }
-
thread_checker_.DetachFromThread();
}
@@ -629,35 +698,34 @@ void VEAClient::RequireBitstreamBuffers(unsigned int input_count,
ASSERT_EQ(state_, CS_INITIALIZED);
SetState(CS_ENCODING);
- // TODO(posciak): For now we only support input streams that meet encoder
- // size requirements exactly (i.e. coded size == visible size), so that we
- // can simply mmap the stream file and feed the encoder directly with chunks
- // of that, instead of memcpying from mmapped file into a separate set of
- // input buffers that would meet the coded size and alignment requirements.
- // If/when this is changed, the ARM-specific alignment check below should be
- // redone as well.
- input_coded_size_ = input_coded_size;
- ASSERT_EQ(input_coded_size_, test_stream_.size);
-#if defined(ARCH_CPU_ARMEL)
- // ARM performs CPU cache management with CPU cache line granularity. We thus
- // need to ensure our buffers are CPU cache line-aligned (64 byte-aligned).
- // Otherwise newer kernels will refuse to accept them, and on older kernels
- // we'll be treating ourselves to random corruption.
- // Since we are just mmapping and passing chunks of the input file, to ensure
- // alignment, if the starting virtual addresses of the frames in it were not
- // 64 byte-aligned, we'd have to use a separate set of input buffers and copy
- // the frames into them before sending to the encoder. It would have been an
- // overkill here though, because, for now at least, we only test resolutions
- // that result in proper alignment, and it would have also interfered with
- // performance testing. So just assert that the frame size is a multiple of
- // 64 bytes. This ensures all frames start at 64-byte boundary, because
- // MemoryMappedFile should be mmapp()ed at virtual page start as well.
- ASSERT_EQ(input_buffer_size_ & 63, 0u)
- << "Frame size has to be a multiple of 64 bytes";
- ASSERT_EQ(reinterpret_cast<off_t>(test_stream_.input_file.data()) & 63, 0)
- << "Mapped file should be mapped at a 64 byte boundary";
-#endif
+ PrepareInputBuffers(
+ test_stream_.size,
+ input_coded_size,
+ test_stream_.in_filename,
+ const_cast<base::MemoryMappedFile*>(&test_stream_.input_file),
wuchengli 2014/08/28 10:17:48 We should remove the const of test_stream_ because
henryhsu 2014/08/29 06:36:41 Done.
+ &input_buffer_size_);
+ CHECK_GT(input_buffer_size_, 0UL);
+
+ // Calculate the number of frames in the input stream by dividing its length
+ // in bytes by frame size in bytes.
+ CHECK_EQ(test_stream_.input_file.length() % input_buffer_size_, 0U)
+ << "Stream byte size is not a product of calculated frame byte size";
+ num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_;
+ CHECK_GT(num_frames_in_stream_, 0UL);
+ CHECK_LE(num_frames_in_stream_, kMaxFrameNum);
+ // We may need to loop over the stream more than once if more frames than
+ // provided is required for bitrate tests.
+ if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) {
+ DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_
+ << " frames), will loop it to reach " << kMinFramesForBitrateTests
+ << " frames";
+ num_frames_to_encode_ = kMinFramesForBitrateTests;
+ } else {
+ num_frames_to_encode_ = num_frames_in_stream_;
+ }
+
+ input_coded_size_ = input_coded_size;
num_required_input_buffers_ = input_count;
ASSERT_GT(num_required_input_buffers_, 0UL);
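
A hedged sketch of the frame-count logic that now runs in RequireBitstreamBuffers() after the input buffers have been prepared. The frame size, stream length, and the 300-frame minimum are made-up values used purely for illustration (the test's real constant is kMinFramesForBitrateTests):

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical numbers: an aligned frame size as computed above and a
  // stream holding 100 such frames; 300 is an assumed minimum.
  const size_t coded_buffer_size = 92160;
  const size_t file_length = 100 * coded_buffer_size;
  const size_t kMinFramesForBitrateTests = 300;
  const bool force_bitrate = true;

  // The prepared file must be an exact multiple of the aligned frame size.
  if (file_length % coded_buffer_size != 0)
    return 1;
  const size_t num_frames_in_stream = file_length / coded_buffer_size;
  // Short streams are looped so that bitrate tests see enough frames.
  const size_t num_frames_to_encode =
      (force_bitrate && num_frames_in_stream < kMinFramesForBitrateTests)
          ? kMinFramesForBitrateTests
          : num_frames_in_stream;
  std::printf("%zu frames in stream, %zu frames to encode\n",
              num_frames_in_stream, num_frames_to_encode);
  return 0;
}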
@@ -743,8 +811,14 @@ void VEAClient::InputNoLongerNeededCallback(int32 input_id) {
scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) {
CHECK_LE(position + input_buffer_size_, test_stream_.input_file.length());
- uint8* frame_data =
+ uint8* frame_data_y =
const_cast<uint8*>(test_stream_.input_file.data() + position);
+ uint8* frame_data_u =
+ frame_data_y + ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize(
+ kInputFormat, 0, input_coded_size_));
+ uint8* frame_data_v =
+ frame_data_u + ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize(
+ kInputFormat, 1, input_coded_size_));
CHECK_GT(current_framerate_, 0U);
scoped_refptr<media::VideoFrame> frame =
@@ -756,9 +830,9 @@ scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) {
input_coded_size_.width(),
input_coded_size_.width() / 2,
input_coded_size_.width() / 2,
- frame_data,
- frame_data + input_coded_size_.GetArea(),
- frame_data + (input_coded_size_.GetArea() * 5 / 4),
+ frame_data_y,
+ frame_data_u,
+ frame_data_v,
base::TimeDelta().FromMilliseconds(
next_input_id_ * base::Time::kMillisecondsPerSecond /
current_framerate_),
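
The plane pointers computed in PrepareInputFrame() above follow directly from the aligned layout that PrepareInputBuffers() wrote out: each plane begins one 64-byte-aligned plane allocation after the previous one. A small sketch of that address arithmetic, again assuming I420 and the hypothetical 320x192 coded size used earlier:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

#define ALIGN_64_BYTES(x) (((x) + 63) & ~63)

int main() {
  const size_t kYSize = 320 * 192;   // Y plane allocation for 320x192
  const size_t kUVSize = 160 * 96;   // U and V plane allocations
  const size_t kFrameSize =
      ALIGN_64_BYTES(kYSize) + 2 * ALIGN_64_BYTES(kUVSize);
  std::vector<uint8_t> file(3 * kFrameSize);  // stands in for the mmapped file

  const size_t position = kFrameSize;  // e.g. the second frame in the file
  uint8_t* frame_data_y = file.data() + position;
  // Each subsequent plane starts one 64-byte-aligned plane size further on,
  // so every plane keeps the alignment established when the file was prepared.
  uint8_t* frame_data_u = frame_data_y + ALIGN_64_BYTES(kYSize);
  uint8_t* frame_data_v = frame_data_u + ALIGN_64_BYTES(kUVSize);
  assert(frame_data_v + kUVSize <= file.data() + file.size());
  return 0;
}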
