| Index: media/gpu/video_encode_accelerator_unittest.cc
|
| diff --git a/media/gpu/video_encode_accelerator_unittest.cc b/media/gpu/video_encode_accelerator_unittest.cc
|
| index ab0e9c07597cae85a7fa503fd9d9b9eb92c42b1d..2c88da8ec0918283cbd89a62097daeb1165d23fc 100644
|
| --- a/media/gpu/video_encode_accelerator_unittest.cc
|
| +++ b/media/gpu/video_encode_accelerator_unittest.cc
|
| @@ -16,8 +16,8 @@
|
| #include "base/bind.h"
|
| #include "base/command_line.h"
|
| #include "base/files/file_util.h"
|
| -#include "base/files/memory_mapped_file.h"
|
| #include "base/macros.h"
|
| +#include "base/memory/aligned_memory.h"
|
| #include "base/memory/scoped_vector.h"
|
| #include "base/message_loop/message_loop.h"
|
| #include "base/numerics/safe_conversions.h"
|
| @@ -165,9 +165,9 @@ struct TestStream {
|
| // And the file must be an I420 (YUV planar) raw stream.
|
| std::string in_filename;
|
|
|
| - // A vector used to prepare aligned input buffers of |in_filename|. This
|
| - // makes sure starting address of YUV planes are 64 bytes-aligned.
|
| - std::vector<char> aligned_in_file_data;
|
| + // Aligned memory used to prepare aligned input buffers of |in_filename|.
|
| + // This makes sure the starting addresses of YUV planes are 64-byte aligned.
|
| + char* aligned_in_file_data;
|
|
|
| // Byte size of a frame of |aligned_in_file_data|.
|
| size_t aligned_buffer_size;
|
| @@ -220,12 +220,12 @@ static void CreateAlignedInputStreamFile(const gfx::Size& coded_size,
|
| TestStream* test_stream) {
|
| // Test case may have many encoders and memory should be prepared once.
|
| if (test_stream->coded_size == coded_size &&
|
| - !test_stream->aligned_in_file_data.empty())
|
| + test_stream->aligned_in_file_data)
|
| return;
|
|
|
| // All encoders in multiple encoder test reuse the same test_stream, make
|
| // sure they requested the same coded_size
|
| - ASSERT_TRUE(test_stream->aligned_in_file_data.empty() ||
|
| + ASSERT_TRUE(!test_stream->aligned_in_file_data ||
|
| coded_size == test_stream->coded_size);
|
| test_stream->coded_size = coded_size;
|
|
|
| @@ -270,8 +270,9 @@ static void CreateAlignedInputStreamFile(const gfx::Size& coded_size,
|
| test_stream->num_frames = src_file_size / visible_buffer_size;
|
|
|
| LOG_ASSERT(test_stream->aligned_buffer_size > 0UL);
|
| - test_stream->aligned_in_file_data.resize(test_stream->aligned_buffer_size *
|
| - test_stream->num_frames);
|
| + test_stream->aligned_in_file_data = static_cast<char*>(
|
| + base::AlignedAlloc(
|
| + test_stream->aligned_buffer_size * test_stream->num_frames, 64));
|
|
|
| base::File src(src_file, base::File::FLAG_OPEN | base::File::FLAG_READ);
|
| std::vector<char> src_data(visible_buffer_size);
|
| @@ -298,8 +299,7 @@ static void CreateAlignedInputStreamFile(const gfx::Size& coded_size,
|
|
|
| // Assert that memory mapped of file starts at 64 byte boundary. So each
|
| // plane of frames also start at 64 byte boundary.
|
| - ASSERT_EQ(reinterpret_cast<off_t>(&test_stream->aligned_in_file_data[0]) & 63,
|
| - 0)
|
| + ASSERT_EQ(reinterpret_cast<uintptr_t>(test_stream->aligned_in_file_data) & 63, 0)
|
| << "File should be mapped at a 64 byte boundary";
|
|
|
| LOG_ASSERT(test_stream->num_frames > 0UL);
|
| @@ -338,6 +338,7 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data,
|
| LOG_ASSERT(profile > VIDEO_CODEC_PROFILE_UNKNOWN);
|
| LOG_ASSERT(profile <= VIDEO_CODEC_PROFILE_MAX);
|
| test_stream->requested_profile = static_cast<VideoCodecProfile>(profile);
|
| + test_stream->aligned_in_file_data = nullptr;
|
|
|
| if (fields.size() >= 5 && !fields[4].empty())
|
| test_stream->out_filename = fields[4];
|
| @@ -393,6 +394,9 @@ class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
|
| }
|
|
|
| virtual void TearDown() {
|
| + for (TestStream* test_stream : test_streams_) {
|
| + base::AlignedFree(test_stream->aligned_in_file_data);
|
| + }
|
| log_file_.reset();
|
| }
|
|
|
| @@ -1297,7 +1301,7 @@ void VEAClient::InputNoLongerNeededCallback(int32_t input_id) {
|
|
|
| scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
|
| uint8_t* frame_data_y =
|
| - reinterpret_cast<uint8_t*>(&test_stream_->aligned_in_file_data[0]) +
|
| + reinterpret_cast<uint8_t*>(test_stream_->aligned_in_file_data) +
|
| position;
|
| uint8_t* frame_data_u = frame_data_y + test_stream_->aligned_plane_size[0];
|
| uint8_t* frame_data_v = frame_data_u + test_stream_->aligned_plane_size[1];
|
| @@ -1318,7 +1322,7 @@ scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
|
| scoped_refptr<VideoFrame> VEAClient::PrepareInputFrame(off_t position,
|
| int32_t* input_id) {
|
| CHECK_LE(position + test_stream_->aligned_buffer_size,
|
| - test_stream_->aligned_in_file_data.size());
|
| + test_stream_->aligned_buffer_size * test_stream_->num_frames);
|
|
|
| scoped_refptr<VideoFrame> frame = CreateFrame(position);
|
| EXPECT_TRUE(frame);
|
| @@ -1347,7 +1351,8 @@ void VEAClient::FeedEncoderWithOneInput() {
|
| return;
|
|
|
| size_t bytes_left =
|
| - test_stream_->aligned_in_file_data.size() - pos_in_input_stream_;
|
| + test_stream_->aligned_buffer_size * test_stream_->num_frames -
|
| + pos_in_input_stream_;
|
| if (bytes_left < test_stream_->aligned_buffer_size) {
|
| DCHECK_EQ(bytes_left, 0UL);
|
| // Rewind if at the end of stream and we are still encoding.
|
|
|