Chromium Code Reviews| Index: content/common/gpu/media/video_encode_accelerator_unittest.cc |
| diff --git a/content/common/gpu/media/video_encode_accelerator_unittest.cc b/content/common/gpu/media/video_encode_accelerator_unittest.cc |
| index 9d1e3b69dc7c9edccf56b2fab6021bc0fb20f23b..36571ac2686b480ad82a945b449997d1c99fc17c 100644 |
| --- a/content/common/gpu/media/video_encode_accelerator_unittest.cc |
| +++ b/content/common/gpu/media/video_encode_accelerator_unittest.cc |
| @@ -33,6 +33,8 @@ |
| #error The VideoEncodeAcceleratorUnittest is not supported on this platform. |
| #endif |
| +#define ALIGN_64_BYTES(x) (((x) + 63) & ~63) |
| + |
| using media::VideoEncodeAccelerator; |
| namespace content { |
| @@ -96,22 +98,122 @@ base::FilePath::StringType* g_test_stream_data; |
| struct TestStream { |
| TestStream() |
| - : requested_bitrate(0), |
| + : input_file(NULL), |
| + requested_bitrate(0), |
| requested_framerate(0), |
| requested_subsequent_bitrate(0), |
| requested_subsequent_framerate(0) {} |
| ~TestStream() {} |
| gfx::Size size; |
| - base::MemoryMappedFile input_file; |
| - media::VideoCodecProfile requested_profile; |
| + |
| + // Input file name. The file must be an I420 (YUV planar) raw stream. |
| + std::string in_filename; |
| + |
| + // The memory-mapped contents of |temp_file|. |
| + base::MemoryMappedFile* input_file; |
|
wuchengli
2014/09/01 08:20:42
Use scoped_ptr to indicate ownership.
henryhsu
2014/09/01 08:57:36
Done.
|
| + |
| + // A temporary file used to prepare input buffers. |
| + base::FilePath temp_file; |
| + |
| std::string out_filename; |
| + media::VideoCodecProfile requested_profile; |
| unsigned int requested_bitrate; |
| unsigned int requested_framerate; |
| unsigned int requested_subsequent_bitrate; |
| unsigned int requested_subsequent_framerate; |
| }; |
| +// ARM performs CPU cache management with CPU cache line granularity. We thus |
| +// need to ensure our buffers are CPU cache line-aligned (64 byte-aligned). |
| +// Otherwise newer kernels will refuse to accept them, and on older kernels |
| +// we'll be treating ourselves to random corruption. |
| +// Since we are just mmapping and passing chunks of the input file, to ensure |
| +// alignment, if the starting virtual addresses of YUV planes of the frames |
| +// in it were not 64 byte-aligned, we'd have to prepare a memory with 64 |
| +// byte-aligned starting address and make sure the addresses of YUV planes of |
| +// each frame are 64 byte-aligned before sending to the encoder. |
| +// Now we test resolutions different from coded size and prepare chunks before |
| +// encoding to avoid performance impact. |
| +// Use |visible_size| and |coded_size| to copy YUV data into memory from |
| +// |in_filename|. The copied result will be saved in |input_file|. Also |
| +// calculate the byte size of an input frame and set it to |coded_buffer_size|. |
| + // |temp_file| is used to prepare input buffers and will be deleted after the |
| + // test finishes. |
| +static void PrepareInputBuffers(const gfx::Size& visible_size, |
| + const gfx::Size& coded_size, |
| + const std::string in_filename, |
| + base::MemoryMappedFile* input_file, |
| + base::FilePath* temp_file, |
| + size_t* coded_buffer_size) { |
| + size_t input_num_planes = media::VideoFrame::NumPlanes(kInputFormat); |
| + std::vector<size_t> padding_sizes(input_num_planes); |
| + std::vector<size_t> coded_bpl(input_num_planes); |
| + std::vector<size_t> visible_bpl(input_num_planes); |
| + std::vector<size_t> visible_plane_rows(input_num_planes); |
| + |
| + // Each YUV plane's starting address should be 64-byte aligned. Calculate the |
| + // size for each plane, and frame allocation size for coded size. Also store |
| + // bytes per line information of coded size and visible size. |
| + *coded_buffer_size = 0; |
| + for (off_t i = 0; i < input_num_planes; i++) { |
| + size_t size = |
| + media::VideoFrame::PlaneAllocationSize(kInputFormat, i, coded_size); |
| + size_t padding_bytes = ALIGN_64_BYTES(size) - size; |
| + *coded_buffer_size += ALIGN_64_BYTES(size); |
| + |
| + coded_bpl[i] = |
| + media::VideoFrame::RowBytes(i, kInputFormat, coded_size.width()); |
| + visible_bpl[i] = |
| + media::VideoFrame::RowBytes(i, kInputFormat, visible_size.width()); |
| + visible_plane_rows[i] = |
| + media::VideoFrame::Rows(i, kInputFormat, visible_size.height()); |
| + size_t padding_rows = |
| + media::VideoFrame::Rows(i, kInputFormat, coded_size.height()) - |
| + visible_plane_rows[i]; |
| + padding_sizes[i] = padding_rows * coded_bpl[i] + padding_bytes; |
| + } |
| + |
| + // A test case may create many encoders, but the memory should only be |
| + // prepared once. |
| + if (input_file && input_file->IsValid()) |
| + return; |
| + |
| + base::MemoryMappedFile src_file; |
| + CHECK(base::CreateTemporaryFile(temp_file)); |
| + CHECK(src_file.Initialize(base::FilePath(in_filename))); |
| + |
| + size_t visible_buffer_size = |
| + media::VideoFrame::AllocationSize(kInputFormat, visible_size); |
| + size_t num_frames = src_file.length() / visible_buffer_size; |
| + uint32 flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE | |
| + base::File::FLAG_READ; |
| + |
| + // Create a temporary file large enough to hold all frames at coded size. |
| + base::File file(*temp_file, flags); |
| + file.SetLength(*coded_buffer_size * num_frames); |
| + CHECK(input_file->Initialize(file.Pass(), true)); |
| + |
| + off_t src_offset = 0, dest_offset = 0; |
| + while (src_offset < static_cast<off_t>(src_file.length())) { |
| + for (off_t i = 0; i < input_num_planes; i++) { |
| +#if defined(ARCH_CPU_ARMEL) |
| + // Assert that each plane of frame starts at 64-byte boundary. |
| + const uint8* ptr = input_file->data() + dest_offset; |
| + ASSERT_EQ(reinterpret_cast<off_t>(ptr) & 63, 0) |
| + << "Planes of frame should be mapped at a 64 byte boundary"; |
| +#endif |
| + for (off_t j = 0; j < visible_plane_rows[i]; j++) { |
| + const uint8* src = src_file.data() + src_offset; |
| + uint8* dest = input_file->data() + dest_offset; |
| + memcpy(dest, src, visible_bpl[i]); |
| + src_offset += visible_bpl[i]; |
| + dest_offset += coded_bpl[i]; |
| + } |
| + dest_offset += padding_sizes[i]; |
| + } |
| + } |
| +} |
| + |
| // Parse |data| into its constituent parts, set the various output fields |
| // accordingly, read in video stream, and store them to |test_streams|. |
| static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
| @@ -128,8 +230,9 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
| CHECK_GE(fields.size(), 4U) << data; |
| CHECK_LE(fields.size(), 9U) << data; |
| TestStream* test_stream = new TestStream(); |
| + test_stream->input_file = new base::MemoryMappedFile(); |
| - base::FilePath::StringType filename = fields[0]; |
| + test_stream->in_filename = fields[0]; |
| int width, height; |
| CHECK(base::StringToInt(fields[1], &width)); |
| CHECK(base::StringToInt(fields[2], &height)); |
| @@ -161,7 +264,6 @@ static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
| &test_stream->requested_subsequent_framerate)); |
| } |
| - CHECK(test_stream->input_file.Initialize(base::FilePath(filename))); |
| test_streams->push_back(test_stream); |
| } |
| } |
| @@ -368,7 +470,7 @@ scoped_ptr<StreamValidator> StreamValidator::Create( |
| class VEAClient : public VideoEncodeAccelerator::Client { |
| public: |
| - VEAClient(const TestStream& test_stream, |
| + VEAClient(TestStream& test_stream, |
| ClientStateNotification<ClientState>* note, |
| bool save_to_file, |
| unsigned int keyframe_period, |
| @@ -430,7 +532,7 @@ class VEAClient : public VideoEncodeAccelerator::Client { |
| ClientState state_; |
| scoped_ptr<VideoEncodeAccelerator> encoder_; |
| - const TestStream& test_stream_; |
| + TestStream& test_stream_; |
| // Used to notify another thread about the state. VEAClient does not own this. |
| ClientStateNotification<ClientState>* note_; |
| @@ -507,7 +609,7 @@ class VEAClient : public VideoEncodeAccelerator::Client { |
| base::ThreadChecker thread_checker_; |
| }; |
| -VEAClient::VEAClient(const TestStream& test_stream, |
| +VEAClient::VEAClient(TestStream& test_stream, |
| ClientStateNotification<ClientState>* note, |
| bool save_to_file, |
| unsigned int keyframe_period, |
| @@ -552,29 +654,6 @@ VEAClient::VEAClient(const TestStream& test_stream, |
| EXPECT_EQ(0, base::WriteFile(out_filename, NULL, 0)); |
| } |
| - input_buffer_size_ = |
| - media::VideoFrame::AllocationSize(kInputFormat, test_stream.size); |
| - CHECK_GT(input_buffer_size_, 0UL); |
| - |
| - // Calculate the number of frames in the input stream by dividing its length |
| - // in bytes by frame size in bytes. |
| - CHECK_EQ(test_stream_.input_file.length() % input_buffer_size_, 0U) |
| - << "Stream byte size is not a product of calculated frame byte size"; |
| - num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_; |
| - CHECK_GT(num_frames_in_stream_, 0UL); |
| - CHECK_LE(num_frames_in_stream_, kMaxFrameNum); |
| - |
| - // We may need to loop over the stream more than once if more frames than |
| - // provided is required for bitrate tests. |
| - if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) { |
| - DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_ |
| - << " frames), will loop it to reach " << kMinFramesForBitrateTests |
| - << " frames"; |
| - num_frames_to_encode_ = kMinFramesForBitrateTests; |
| - } else { |
| - num_frames_to_encode_ = num_frames_in_stream_; |
| - } |
| - |
| thread_checker_.DetachFromThread(); |
| } |
| @@ -629,35 +708,35 @@ void VEAClient::RequireBitstreamBuffers(unsigned int input_count, |
| ASSERT_EQ(state_, CS_INITIALIZED); |
| SetState(CS_ENCODING); |
| - // TODO(posciak): For now we only support input streams that meet encoder |
| - // size requirements exactly (i.e. coded size == visible size), so that we |
| - // can simply mmap the stream file and feed the encoder directly with chunks |
| - // of that, instead of memcpying from mmapped file into a separate set of |
| - // input buffers that would meet the coded size and alignment requirements. |
| - // If/when this is changed, the ARM-specific alignment check below should be |
| - // redone as well. |
| - input_coded_size_ = input_coded_size; |
| - ASSERT_EQ(input_coded_size_, test_stream_.size); |
| -#if defined(ARCH_CPU_ARMEL) |
| - // ARM performs CPU cache management with CPU cache line granularity. We thus |
| - // need to ensure our buffers are CPU cache line-aligned (64 byte-aligned). |
| - // Otherwise newer kernels will refuse to accept them, and on older kernels |
| - // we'll be treating ourselves to random corruption. |
| - // Since we are just mmapping and passing chunks of the input file, to ensure |
| - // alignment, if the starting virtual addresses of the frames in it were not |
| - // 64 byte-aligned, we'd have to use a separate set of input buffers and copy |
| - // the frames into them before sending to the encoder. It would have been an |
| - // overkill here though, because, for now at least, we only test resolutions |
| - // that result in proper alignment, and it would have also interfered with |
| - // performance testing. So just assert that the frame size is a multiple of |
| - // 64 bytes. This ensures all frames start at 64-byte boundary, because |
| - // MemoryMappedFile should be mmapp()ed at virtual page start as well. |
| - ASSERT_EQ(input_buffer_size_ & 63, 0u) |
| - << "Frame size has to be a multiple of 64 bytes"; |
| - ASSERT_EQ(reinterpret_cast<off_t>(test_stream_.input_file.data()) & 63, 0) |
| - << "Mapped file should be mapped at a 64 byte boundary"; |
| -#endif |
| + PrepareInputBuffers(test_stream_.size, |
| + input_coded_size, |
| + test_stream_.in_filename, |
| + test_stream_.input_file, |
| + &test_stream_.temp_file, |
| + &input_buffer_size_); |
| + CHECK_GT(input_buffer_size_, 0UL); |
| + |
| + // Calculate the number of frames in the input stream by dividing its length |
| + // in bytes by frame size in bytes. |
| + CHECK_EQ(test_stream_.input_file->length() % input_buffer_size_, 0U) |
| + << "Stream byte size is not a product of calculated frame byte size"; |
| + num_frames_in_stream_ = |
| + test_stream_.input_file->length() / input_buffer_size_; |
| + CHECK_GT(num_frames_in_stream_, 0UL); |
| + CHECK_LE(num_frames_in_stream_, kMaxFrameNum); |
| + |
| + // We may need to loop over the stream more than once if more frames than |
| + // provided is required for bitrate tests. |
| + if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) { |
| + DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_ |
| + << " frames), will loop it to reach " << kMinFramesForBitrateTests |
| + << " frames"; |
| + num_frames_to_encode_ = kMinFramesForBitrateTests; |
| + } else { |
| + num_frames_to_encode_ = num_frames_in_stream_; |
| + } |
| + input_coded_size_ = input_coded_size; |
| num_required_input_buffers_ = input_count; |
| ASSERT_GT(num_required_input_buffers_, 0UL); |
| @@ -741,10 +820,16 @@ void VEAClient::InputNoLongerNeededCallback(int32 input_id) { |
| } |
| scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) { |
| - CHECK_LE(position + input_buffer_size_, test_stream_.input_file.length()); |
| + CHECK_LE(position + input_buffer_size_, test_stream_.input_file->length()); |
| - uint8* frame_data = |
| - const_cast<uint8*>(test_stream_.input_file.data() + position); |
| + uint8* frame_data_y = |
| + const_cast<uint8*>(test_stream_.input_file->data() + position); |
| + uint8* frame_data_u = |
| + frame_data_y + ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize( |
| + kInputFormat, 0, input_coded_size_)); |
| + uint8* frame_data_v = |
| + frame_data_u + ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize( |
| + kInputFormat, 1, input_coded_size_)); |
| CHECK_GT(current_framerate_, 0U); |
| scoped_refptr<media::VideoFrame> frame = |
| @@ -756,9 +841,9 @@ scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) { |
| input_coded_size_.width(), |
| input_coded_size_.width() / 2, |
| input_coded_size_.width() / 2, |
| - frame_data, |
| - frame_data + input_coded_size_.GetArea(), |
| - frame_data + (input_coded_size_.GetArea() * 5 / 4), |
| + frame_data_y, |
| + frame_data_u, |
| + frame_data_v, |
| base::TimeDelta().FromMilliseconds( |
| next_input_id_ * base::Time::kMillisecondsPerSecond / |
| current_framerate_), |
| @@ -782,7 +867,8 @@ void VEAClient::FeedEncoderWithInputs() { |
| while (inputs_at_client_.size() < |
| num_required_input_buffers_ + kNumExtraInputFrames) { |
| - size_t bytes_left = test_stream_.input_file.length() - pos_in_input_stream_; |
| + size_t bytes_left = |
| + test_stream_.input_file->length() - pos_in_input_stream_; |
| if (bytes_left < input_buffer_size_) { |
| DCHECK_EQ(bytes_left, 0UL); |
| // Rewind if at the end of stream and we are still encoding. |
| @@ -975,6 +1061,11 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) { |
| // This ensures all tasks have finished. |
| encoder_thread.Stop(); |
| + |
| + for (size_t i = 0; i < test_streams.size(); i++) { |
| + delete test_streams[i]->input_file; |
| + base::DeleteFile(test_streams[i]->temp_file, false); |
| + } |
| } |
| INSTANTIATE_TEST_CASE_P( |