OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/at_exit.h" | 5 #include "base/at_exit.h" |
6 #include "base/bind.h" | 6 #include "base/bind.h" |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/file_util.h" | 8 #include "base/file_util.h" |
9 #include "base/files/memory_mapped_file.h" | 9 #include "base/files/memory_mapped_file.h" |
10 #include "base/memory/scoped_vector.h" | 10 #include "base/memory/scoped_vector.h" |
(...skipping 15 matching lines...) |
26 #endif | 26 #endif |
27 | 27 |
28 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) | 28 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) |
29 #include "content/common/gpu/media/v4l2_video_encode_accelerator.h" | 29 #include "content/common/gpu/media/v4l2_video_encode_accelerator.h" |
30 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11) | 30 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11) |
31 #include "content/common/gpu/media/vaapi_video_encode_accelerator.h" | 31 #include "content/common/gpu/media/vaapi_video_encode_accelerator.h" |
32 #else | 32 #else |
33 #error The VideoEncodeAcceleratorUnittest is not supported on this platform. | 33 #error The VideoEncodeAcceleratorUnittest is not supported on this platform. |
34 #endif | 34 #endif |
35 | 35 |
| 36 #define ALIGN_64_BYTES(x) (((x) + 63) & ~63) |
| 37 |
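For reference, ALIGN_64_BYTES(x) rounds x up to the next multiple of 64. An illustrative sanity-check sketch, not part of the patch (static_assert is used here purely for illustration, assuming a C++11 toolchain):

    static_assert(ALIGN_64_BYTES(1) == 64, "rounds up to the next multiple of 64");
    static_assert(ALIGN_64_BYTES(64) == 64, "already-aligned values are unchanged");
    static_assert(ALIGN_64_BYTES(65) == 128, "rounds up, never down");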
36 using media::VideoEncodeAccelerator; | 38 using media::VideoEncodeAccelerator; |
37 | 39 |
38 namespace content { | 40 namespace content { |
39 namespace { | 41 namespace { |
40 | 42 |
41 const media::VideoFrame::Format kInputFormat = media::VideoFrame::I420; | 43 const media::VideoFrame::Format kInputFormat = media::VideoFrame::I420; |
42 | 44 |
43 // Arbitrarily chosen to add some depth to the pipeline. | 45 // Arbitrarily chosen to add some depth to the pipeline. |
44 const unsigned int kNumOutputBuffers = 4; | 46 const unsigned int kNumOutputBuffers = 4; |
45 const unsigned int kNumExtraInputFrames = 4; | 47 const unsigned int kNumExtraInputFrames = 4; |
(...skipping 50 matching lines...) |
96 | 98 |
97 struct TestStream { | 99 struct TestStream { |
98 TestStream() | 100 TestStream() |
99 : requested_bitrate(0), | 101 : requested_bitrate(0), |
100 requested_framerate(0), | 102 requested_framerate(0), |
101 requested_subsequent_bitrate(0), | 103 requested_subsequent_bitrate(0), |
102 requested_subsequent_framerate(0) {} | 104 requested_subsequent_framerate(0) {} |
103 ~TestStream() {} | 105 ~TestStream() {} |
104 | 106 |
105 gfx::Size size; | 107 gfx::Size size; |
106 base::MemoryMappedFile input_file; | 108 |
| 109 // Name of the input file; the file must be an I420 (YUV planar) raw stream. |
| 110 std::string in_filename; |
| 111 |
| 112 // The memory-mapped view of |temp_file|. |
| 113 scoped_ptr<base::MemoryMappedFile> input_file; |
| 114 |
| 115 // A temporary file used to prepare input buffers. |
| 116 base::FilePath temp_file; |
| 117 |
| 118 std::string out_filename; |
107 media::VideoCodecProfile requested_profile; | 119 media::VideoCodecProfile requested_profile; |
108 std::string out_filename; | |
109 unsigned int requested_bitrate; | 120 unsigned int requested_bitrate; |
110 unsigned int requested_framerate; | 121 unsigned int requested_framerate; |
111 unsigned int requested_subsequent_bitrate; | 122 unsigned int requested_subsequent_bitrate; |
112 unsigned int requested_subsequent_framerate; | 123 unsigned int requested_subsequent_framerate; |
113 }; | 124 }; |
114 | 125 |
| 126 // ARM performs CPU cache management with CPU cache line granularity. We thus |
| 127 // need to ensure our buffers are CPU cache line-aligned (64 byte-aligned). |
| 128 // Otherwise newer kernels will refuse to accept them, and on older kernels |
| 129 // we'll be treating ourselves to random corruption. |
| 130 // Since we mmap and pass chunks of the input file directly, each YUV plane |
| 131 // of every frame must itself start at a 64 byte-aligned virtual address. If |
| 132 // the planes in the raw file are not laid out that way, we have to prepare a |
| 133 // separate, 64 byte-aligned buffer and copy each frame into it so that all |
| 134 // of its YUV planes begin on 64 byte boundaries before it is sent to the |
| 135 // encoder. We also test resolutions whose visible size differs from the |
| 136 // coded size, so these chunks are prepared once, before encoding, to avoid |
| 137 // skewing performance measurements. |
| 138 // This function copies YUV data from |in_filename| into |input_file|, using |
| 139 // |visible_size| and |coded_size| to lay out the planes, and sets |
| 140 // |coded_buffer_size| to the byte size of one input frame. |temp_file| backs |
| 141 // the prepared buffers and is deleted after the test finishes. |
| 142 static void PrepareInputBuffers(const gfx::Size& visible_size, |
| 143 const gfx::Size& coded_size, |
| 144 const std::string& in_filename, |
| 145 base::MemoryMappedFile* input_file, |
| 146 base::FilePath* temp_file, |
| 147 size_t* coded_buffer_size) { |
| 148 size_t input_num_planes = media::VideoFrame::NumPlanes(kInputFormat); |
| 149 std::vector<size_t> padding_sizes(input_num_planes); |
| 150 std::vector<size_t> coded_bpl(input_num_planes); |
| 151 std::vector<size_t> visible_bpl(input_num_planes); |
| 152 std::vector<size_t> visible_plane_rows(input_num_planes); |
| 153 |
| 154 // Each YUV plane must start at a 64 byte-aligned address. Calculate the |
| 155 // padding size for each plane and the total frame allocation size for the |
| 156 // coded size, and record bytes-per-line for both the coded and visible sizes. |
| 157 *coded_buffer_size = 0; |
| 158 for (size_t i = 0; i < input_num_planes; i++) { |
| 159 size_t size = |
| 160 media::VideoFrame::PlaneAllocationSize(kInputFormat, i, coded_size); |
| 161 size_t padding_bytes = ALIGN_64_BYTES(size) - size; |
| 162 *coded_buffer_size += ALIGN_64_BYTES(size); |
| 163 |
| 164 coded_bpl[i] = |
| 165 media::VideoFrame::RowBytes(i, kInputFormat, coded_size.width()); |
| 166 visible_bpl[i] = |
| 167 media::VideoFrame::RowBytes(i, kInputFormat, visible_size.width()); |
| 168 visible_plane_rows[i] = |
| 169 media::VideoFrame::Rows(i, kInputFormat, visible_size.height()); |
| 170 size_t padding_rows = |
| 171 media::VideoFrame::Rows(i, kInputFormat, coded_size.height()) - |
| 172 visible_plane_rows[i]; |
| 173 padding_sizes[i] = padding_rows * coded_bpl[i] + padding_bytes; |
| 174 } |
| 175 |
| 176 // A test case may create multiple encoders; prepare the memory only once. |
| 177 if (input_file && input_file->IsValid()) |
| 178 return; |
| 179 |
| 180 base::MemoryMappedFile src_file; |
| 181 CHECK(base::CreateTemporaryFile(temp_file)); |
| 182 CHECK(src_file.Initialize(base::FilePath(in_filename))); |
| 183 |
| 184 size_t visible_buffer_size = |
| 185 media::VideoFrame::AllocationSize(kInputFormat, visible_size); |
| 186 size_t num_frames = src_file.length() / visible_buffer_size; |
| 187 uint32 flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE | |
| 188 base::File::FLAG_READ; |
| 189 |
| 190 // Create a temporary file sized to hold all frames at coded (aligned) size. |
| 191 base::File file(*temp_file, flags); |
| 192 file.SetLength(*coded_buffer_size * num_frames); |
| 193 CHECK(input_file->Initialize(file.Pass(), true)); |
| 194 |
| 195 off_t src_offset = 0, dest_offset = 0; |
| 196 while (src_offset < static_cast<off_t>(src_file.length())) { |
| 197 for (size_t i = 0; i < input_num_planes; i++) { |
| 198 #if defined(ARCH_CPU_ARMEL) |
| 199 // Assert that each plane of the frame starts at a 64 byte boundary. |
| 200 const uint8* ptr = input_file->data() + dest_offset; |
| 201 ASSERT_EQ(reinterpret_cast<off_t>(ptr) & 63, 0) |
| 202 << "Each plane of the frame should start at a 64 byte boundary"; |
| 203 #endif |
| 204 for (size_t j = 0; j < visible_plane_rows[i]; j++) { |
| 205 const uint8* src = src_file.data() + src_offset; |
| 206 uint8* dest = input_file->data() + dest_offset; |
| 207 memcpy(dest, src, visible_bpl[i]); |
| 208 src_offset += visible_bpl[i]; |
| 209 dest_offset += coded_bpl[i]; |
| 210 } |
| 211 dest_offset += padding_sizes[i]; |
| 212 } |
| 213 } |
| 214 } |
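To make the padding arithmetic in PrepareInputBuffers() concrete, a worked example under stated assumptions: a hypothetical 320x180 stream with a 320x192 coded size, and the usual I420 plane sizes (Y: width*height bytes, U and V: width/2 * height/2 bytes each). The exact values returned by the media::VideoFrame helpers may differ; this only sketches the shape of the computation.

    // plane | alloc (coded) | ALIGN_64_BYTES | padding_bytes | coded_bpl | visible rows | padding_rows | padding_sizes[i]
    //   Y   |     61440     |     61440      |       0       |    320    |     180      |      12      | 12*320 = 3840
    //   U   |     15360     |     15360      |       0       |    160    |      90      |       6      |  6*160 =  960
    //   V   |     15360     |     15360      |       0       |    160    |      90      |       6      |  6*160 =  960
    //
    // coded_buffer_size = 61440 + 15360 + 15360 = 92160 bytes per frame, itself
    // a multiple of 64, so every frame (and every plane within it) starts on a
    // 64 byte boundary in the temporary file.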
| 215 |
115 // Parse |data| into its constituent parts, set the various output fields | 216 // Parse |data| into its constituent parts, set the various output fields |
116 // accordingly, read in video stream, and store them to |test_streams|. | 217 // accordingly, and store them to |test_streams|. |
117 static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, | 218 static void ParseAndReadTestStreamData(const base::FilePath::StringType& data, |
118 ScopedVector<TestStream>* test_streams) { | 219 ScopedVector<TestStream>* test_streams) { |
119 // Split the string to individual test stream data. | 220 // Split the string to individual test stream data. |
120 std::vector<base::FilePath::StringType> test_streams_data; | 221 std::vector<base::FilePath::StringType> test_streams_data; |
121 base::SplitString(data, ';', &test_streams_data); | 222 base::SplitString(data, ';', &test_streams_data); |
122 CHECK_GE(test_streams_data.size(), 1U) << data; | 223 CHECK_GE(test_streams_data.size(), 1U) << data; |
123 | 224 |
124 // Parse each test stream data and read the input file. | 225 // Parse each test stream data. |
125 for (size_t index = 0; index < test_streams_data.size(); ++index) { | 226 for (size_t index = 0; index < test_streams_data.size(); ++index) { |
126 std::vector<base::FilePath::StringType> fields; | 227 std::vector<base::FilePath::StringType> fields; |
127 base::SplitString(test_streams_data[index], ':', &fields); | 228 base::SplitString(test_streams_data[index], ':', &fields); |
128 CHECK_GE(fields.size(), 4U) << data; | 229 CHECK_GE(fields.size(), 4U) << data; |
129 CHECK_LE(fields.size(), 9U) << data; | 230 CHECK_LE(fields.size(), 9U) << data; |
130 TestStream* test_stream = new TestStream(); | 231 TestStream* test_stream = new TestStream(); |
| 232 test_stream->input_file.reset(new base::MemoryMappedFile()); |
131 | 233 |
132 base::FilePath::StringType filename = fields[0]; | 234 test_stream->in_filename = fields[0]; |
133 int width, height; | 235 int width, height; |
134 CHECK(base::StringToInt(fields[1], &width)); | 236 CHECK(base::StringToInt(fields[1], &width)); |
135 CHECK(base::StringToInt(fields[2], &height)); | 237 CHECK(base::StringToInt(fields[2], &height)); |
136 test_stream->size = gfx::Size(width, height); | 238 test_stream->size = gfx::Size(width, height); |
137 CHECK(!test_stream->size.IsEmpty()); | 239 CHECK(!test_stream->size.IsEmpty()); |
138 int profile; | 240 int profile; |
139 CHECK(base::StringToInt(fields[3], &profile)); | 241 CHECK(base::StringToInt(fields[3], &profile)); |
140 CHECK_GT(profile, media::VIDEO_CODEC_PROFILE_UNKNOWN); | 242 CHECK_GT(profile, media::VIDEO_CODEC_PROFILE_UNKNOWN); |
141 CHECK_LE(profile, media::VIDEO_CODEC_PROFILE_MAX); | 243 CHECK_LE(profile, media::VIDEO_CODEC_PROFILE_MAX); |
142 test_stream->requested_profile = | 244 test_stream->requested_profile = |
(...skipping 11 matching lines...) |
154 if (fields.size() >= 8 && !fields[7].empty()) { | 256 if (fields.size() >= 8 && !fields[7].empty()) { |
155 CHECK(base::StringToUint(fields[7], | 257 CHECK(base::StringToUint(fields[7], |
156 &test_stream->requested_subsequent_bitrate)); | 258 &test_stream->requested_subsequent_bitrate)); |
157 } | 259 } |
158 | 260 |
159 if (fields.size() >= 9 && !fields[8].empty()) { | 261 if (fields.size() >= 9 && !fields[8].empty()) { |
160 CHECK(base::StringToUint(fields[8], | 262 CHECK(base::StringToUint(fields[8], |
161 &test_stream->requested_subsequent_framerate)); | 263 &test_stream->requested_subsequent_framerate)); |
162 } | 264 } |
163 | 265 |
164 CHECK(test_stream->input_file.Initialize(base::FilePath(filename))); | |
165 test_streams->push_back(test_stream); | 266 test_streams->push_back(test_stream); |
166 } | 267 } |
167 } | 268 } |
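For reference, the |data| string parsed above is a semicolon-separated list of streams, each stream a colon-separated field list: fields[0] is the input file name, fields[1] and fields[2] the visible width and height, fields[3] the codec profile number, and the optional fields[7] and fields[8] the mid-stream bitrate and framerate (the intermediate optional fields are in the lines elided above). A hypothetical example with made-up file names:

    // test-320x180.yuv:320:180:1;test-640x360.yuv:640:360:11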
168 | 269 |
169 // Set default parameters of |test_streams| and update the parameters according | 270 // Set default parameters of |test_streams| and update the parameters according |
170 // to |mid_stream_bitrate_switch| and |mid_stream_framerate_switch|. | 271 // to |mid_stream_bitrate_switch| and |mid_stream_framerate_switch|. |
171 static void UpdateTestStreamData(bool mid_stream_bitrate_switch, | 272 static void UpdateTestStreamData(bool mid_stream_bitrate_switch, |
172 bool mid_stream_framerate_switch, | 273 bool mid_stream_framerate_switch, |
173 ScopedVector<TestStream>* test_streams) { | 274 ScopedVector<TestStream>* test_streams) { |
174 for (size_t i = 0; i < test_streams->size(); i++) { | 275 for (size_t i = 0; i < test_streams->size(); i++) { |
(...skipping 186 matching lines...) |
361 validator.reset(new VP8Validator(frame_cb)); | 462 validator.reset(new VP8Validator(frame_cb)); |
362 } else { | 463 } else { |
363 LOG(FATAL) << "Unsupported profile: " << profile; | 464 LOG(FATAL) << "Unsupported profile: " << profile; |
364 } | 465 } |
365 | 466 |
366 return validator.Pass(); | 467 return validator.Pass(); |
367 } | 468 } |
368 | 469 |
369 class VEAClient : public VideoEncodeAccelerator::Client { | 470 class VEAClient : public VideoEncodeAccelerator::Client { |
370 public: | 471 public: |
371 VEAClient(const TestStream& test_stream, | 472 VEAClient(TestStream& test_stream, |
372 ClientStateNotification<ClientState>* note, | 473 ClientStateNotification<ClientState>* note, |
373 bool save_to_file, | 474 bool save_to_file, |
374 unsigned int keyframe_period, | 475 unsigned int keyframe_period, |
375 bool force_bitrate, | 476 bool force_bitrate, |
376 bool test_perf); | 477 bool test_perf); |
377 virtual ~VEAClient(); | 478 virtual ~VEAClient(); |
378 void CreateEncoder(); | 479 void CreateEncoder(); |
379 void DestroyEncoder(); | 480 void DestroyEncoder(); |
380 | 481 |
381 // Return the number of encoded frames per second. | 482 // Return the number of encoded frames per second. |
(...skipping 41 matching lines...) |
423 // the performance test. | 524 // the performance test. |
424 void VerifyPerf(); | 525 void VerifyPerf(); |
425 | 526 |
426 // Prepare and return a frame wrapping the data at |position| bytes in | 527 // Prepare and return a frame wrapping the data at |position| bytes in |
427 // the input stream, ready to be sent to encoder. | 528 // the input stream, ready to be sent to encoder. |
428 scoped_refptr<media::VideoFrame> PrepareInputFrame(off_t position); | 529 scoped_refptr<media::VideoFrame> PrepareInputFrame(off_t position); |
429 | 530 |
430 ClientState state_; | 531 ClientState state_; |
431 scoped_ptr<VideoEncodeAccelerator> encoder_; | 532 scoped_ptr<VideoEncodeAccelerator> encoder_; |
432 | 533 |
433 const TestStream& test_stream_; | 534 TestStream& test_stream_; |
434 // Used to notify another thread about the state. VEAClient does not own this. | 535 // Used to notify another thread about the state. VEAClient does not own this. |
435 ClientStateNotification<ClientState>* note_; | 536 ClientStateNotification<ClientState>* note_; |
436 | 537 |
437 // Ids assigned to VideoFrames (start at 1 for easy comparison with | 538 // Ids assigned to VideoFrames (start at 1 for easy comparison with |
438 // num_encoded_frames_). | 539 // num_encoded_frames_). |
439 std::set<int32> inputs_at_client_; | 540 std::set<int32> inputs_at_client_; |
440 int32 next_input_id_; | 541 int32 next_input_id_; |
441 | 542 |
442 // Ids for output BitstreamBuffers. | 543 // Ids for output BitstreamBuffers. |
443 typedef std::map<int32, base::SharedMemory*> IdToSHM; | 544 typedef std::map<int32, base::SharedMemory*> IdToSHM; |
(...skipping 56 matching lines...) |
500 // The time when the encoding started. | 601 // The time when the encoding started. |
501 base::TimeTicks encode_start_time_; | 602 base::TimeTicks encode_start_time_; |
502 | 603 |
503 // The time when the last encoded frame is ready. | 604 // The time when the last encoded frame is ready. |
504 base::TimeTicks last_frame_ready_time_; | 605 base::TimeTicks last_frame_ready_time_; |
505 | 606 |
506 // All methods of this class should be run on the same thread. | 607 // All methods of this class should be run on the same thread. |
507 base::ThreadChecker thread_checker_; | 608 base::ThreadChecker thread_checker_; |
508 }; | 609 }; |
509 | 610 |
510 VEAClient::VEAClient(const TestStream& test_stream, | 611 VEAClient::VEAClient(TestStream& test_stream, |
511 ClientStateNotification<ClientState>* note, | 612 ClientStateNotification<ClientState>* note, |
512 bool save_to_file, | 613 bool save_to_file, |
513 unsigned int keyframe_period, | 614 unsigned int keyframe_period, |
514 bool force_bitrate, | 615 bool force_bitrate, |
515 bool test_perf) | 616 bool test_perf) |
516 : state_(CS_CREATED), | 617 : state_(CS_CREATED), |
517 test_stream_(test_stream), | 618 test_stream_(test_stream), |
518 note_(note), | 619 note_(note), |
519 next_input_id_(1), | 620 next_input_id_(1), |
520 next_output_buffer_id_(0), | 621 next_output_buffer_id_(0), |
(...skipping 24 matching lines...) |
545 CHECK(validator_.get()); | 646 CHECK(validator_.get()); |
546 | 647 |
547 if (save_to_file_) { | 648 if (save_to_file_) { |
548 CHECK(!test_stream_.out_filename.empty()); | 649 CHECK(!test_stream_.out_filename.empty()); |
549 base::FilePath out_filename(test_stream_.out_filename); | 650 base::FilePath out_filename(test_stream_.out_filename); |
550 // This creates or truncates out_filename. | 651 // This creates or truncates out_filename. |
551 // Without it, AppendToFile() will not work. | 652 // Without it, AppendToFile() will not work. |
552 EXPECT_EQ(0, base::WriteFile(out_filename, NULL, 0)); | 653 EXPECT_EQ(0, base::WriteFile(out_filename, NULL, 0)); |
553 } | 654 } |
554 | 655 |
555 input_buffer_size_ = | |
556 media::VideoFrame::AllocationSize(kInputFormat, test_stream.size); | |
557 CHECK_GT(input_buffer_size_, 0UL); | |
558 | |
559 // Calculate the number of frames in the input stream by dividing its length | |
560 // in bytes by frame size in bytes. | |
561 CHECK_EQ(test_stream_.input_file.length() % input_buffer_size_, 0U) | |
562 << "Stream byte size is not a product of calculated frame byte size"; | |
563 num_frames_in_stream_ = test_stream_.input_file.length() / input_buffer_size_; | |
564 CHECK_GT(num_frames_in_stream_, 0UL); | |
565 CHECK_LE(num_frames_in_stream_, kMaxFrameNum); | |
566 | |
567 // We may need to loop over the stream more than once if more frames than | |
568 // provided is required for bitrate tests. | |
569 if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) { | |
570 DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_ | |
571 << " frames), will loop it to reach " << kMinFramesForBitrateTests | |
572 << " frames"; | |
573 num_frames_to_encode_ = kMinFramesForBitrateTests; | |
574 } else { | |
575 num_frames_to_encode_ = num_frames_in_stream_; | |
576 } | |
577 | |
578 thread_checker_.DetachFromThread(); | 656 thread_checker_.DetachFromThread(); |
579 } | 657 } |
580 | 658 |
581 VEAClient::~VEAClient() { CHECK(!has_encoder()); } | 659 VEAClient::~VEAClient() { CHECK(!has_encoder()); } |
582 | 660 |
583 void VEAClient::CreateEncoder() { | 661 void VEAClient::CreateEncoder() { |
584 DCHECK(thread_checker_.CalledOnValidThread()); | 662 DCHECK(thread_checker_.CalledOnValidThread()); |
585 CHECK(!has_encoder()); | 663 CHECK(!has_encoder()); |
586 | 664 |
587 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) | 665 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) |
(...skipping 34 matching lines...) |
622 return num_encoded_frames_ / duration.InSecondsF(); | 700 return num_encoded_frames_ / duration.InSecondsF(); |
623 } | 701 } |
624 | 702 |
625 void VEAClient::RequireBitstreamBuffers(unsigned int input_count, | 703 void VEAClient::RequireBitstreamBuffers(unsigned int input_count, |
626 const gfx::Size& input_coded_size, | 704 const gfx::Size& input_coded_size, |
627 size_t output_size) { | 705 size_t output_size) { |
628 DCHECK(thread_checker_.CalledOnValidThread()); | 706 DCHECK(thread_checker_.CalledOnValidThread()); |
629 ASSERT_EQ(state_, CS_INITIALIZED); | 707 ASSERT_EQ(state_, CS_INITIALIZED); |
630 SetState(CS_ENCODING); | 708 SetState(CS_ENCODING); |
631 | 709 |
632 // TODO(posciak): For now we only support input streams that meet encoder | 710 PrepareInputBuffers(test_stream_.size, |
633 // size requirements exactly (i.e. coded size == visible size), so that we | 711 input_coded_size, |
634 // can simply mmap the stream file and feed the encoder directly with chunks | 712 test_stream_.in_filename, |
635 // of that, instead of memcpying from mmapped file into a separate set of | 713 test_stream_.input_file.get(), |
636 // input buffers that would meet the coded size and alignment requirements. | 714 &test_stream_.temp_file, |
637 // If/when this is changed, the ARM-specific alignment check below should be | 715 &input_buffer_size_); |
638 // redone as well. | 716 CHECK_GT(input_buffer_size_, 0UL); |
| 717 |
| 718 // Calculate the number of frames in the input stream by dividing its length |
| 719 // in bytes by frame size in bytes. |
| 720 CHECK_EQ(test_stream_.input_file->length() % input_buffer_size_, 0U) |
| 721 << "Stream byte size is not a product of calculated frame byte size"; |
| 722 num_frames_in_stream_ = |
| 723 test_stream_.input_file->length() / input_buffer_size_; |
| 724 CHECK_GT(num_frames_in_stream_, 0UL); |
| 725 CHECK_LE(num_frames_in_stream_, kMaxFrameNum); |
| 726 |
| 727 // We may need to loop over the stream more than once if more frames than |
| 728 // provided are required for bitrate tests. |
| 729 if (force_bitrate_ && num_frames_in_stream_ < kMinFramesForBitrateTests) { |
| 730 DVLOG(1) << "Stream too short for bitrate test (" << num_frames_in_stream_ |
| 731 << " frames), will loop it to reach " << kMinFramesForBitrateTests |
| 732 << " frames"; |
| 733 num_frames_to_encode_ = kMinFramesForBitrateTests; |
| 734 } else { |
| 735 num_frames_to_encode_ = num_frames_in_stream_; |
| 736 } |
| 737 |
639 input_coded_size_ = input_coded_size; | 738 input_coded_size_ = input_coded_size; |
640 ASSERT_EQ(input_coded_size_, test_stream_.size); | |
641 #if defined(ARCH_CPU_ARMEL) | |
642 // ARM performs CPU cache management with CPU cache line granularity. We thus | |
643 // need to ensure our buffers are CPU cache line-aligned (64 byte-aligned). | |
644 // Otherwise newer kernels will refuse to accept them, and on older kernels | |
645 // we'll be treating ourselves to random corruption. | |
646 // Since we are just mmapping and passing chunks of the input file, to ensure | |
647 // alignment, if the starting virtual addresses of the frames in it were not | |
648 // 64 byte-aligned, we'd have to use a separate set of input buffers and copy | |
649 // the frames into them before sending to the encoder. It would have been an | |
650 // overkill here though, because, for now at least, we only test resolutions | |
651 // that result in proper alignment, and it would have also interfered with | |
652 // performance testing. So just assert that the frame size is a multiple of | |
653 // 64 bytes. This ensures all frames start at 64-byte boundary, because | |
654 // MemoryMappedFile should be mmapp()ed at virtual page start as well. | |
655 ASSERT_EQ(input_buffer_size_ & 63, 0u) | |
656 << "Frame size has to be a multiple of 64 bytes"; | |
657 ASSERT_EQ(reinterpret_cast<off_t>(test_stream_.input_file.data()) & 63, 0) | |
658 << "Mapped file should be mapped at a 64 byte boundary"; | |
659 #endif | |
660 | |
661 num_required_input_buffers_ = input_count; | 739 num_required_input_buffers_ = input_count; |
662 ASSERT_GT(num_required_input_buffers_, 0UL); | 740 ASSERT_GT(num_required_input_buffers_, 0UL); |
663 | 741 |
664 output_buffer_size_ = output_size; | 742 output_buffer_size_ = output_size; |
665 ASSERT_GT(output_buffer_size_, 0UL); | 743 ASSERT_GT(output_buffer_size_, 0UL); |
666 | 744 |
667 for (unsigned int i = 0; i < kNumOutputBuffers; ++i) { | 745 for (unsigned int i = 0; i < kNumOutputBuffers; ++i) { |
668 base::SharedMemory* shm = new base::SharedMemory(); | 746 base::SharedMemory* shm = new base::SharedMemory(); |
669 CHECK(shm->CreateAndMapAnonymous(output_buffer_size_)); | 747 CHECK(shm->CreateAndMapAnonymous(output_buffer_size_)); |
670 output_shms_.push_back(shm); | 748 output_shms_.push_back(shm); |
(...skipping 63 matching lines...) |
734 } | 812 } |
735 | 813 |
736 void VEAClient::InputNoLongerNeededCallback(int32 input_id) { | 814 void VEAClient::InputNoLongerNeededCallback(int32 input_id) { |
737 std::set<int32>::iterator it = inputs_at_client_.find(input_id); | 815 std::set<int32>::iterator it = inputs_at_client_.find(input_id); |
738 ASSERT_NE(it, inputs_at_client_.end()); | 816 ASSERT_NE(it, inputs_at_client_.end()); |
739 inputs_at_client_.erase(it); | 817 inputs_at_client_.erase(it); |
740 FeedEncoderWithInputs(); | 818 FeedEncoderWithInputs(); |
741 } | 819 } |
742 | 820 |
743 scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) { | 821 scoped_refptr<media::VideoFrame> VEAClient::PrepareInputFrame(off_t position) { |
744 CHECK_LE(position + input_buffer_size_, test_stream_.input_file.length()); | 822 CHECK_LE(position + input_buffer_size_, test_stream_.input_file->length()); |
745 | 823 |
746 uint8* frame_data = | 824 uint8* frame_data_y = |
747 const_cast<uint8*>(test_stream_.input_file.data() + position); | 825 const_cast<uint8*>(test_stream_.input_file->data() + position); |
| 826 uint8* frame_data_u = |
| 827 frame_data_y + ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize( |
| 828 kInputFormat, 0, input_coded_size_)); |
| 829 uint8* frame_data_v = |
| 830 frame_data_u + ALIGN_64_BYTES(media::VideoFrame::PlaneAllocationSize( |
| 831 kInputFormat, 1, input_coded_size_)); |
748 | 832 |
749 CHECK_GT(current_framerate_, 0U); | 833 CHECK_GT(current_framerate_, 0U); |
750 scoped_refptr<media::VideoFrame> frame = | 834 scoped_refptr<media::VideoFrame> frame = |
751 media::VideoFrame::WrapExternalYuvData( | 835 media::VideoFrame::WrapExternalYuvData( |
752 kInputFormat, | 836 kInputFormat, |
753 input_coded_size_, | 837 input_coded_size_, |
754 gfx::Rect(test_stream_.size), | 838 gfx::Rect(test_stream_.size), |
755 test_stream_.size, | 839 test_stream_.size, |
756 input_coded_size_.width(), | 840 input_coded_size_.width(), |
757 input_coded_size_.width() / 2, | 841 input_coded_size_.width() / 2, |
758 input_coded_size_.width() / 2, | 842 input_coded_size_.width() / 2, |
759 frame_data, | 843 frame_data_y, |
760 frame_data + input_coded_size_.GetArea(), | 844 frame_data_u, |
761 frame_data + (input_coded_size_.GetArea() * 5 / 4), | 845 frame_data_v, |
762 base::TimeDelta().FromMilliseconds( | 846 base::TimeDelta().FromMilliseconds( |
763 next_input_id_ * base::Time::kMillisecondsPerSecond / | 847 next_input_id_ * base::Time::kMillisecondsPerSecond / |
764 current_framerate_), | 848 current_framerate_), |
765 media::BindToCurrentLoop( | 849 media::BindToCurrentLoop( |
766 base::Bind(&VEAClient::InputNoLongerNeededCallback, | 850 base::Bind(&VEAClient::InputNoLongerNeededCallback, |
767 base::Unretained(this), | 851 base::Unretained(this), |
768 next_input_id_))); | 852 next_input_id_))); |
769 | 853 |
770 CHECK(inputs_at_client_.insert(next_input_id_).second); | 854 CHECK(inputs_at_client_.insert(next_input_id_).second); |
771 ++next_input_id_; | 855 ++next_input_id_; |
772 | 856 |
773 return frame; | 857 return frame; |
774 } | 858 } |
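Continuing the hypothetical 320x192 coded size from the earlier sketch (and the same assumed I420 plane sizes), the plane pointers computed in PrepareInputFrame() would land at these offsets from |position|:

    // frame_data_y = input_file->data() + position;         // offset      0
    // frame_data_u = frame_data_y + ALIGN_64_BYTES(61440);  // offset  61440
    // frame_data_v = frame_data_u + ALIGN_64_BYTES(15360);  // offset  76800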
775 | 859 |
776 void VEAClient::FeedEncoderWithInputs() { | 860 void VEAClient::FeedEncoderWithInputs() { |
777 if (!has_encoder()) | 861 if (!has_encoder()) |
778 return; | 862 return; |
779 | 863 |
780 if (state_ != CS_ENCODING) | 864 if (state_ != CS_ENCODING) |
781 return; | 865 return; |
782 | 866 |
783 while (inputs_at_client_.size() < | 867 while (inputs_at_client_.size() < |
784 num_required_input_buffers_ + kNumExtraInputFrames) { | 868 num_required_input_buffers_ + kNumExtraInputFrames) { |
785 size_t bytes_left = test_stream_.input_file.length() - pos_in_input_stream_; | 869 size_t bytes_left = |
| 870 test_stream_.input_file->length() - pos_in_input_stream_; |
786 if (bytes_left < input_buffer_size_) { | 871 if (bytes_left < input_buffer_size_) { |
787 DCHECK_EQ(bytes_left, 0UL); | 872 DCHECK_EQ(bytes_left, 0UL); |
788 // Rewind if at the end of stream and we are still encoding. | 873 // Rewind if at the end of stream and we are still encoding. |
789 // This is to flush the encoder with additional frames from the beginning | 874 // This is to flush the encoder with additional frames from the beginning |
790 // of the stream, or if the stream is shorter that the number of frames | 875 // of the stream, or if the stream is shorter than the number of frames |
791 // we require for bitrate tests. | 876 // we require for bitrate tests. |
792 pos_in_input_stream_ = 0; | 877 pos_in_input_stream_ = 0; |
793 continue; | 878 continue; |
794 } | 879 } |
795 | 880 |
(...skipping 172 matching lines...) |
968 ASSERT_EQ(notes[i]->Wait(), state_transitions[state_no]); | 1053 ASSERT_EQ(notes[i]->Wait(), state_transitions[state_no]); |
969 | 1054 |
970 for (size_t i = 0; i < num_concurrent_encoders; ++i) { | 1055 for (size_t i = 0; i < num_concurrent_encoders; ++i) { |
971 encoder_thread.message_loop()->PostTask( | 1056 encoder_thread.message_loop()->PostTask( |
972 FROM_HERE, | 1057 FROM_HERE, |
973 base::Bind(&VEAClient::DestroyEncoder, base::Unretained(clients[i]))); | 1058 base::Bind(&VEAClient::DestroyEncoder, base::Unretained(clients[i]))); |
974 } | 1059 } |
975 | 1060 |
976 // This ensures all tasks have finished. | 1061 // This ensures all tasks have finished. |
977 encoder_thread.Stop(); | 1062 encoder_thread.Stop(); |
| 1063 |
| 1064 for (size_t i = 0; i < test_streams.size(); i++) { |
| 1065 test_streams[i]->input_file.reset(); |
| 1066 base::DeleteFile(test_streams[i]->temp_file, false); |
| 1067 } |
978 } | 1068 } |
979 | 1069 |
980 INSTANTIATE_TEST_CASE_P( | 1070 INSTANTIATE_TEST_CASE_P( |
981 SimpleEncode, | 1071 SimpleEncode, |
982 VideoEncodeAcceleratorTest, | 1072 VideoEncodeAcceleratorTest, |
983 ::testing::Values(MakeTuple(1, true, 0, false, false, false, false))); | 1073 ::testing::Values(MakeTuple(1, true, 0, false, false, false, false))); |
984 | 1074 |
985 INSTANTIATE_TEST_CASE_P( | 1075 INSTANTIATE_TEST_CASE_P( |
986 EncoderPerf, | 1076 EncoderPerf, |
987 VideoEncodeAcceleratorTest, | 1077 VideoEncodeAcceleratorTest, |
(...skipping 66 matching lines...) |
1054 test_stream_data->assign(it->second.c_str()); | 1144 test_stream_data->assign(it->second.c_str()); |
1055 continue; | 1145 continue; |
1056 } | 1146 } |
1057 if (it->first == "v" || it->first == "vmodule") | 1147 if (it->first == "v" || it->first == "vmodule") |
1058 continue; | 1148 continue; |
1059 LOG(FATAL) << "Unexpected switch: " << it->first << ":" << it->second; | 1149 LOG(FATAL) << "Unexpected switch: " << it->first << ":" << it->second; |
1060 } | 1150 } |
1061 | 1151 |
1062 return RUN_ALL_TESTS(); | 1152 return RUN_ALL_TESTS(); |
1063 } | 1153 } |