Index: webrtc/modules/video_coding/codecs/test/videoprocessor.h |
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h |
index 724036b68ae795a1c313b9e637424b12b6db9292..57e9f4ec93497be6f62b5975fda0e50f9120f186 100644 |
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h |
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h |
@@ -25,6 +25,9 @@ |
#include "webrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h" |
#include "webrtc/rtc_base/buffer.h" |
#include "webrtc/rtc_base/checks.h" |
+#include "webrtc/rtc_base/constructormagic.h" |
+#include "webrtc/rtc_base/sequenced_task_checker.h" |
+#include "webrtc/rtc_base/task_queue.h" |
#include "webrtc/test/testsupport/frame_reader.h" |
#include "webrtc/test/testsupport/frame_writer.h" |
@@ -110,12 +113,6 @@ struct TestConfig { |
// If HW or SW codec should be used. |
bool hw_codec = false; |
- |
- // In batch mode, the VideoProcessor is fed all the frames for processing |
- // before any metrics are calculated. This is useful for pipelining HW codecs, |
- // for which some calculated metrics otherwise would be incorrect. The |
- // downside with batch mode is that mid-test rate allocation is not supported. |
- bool batch_mode = false; |
}; |
// Handles encoding/decoding of video using the VideoEncoder/VideoDecoder |
@@ -155,100 +152,126 @@ class VideoProcessor { |
// Tears down callbacks and releases the encoder and decoder. |
void Release(); |
- // Processes a single frame. Returns true as long as there's more frames |
- // available in the source clip. |
- // |frame_number| must be an integer >= 0. |
- bool ProcessFrame(int frame_number); |
- |
- // Updates the encoder with the target |bit_rate| and the |frame_rate|. |
- void SetRates(int bit_rate, int frame_rate); |
+ // Processes a single frame. The frames must be processed in order, and the |
+ // VideoProcessor must be initialized first. |
+ void ProcessFrame(int frame_number); |
- // Return the size of the encoded frame in bytes. Dropped frames by the |
- // encoder are regarded as zero size. |
- size_t EncodedFrameSize(int frame_number); |
+ // Updates the encoder with target rates. Must be called at least once. |
+ void SetRates(int bitrate_kbps, int framerate_fps); |
- // Return the encoded frame type (key or delta). |
- FrameType EncodedFrameType(int frame_number); |
+ // Returns the number of frames that have been decoded. |
+ int NumFramesDecoded() const; |
- // Return the qp used by encoder. |
- int GetQpFromEncoder(int frame_number); |
+ // TODO(brandtr): Get rid of these functions by moving the corresponding QP |
+ // fields to the Stats object. |
+ int GetQpFromEncoder(int frame_number) const; |
+ int GetQpFromBitstream(int frame_number) const; |
- // Return the qp from the qp parser. |
- int GetQpFromBitstream(int frame_number); |
+  // Returns the number of dropped frames, per rate update interval. |
+ std::vector<int> NumberDroppedFramesPerRateUpdate() const; |
- // Return the number of dropped frames. |
- int NumberDroppedFrames(); |
- |
- // Return the number of spatial resizes. |
- int NumberSpatialResizes(); |
+  // Returns the number of spatial resizes, per rate update interval. |
+ std::vector<int> NumberSpatialResizesPerRateUpdate() const; |
private: |
// Container that holds per-frame information that needs to be stored between |
// calls to Encode and Decode, as well as the corresponding callbacks. It is |
// not directly used for statistics -- for that, test::FrameStatistic is used. |
+ // TODO(brandtr): Get rid of this struct and use the Stats class instead. |
struct FrameInfo { |
- FrameInfo() |
- : timestamp(0), |
- encode_start_ns(0), |
- decode_start_ns(0), |
- encoded_frame_size(0), |
- encoded_frame_type(kVideoFrameDelta), |
- decoded_width(0), |
- decoded_height(0), |
- manipulated_length(0), |
- qp_encoder(0), |
- qp_bitstream(0) {} |
- |
- uint32_t timestamp; |
- int64_t encode_start_ns; |
- int64_t decode_start_ns; |
- size_t encoded_frame_size; |
- FrameType encoded_frame_type; |
- int decoded_width; |
- int decoded_height; |
- size_t manipulated_length; |
- int qp_encoder; |
- int qp_bitstream; |
+ int64_t encode_start_ns = 0; |
+ int64_t decode_start_ns = 0; |
+ int qp_encoder = 0; |
+ int qp_bitstream = 0; |
+ int decoded_width = 0; |
+ int decoded_height = 0; |
+ size_t manipulated_length = 0; |
}; |
- // Callback class required to implement according to the VideoEncoder API. |
class VideoProcessorEncodeCompleteCallback |
: public webrtc::EncodedImageCallback { |
public: |
explicit VideoProcessorEncodeCompleteCallback( |
VideoProcessor* video_processor) |
- : video_processor_(video_processor) {} |
+ : video_processor_(video_processor), |
+ task_queue_(rtc::TaskQueue::Current()) {} |
+ |
Result OnEncodedImage( |
const webrtc::EncodedImage& encoded_image, |
const webrtc::CodecSpecificInfo* codec_specific_info, |
const webrtc::RTPFragmentationHeader* fragmentation) override { |
- // Forward to parent class. |
RTC_CHECK(codec_specific_info); |
+ |
+ if (task_queue_ && !task_queue_->IsCurrent()) { |
+ task_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
+ new EncodeCallbackTask(video_processor_, encoded_image, |
+ codec_specific_info, fragmentation))); |
+ return Result(Result::OK, 0); |
+ } |
+ |
video_processor_->FrameEncoded(codec_specific_info->codecType, |
encoded_image, fragmentation); |
return Result(Result::OK, 0); |
} |
private: |
+ class EncodeCallbackTask : public rtc::QueuedTask { |
+ public: |
+ EncodeCallbackTask(VideoProcessor* video_processor, |
+ const webrtc::EncodedImage& encoded_image, |
+ const webrtc::CodecSpecificInfo* codec_specific_info, |
+ const webrtc::RTPFragmentationHeader* fragmentation) |
+ : video_processor_(video_processor), |
+ buffer_(encoded_image._buffer, encoded_image._length), |
+ encoded_image_(encoded_image), |
+ codec_specific_info_(*codec_specific_info) { |
+ encoded_image_._buffer = buffer_.data(); |
+ RTC_CHECK(fragmentation); |
+ fragmentation_.CopyFrom(*fragmentation); |
+ } |
+ |
+ bool Run() override { |
+ video_processor_->FrameEncoded(codec_specific_info_.codecType, |
+ encoded_image_, &fragmentation_); |
+ return true; |
+ } |
+ |
+ private: |
+ VideoProcessor* const video_processor_; |
+ rtc::Buffer buffer_; |
+ webrtc::EncodedImage encoded_image_; |
+ const webrtc::CodecSpecificInfo codec_specific_info_; |
+ webrtc::RTPFragmentationHeader fragmentation_; |
+ }; |
+ |
VideoProcessor* const video_processor_; |
+ rtc::TaskQueue* const task_queue_; |
}; |
- // Callback class required to implement according to the VideoDecoder API. |
class VideoProcessorDecodeCompleteCallback |
: public webrtc::DecodedImageCallback { |
public: |
explicit VideoProcessorDecodeCompleteCallback( |
VideoProcessor* video_processor) |
- : video_processor_(video_processor) {} |
+ : video_processor_(video_processor), |
+ task_queue_(rtc::TaskQueue::Current()) {} |
+ |
int32_t Decoded(webrtc::VideoFrame& image) override { |
- // Forward to parent class. |
+ if (task_queue_ && !task_queue_->IsCurrent()) { |
+ task_queue_->PostTask( |
+ [this, image]() { video_processor_->FrameDecoded(image); }); |
+ return 0; |
+ } |
+ |
video_processor_->FrameDecoded(image); |
return 0; |
} |
+ |
int32_t Decoded(webrtc::VideoFrame& image, |
int64_t decode_time_ms) override { |
return Decoded(image); |
} |
+ |
void Decoded(webrtc::VideoFrame& image, |
rtc::Optional<int32_t> decode_time_ms, |
rtc::Optional<uint8_t> qp) override { |
@@ -257,31 +280,35 @@ class VideoProcessor { |
private: |
VideoProcessor* const video_processor_; |
+ rtc::TaskQueue* const task_queue_; |
}; |
- // Invoked by the callback when a frame has completed encoding. |
+ // Invoked by the callback adapter when a frame has completed encoding. |
void FrameEncoded(webrtc::VideoCodecType codec, |
const webrtc::EncodedImage& encodedImage, |
const webrtc::RTPFragmentationHeader* fragmentation); |
- // Invoked by the callback when a frame has completed decoding. |
+ // Invoked by the callback adapter when a frame has completed decoding. |
void FrameDecoded(const webrtc::VideoFrame& image); |
// Use the frame number as the basis for timestamp to identify frames. Let the |
// first timestamp be non-zero, to not make the IvfFileWriter believe that we |
// want to use capture timestamps in the IVF files. |
- uint32_t FrameNumberToTimestamp(int frame_number); |
- int TimestampToFrameNumber(uint32_t timestamp); |
+ uint32_t FrameNumberToTimestamp(int frame_number) const; |
+ int TimestampToFrameNumber(uint32_t timestamp) const; |
+ |
+ bool initialized_ GUARDED_BY(sequence_checker_); |
- TestConfig config_; |
+ TestConfig config_ GUARDED_BY(sequence_checker_); |
webrtc::VideoEncoder* const encoder_; |
webrtc::VideoDecoder* const decoder_; |
const std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_; |
// Adapters for the codec callbacks. |
- const std::unique_ptr<EncodedImageCallback> encode_callback_; |
- const std::unique_ptr<DecodedImageCallback> decode_callback_; |
+ VideoProcessorEncodeCompleteCallback encode_callback_; |
+ VideoProcessorDecodeCompleteCallback decode_callback_; |
+ int num_frames_decoded_ GUARDED_BY(sequence_checker_); |
// Fake network. |
PacketManipulator* const packet_manipulator_; |
@@ -298,26 +325,29 @@ class VideoProcessor { |
IvfFileWriter* const encoded_frame_writer_; |
FrameWriter* const decoded_frame_writer_; |
- bool initialized_; |
- |
// Frame metadata for all frames that have been added through a call to |
// ProcessFrames(). We need to store this metadata over the course of the |
// test run, to support pipelining HW codecs. |
- std::vector<FrameInfo> frame_infos_; |
- int last_encoded_frame_num_; |
- int last_decoded_frame_num_; |
+ std::vector<FrameInfo> frame_infos_ GUARDED_BY(sequence_checker_); |
+ int last_encoded_frame_num_ GUARDED_BY(sequence_checker_); |
+ int last_decoded_frame_num_ GUARDED_BY(sequence_checker_); |
// Keep track of if we have excluded the first key frame from packet loss. |
- bool first_key_frame_has_been_excluded_; |
+ bool first_key_frame_has_been_excluded_ GUARDED_BY(sequence_checker_); |
// Keep track of the last successfully decoded frame, since we write that |
// frame to disk when decoding fails. |
- rtc::Buffer last_decoded_frame_buffer_; |
+ rtc::Buffer last_decoded_frame_buffer_ GUARDED_BY(sequence_checker_); |
// Statistics. |
Stats* stats_; |
- int num_dropped_frames_; |
- int num_spatial_resizes_; |
+ std::vector<int> num_dropped_frames_ GUARDED_BY(sequence_checker_); |
+ std::vector<int> num_spatial_resizes_ GUARDED_BY(sequence_checker_); |
+ int rate_update_index_ GUARDED_BY(sequence_checker_); |
+ |
+ rtc::SequencedTaskChecker sequence_checker_; |
+ |
+ RTC_DISALLOW_COPY_AND_ASSIGN(VideoProcessor); |
}; |
} // namespace test |