Index: media/cast/sender/video_sender.cc
diff --git a/media/cast/sender/video_sender.cc b/media/cast/sender/video_sender.cc
index e7628327887a2566257e3be2ef14433acb200dad..69340bed36a2406a9bbe6092ef48d6874796e069 100644
--- a/media/cast/sender/video_sender.cc
+++ b/media/cast/sender/video_sender.cc
@@ -19,16 +19,21 @@
namespace media {
namespace cast {
+namespace {
+
// The following two constants are used to adjust the target
// playout delay (when allowed). They were calculated using
// a combination of cast_benchmark runs and manual testing.
-
+//
// This is how many round trips we think we need on the network.
const int kRoundTripsNeeded = 4;
-// This is an estimate of all the the constant time needed
-// independent of network quality.
+// This is an estimate of all the constant time needed independent of
+// network quality (e.g., additional time that accounts for encode and decode
+// time).
const int kConstantTimeMs = 75;
+}  // namespace
+
// Note, we use a fixed bitrate value when external video encoder is used.
// Some hardware encoder shows bad behavior if we set the bitrate too
// frequently, e.g. quality drop, not abiding by target bitrate, etc.
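
The two constants above feed the adaptive target playout delay computed later in this CL. A minimal standalone sketch of that combination (illustrative only: the free function and its parameters stand in for the member variables used in the real code, and the clamp to the maximum playout delay is assumed, since the hunk below is cut off before std::min()'s second argument):

base::TimeDelta ComputeNewTargetDelay(base::TimeDelta current_round_trip_time,
                                      base::TimeDelta max_playout_delay) {
  // Allow for kRoundTripsNeeded network round trips plus a fixed allowance
  // (kConstantTimeMs) for encode/decode, never exceeding the configured max.
  return std::min(
      current_round_trip_time * kRoundTripsNeeded +
          base::TimeDelta::FromMilliseconds(kConstantTimeMs),
      max_playout_delay);
}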
@@ -95,11 +100,6 @@ VideoSender::VideoSender(
  transport_config.ssrc = video_config.ssrc;
  transport_config.feedback_ssrc = video_config.incoming_feedback_ssrc;
  transport_config.rtp_payload_type = video_config.rtp_payload_type;
-  transport_config.stored_frames =
-      std::min(kMaxUnackedFrames,
-               1 + static_cast<int>(max_playout_delay_ *
-                                    max_frame_rate_ /
-                                    base::TimeDelta::FromSeconds(1)));
  transport_config.aes_key = video_config.aes_key;
  transport_config.aes_iv_mask = video_config.aes_iv_mask;
@@ -141,8 +141,24 @@ void VideoSender::InsertRawVideoFrame(
               "timestamp", capture_time.ToInternalValue(),
               "rtp_timestamp", rtp_timestamp);
-  if (ShouldDropNextFrame(capture_time)) {
-    VLOG(1) << "Dropping frame due to too many frames currently in-flight.";
+  // Drop frames that are out-of-order since the duration calculations assume
+  // frame timestamps are monotonically non-decreasing.
+  if (!last_enqueued_frame_reference_time_.is_null() &&
+      capture_time < last_enqueued_frame_reference_time_) {
hubbe 2014/09/18 17:56:05: We don't want frames where capture time is identic
miu 2014/09/18 21:38:43: Done.

+    VLOG(1) << "Dropping video frame: Reference time is out-of-order.";
+    return;
+  }
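
Per hubbe's comment above, frames whose reference time exactly equals the previous frame's should be rejected as well; the "Done." implies a later patch set does so. A hedged sketch of that tweak, changing only the comparison from '<' to '<=' (not necessarily how the follow-up spelled it):

  // Sketch: treat an identical reference time the same as an out-of-order one.
  if (!last_enqueued_frame_reference_time_.is_null() &&
      capture_time <= last_enqueued_frame_reference_time_) {
    VLOG(1) << "Dropping video frame: Reference time is out-of-order.";
    return;
  }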
+
+  // Two video frames are needed to compute the exact media duration added by
+  // the next frame. If there are no frames in the encoder, compute a guess
+  // based on the configured |max_frame_rate_|. Any error introduced by this
+  // guess will be eliminated when |duration_in_encoder_| is updated in
+  // OnEncodedVideoFrame().
+  const base::TimeDelta duration_added_by_next_frame = frames_in_encoder_ > 0 ?
+      capture_time - last_enqueued_frame_reference_time_ :
+      base::TimeDelta::FromMicroseconds(1000000.0 / max_frame_rate_ + 0.5);
hubbe 2014/09/18 17:56:05: why + 0.5 ?
miu 2014/09/18 21:38:43: Rounding to nearest int. I made this more explici
hubbe 2014/09/18 22:22:59: Why not just use TimeDelta::FromSecondsD(1.0 / max
miu 2014/09/19 19:39:43: Because that would be the right way to do it. ;-)

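On the rounding exchange: the '+ 0.5' rounds the per-frame period, in microseconds, to the nearest integer before FromMicroseconds() truncates it. A small sketch of the two spellings under discussion, assuming max_frame_rate_ is a positive integer; the results differ by at most a microsecond:

  // Hand-rolled rounding, as written in the hunk above.
  const base::TimeDelta by_hand =
      base::TimeDelta::FromMicroseconds(1000000.0 / max_frame_rate_ + 0.5);
  // The helper suggested in review, expressing the same period directly.
  const base::TimeDelta suggested =
      base::TimeDelta::FromSecondsD(1.0 / max_frame_rate_);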
+
+  if (ShouldDropNextFrame(duration_added_by_next_frame)) {
    base::TimeDelta new_target_delay = std::min(
        current_round_trip_time_ * kRoundTripsNeeded +
            base::TimeDelta::FromMilliseconds(kConstantTimeMs),
@@ -168,6 +184,8 @@ void VideoSender::InsertRawVideoFrame(
                     weak_factory_.GetWeakPtr(),
                     bitrate))) {
    frames_in_encoder_++;
+    duration_in_encoder_ += duration_added_by_next_frame;
+    last_enqueued_frame_reference_time_ = capture_time;
  } else {
    VLOG(1) << "Encoder rejected a frame. Skipping...";
  }
@@ -177,6 +195,17 @@ int VideoSender::GetNumberOfFramesInEncoder() const {
  return frames_in_encoder_;
}
+base::TimeDelta VideoSender::GetInFlightMediaDuration() const {
+  if (GetUnacknowledgedFrameCount() > 0) {
+    const uint32 oldest_unacked_frame_id = latest_acked_frame_id_ + 1;
+    return duration_in_encoder_ +
hubbe 2014/09/18 17:56:05: Isn't this equivalent to: GetRecordedReferenceTim
miu 2014/09/18 21:38:43: Ah, good point. But, I think you meant: last_e

+           (GetRecordedReferenceTime(last_sent_frame_id_) -
+            GetRecordedReferenceTime(oldest_unacked_frame_id));
+  } else {
+    return duration_in_encoder_;
+  }
+}
+
void VideoSender::OnAck(uint32 frame_id) {
  video_encoder_->LatestFrameIdToReference(frame_id);
}
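
To make GetInFlightMediaDuration() concrete, a worked example with assumed numbers: if duration_in_encoder_ is 66 ms (two 33 ms frames still waiting to be encoded), the oldest unacked frame has reference time T, and the last sent frame has reference time T + 133 ms, the method returns 66 ms + 133 ms = 199 ms of media that is still unencoded, unsent, or unacknowledged.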
@@ -196,6 +225,9 @@ void VideoSender::OnEncodedVideoFrame(
  frames_in_encoder_--;
  DCHECK_GE(frames_in_encoder_, 0);
+  duration_in_encoder_ =
+      last_enqueued_frame_reference_time_ - encoded_frame->reference_time;
+
  SendEncodedFrame(encoder_bitrate, encoded_frame.Pass());
}
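
Taken together, the new bookkeeping has two halves: the enqueue path accumulates an estimate, and the encode-complete path recomputes the backlog from timestamps, which also cancels any error from the max_frame_rate_-based guess. A toy sketch of just that accounting, outside the real VideoSender and holding only the two relevant members:

// Illustrative toy model of the duration accounting added in this CL.
struct EncoderBacklogModel {
  base::TimeDelta duration_in_encoder;
  base::TimeTicks last_enqueued_reference_time;

  // Mirrors the successful-enqueue branch of InsertRawVideoFrame().
  void OnFrameEnqueued(base::TimeTicks reference_time,
                       base::TimeDelta duration_added_by_next_frame) {
    duration_in_encoder += duration_added_by_next_frame;
    last_enqueued_reference_time = reference_time;
  }

  // Mirrors OnEncodedVideoFrame(): derive the remaining backlog from the
  // newest enqueued timestamp and the frame that just left the encoder.
  void OnFrameEncoded(base::TimeTicks encoded_frame_reference_time) {
    duration_in_encoder =
        last_enqueued_reference_time - encoded_frame_reference_time;
  }
};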