Chromium Code Reviews

| Index: media/cast/audio_receiver/audio_receiver.cc |
| diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/audio_receiver/audio_receiver.cc |
| index 212419c81d616180cd868debab7c9f1dc0f13bab..7bd26e417b366998513fbe9b8062098ae1a85a9d 100644 |
| --- a/media/cast/audio_receiver/audio_receiver.cc |
| +++ b/media/cast/audio_receiver/audio_receiver.cc |
| @@ -29,8 +29,9 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment, |
| event_subscriber_(kReceiverRtcpEventHistorySize, AUDIO_EVENT), |
| codec_(audio_config.codec), |
| frequency_(audio_config.frequency), |
| - target_delay_delta_( |
| + target_playout_delay_( |
| base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms)), |
| + reports_are_scheduled_(false), |
| framer_(cast_environment->Clock(), |
| this, |
| audio_config.incoming_ssrc, |
| @@ -47,12 +48,12 @@ AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment, |
| audio_config.incoming_ssrc, |
| audio_config.rtcp_c_name, |
| true), |
| - is_waiting_for_consecutive_frame_(false), |
| + is_waiting_to_emit_frames_(false), |
| weak_factory_(this) { |
| if (!audio_config.use_external_decoder) |
| audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config)); |
| decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask); |
| - rtcp_.SetTargetDelay(target_delay_delta_); |
| + rtcp_.SetTargetDelay(target_playout_delay_); |
| cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_); |
| memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_)); |
| } |
| @@ -62,24 +63,12 @@ AudioReceiver::~AudioReceiver() { |
| cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_); |
| } |
| -void AudioReceiver::InitializeTimers() { |
| - DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| - ScheduleNextRtcpReport(); |
| - ScheduleNextCastMessage(); |
| -} |
| - |
| void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data, |
| size_t payload_size, |
| const RtpCastHeader& rtp_header) { |
| DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| - base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| - // TODO(pwestin): update this as video to refresh over time. |
| - if (time_first_incoming_packet_.is_null()) { |
| - InitializeTimers(); |
| - first_incoming_rtp_timestamp_ = rtp_header.rtp_timestamp; |
| - time_first_incoming_packet_ = now; |
| - } |
| + const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] = |
| rtp_header.rtp_timestamp; |
| @@ -175,29 +164,27 @@ void AudioReceiver::EmitAvailableEncodedFrames() { |
| VLOG(1) << "Wait for more audio packets to produce a completed frame."; |
| return; // OnReceivedPayloadData() will invoke this method in the future. |
| } |
| + if (!is_consecutively_next_frame && |
hubbe 2014/05/14 23:12:23
Hmm, I think the framer should not return non-deco…
miu 2014/05/16 22:45:47
Done.
| + !transport::CanDropFramesForCodec(codec_)) { |
| + VLOG(1) << "Wait for the next frame in sequence (codec requirement)."; |
| + return; // OnReceivedPayloadData() will invoke this method in the future. |
| + } |
| + |
| + const base::TimeTicks playout_time = |
| + GetPlayoutTime(encoded_frame->rtp_timestamp); |
| // If |framer_| has a frame ready that is out of sequence, examine the |
| // playout time to determine whether it's acceptable to continue, thereby |
| // skipping one or more frames. Skip if the missing frame wouldn't complete |
| // playing before the start of playback of the available frame. |
| - const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| - const base::TimeTicks playout_time = |
| - GetPlayoutTime(now, encoded_frame->rtp_timestamp); |
| if (!is_consecutively_next_frame) { |
| + const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| // TODO(miu): Also account for expected decode time here? |
| const base::TimeTicks earliest_possible_end_time_of_missing_frame = |
| now + base::TimeDelta::FromMilliseconds(kTypicalAudioFrameDurationMs); |
| if (earliest_possible_end_time_of_missing_frame < playout_time) { |
| VLOG(1) << "Wait for next consecutive frame instead of skipping."; |
| - if (!is_waiting_for_consecutive_frame_) { |
miu 2014/05/13 23:19:03
Explanation for "unnecessary factoring" here: I ha…
| - is_waiting_for_consecutive_frame_ = true; |
| - cast_environment_->PostDelayedTask( |
| - CastEnvironment::MAIN, |
| - FROM_HERE, |
| - base::Bind(&AudioReceiver::EmitAvailableEncodedFramesAfterWaiting, |
| - weak_factory_.GetWeakPtr()), |
| - playout_time - now); |
| - } |
| + RetryEmitAfterWaiting(playout_time - now); |
| return; |
| } |
| } |
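For reference, the wait-vs-skip decision above boils down to a single time comparison. A minimal standalone sketch (not Chromium code; std::chrono stands in for base::TimeTicks/base::TimeDelta, and the 10 ms value is an illustrative assumption for kTypicalAudioFrameDurationMs):

#include <chrono>

using Clock = std::chrono::steady_clock;

// Illustrative stand-in for kTypicalAudioFrameDurationMs.
constexpr std::chrono::milliseconds kTypicalAudioFrameDuration{10};

// Returns true if the receiver should keep waiting for the missing
// (consecutively next) frame rather than skip ahead to the frame that is
// already complete and due at |playout_time|.
bool ShouldWaitForMissingFrame(Clock::time_point now,
                               Clock::time_point playout_time) {
  // If the missing frame could still finish playing before the available
  // frame's playout deadline, waiting beats skipping.
  const auto earliest_end_of_missing_frame = now + kTypicalAudioFrameDuration;
  return earliest_end_of_missing_frame < playout_time;
}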
| @@ -228,13 +215,54 @@ void AudioReceiver::EmitAvailableEncodedFrames() { |
| } |
| } |
| +void AudioReceiver::RetryEmitAfterWaiting(base::TimeDelta wait_time) { |
| + DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| + if (is_waiting_to_emit_frames_) |
| + return; |
| + is_waiting_to_emit_frames_ = true; |
| + cast_environment_->PostDelayedTask( |
| + CastEnvironment::MAIN, |
| + FROM_HERE, |
| + base::Bind(&AudioReceiver::EmitAvailableEncodedFramesAfterWaiting, |
| + weak_factory_.GetWeakPtr()), |
| + wait_time); |
| +} |
| + |
| void AudioReceiver::EmitAvailableEncodedFramesAfterWaiting() { |
| DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| - DCHECK(is_waiting_for_consecutive_frame_); |
| - is_waiting_for_consecutive_frame_ = false; |
| + DCHECK(is_waiting_to_emit_frames_); |
| + is_waiting_to_emit_frames_ = false; |
| EmitAvailableEncodedFrames(); |
| } |
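Together, RetryEmitAfterWaiting() and EmitAvailableEncodedFramesAfterWaiting() form a simple debounce: at most one delayed retry is outstanding, and the callback clears the flag before re-running the emit step. A minimal standalone sketch of that pattern (not Chromium code; the task-posting callback is an assumption standing in for CastEnvironment::PostDelayedTask, and the WeakPtr cancellation used in the real code is omitted):

#include <chrono>
#include <functional>
#include <utility>

// Assumed stand-in for posting a task to the MAIN thread after a delay.
using PostDelayedTask =
    std::function<void(std::function<void()>, std::chrono::milliseconds)>;

class EmitRetryScheduler {
 public:
  EmitRetryScheduler(PostDelayedTask post_delayed_task,
                     std::function<void()> emit_available_frames)
      : post_delayed_task_(std::move(post_delayed_task)),
        emit_available_frames_(std::move(emit_available_frames)) {}

  // Mirrors RetryEmitAfterWaiting(): schedules one retry and ignores
  // further requests until that retry has fired.
  void RetryAfter(std::chrono::milliseconds wait_time) {
    if (is_waiting_to_emit_frames_)
      return;
    is_waiting_to_emit_frames_ = true;
    post_delayed_task_(
        [this] {
          // Mirrors EmitAvailableEncodedFramesAfterWaiting().
          is_waiting_to_emit_frames_ = false;
          emit_available_frames_();
        },
        wait_time);
  }

 private:
  PostDelayedTask post_delayed_task_;
  std::function<void()> emit_available_frames_;
  bool is_waiting_to_emit_frames_ = false;
};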
| +base::TimeTicks AudioReceiver::GetPlayoutTime(uint32 rtp_timestamp) const { |
| + base::TimeTicks capture_time = rtcp_.ToApproximateCaptureTime(rtp_timestamp, |
| + frequency_); |
| + |
| + // HACK: The sender should have provided Sender Reports which allow this |
| + // receiver to map RTP timestamps back to the time the frame was captured on |
| + // the sender. It should have done this before sending the first frame, but |
| + // the spec does not currently require this. Therefore, if the data is |
| + // missing, this receiver is forced to take a guess. |
| + // |
| + // The guess is based on a number of assumptions which in many environments |
| + // will be completely wrong: |
| + // 1. The difference between the sender clock and receiver clock (relative |
| + // to NTP epoch) is very close to zero. |
| + // 2. The amount of time the sender took to encode/process the frame before |
| + // transport is approximately 1/2 the amount of time between frames. |
| + // 3. Perfect network conditions (i.e., negligible latency, no packet loss, |
| + // frames are arriving in-order, etc.). |
| + if (capture_time.is_null()) { |
| + VLOG(1) << ("Guessing playout time because sender has not yet sent lip " |
| + "sync info. Expect jank in the near future!"); |
| + capture_time = cast_environment_->Clock()->NowTicks() - |
| + (base::TimeDelta::FromMilliseconds(kTypicalAudioFrameDurationMs) / 2); |
| + } |
| + |
| + return capture_time + target_playout_delay_; |
| +} |
| + |
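The new GetPlayoutTime() reduces to "capture time plus target playout delay", with a half-frame guess when no Sender Report has arrived yet. A minimal standalone sketch of that computation (not Chromium code; the RTCP lookup is modeled as an optional parameter, and the 10 ms frame duration is an illustrative assumption):

#include <chrono>
#include <optional>

using Clock = std::chrono::steady_clock;

// Illustrative stand-in for kTypicalAudioFrameDurationMs.
constexpr std::chrono::milliseconds kTypicalAudioFrameDuration{10};

// |approximate_capture_time| models rtcp_.ToApproximateCaptureTime(): it is
// empty until the sender's first Sender Report has been received.
Clock::time_point ComputePlayoutTime(
    std::optional<Clock::time_point> approximate_capture_time,
    Clock::time_point now,
    std::chrono::milliseconds target_playout_delay) {
  if (approximate_capture_time)
    return *approximate_capture_time + target_playout_delay;
  // No lip-sync info yet: guess that the frame was captured roughly half a
  // frame duration before |now|, per the HACK comment above.
  return now - kTypicalAudioFrameDuration / 2 + target_playout_delay;
}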
| void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) { |
| DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) { |
| @@ -242,12 +270,11 @@ void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) { |
| } else { |
| ReceivedPacket(&packet->front(), packet->size()); |
| } |
| -} |
| - |
| -void AudioReceiver::SetTargetDelay(base::TimeDelta target_delay) { |
| - DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| - target_delay_delta_ = target_delay; |
| - rtcp_.SetTargetDelay(target_delay_delta_); |
| + if (!reports_are_scheduled_) { |
| + ScheduleNextRtcpReport(); |
| + ScheduleNextCastMessage(); |
| + reports_are_scheduled_ = true; |
| + } |
| } |
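Report scheduling now happens lazily on the first incoming packet of either kind (RTCP or RTP), replacing the old InitializeTimers() call made on the first payload. A minimal standalone sketch of the one-shot guard (not Chromium code; the two callbacks are assumptions standing in for ScheduleNextRtcpReport()/ScheduleNextCastMessage()):

#include <functional>
#include <utility>

class ReportScheduler {
 public:
  ReportScheduler(std::function<void()> schedule_rtcp_report,
                  std::function<void()> schedule_cast_message)
      : schedule_rtcp_report_(std::move(schedule_rtcp_report)),
        schedule_cast_message_(std::move(schedule_cast_message)) {}

  // Call once per incoming packet; only the first call has any effect.
  void OnIncomingPacket() {
    if (reports_are_scheduled_)
      return;
    schedule_rtcp_report_();
    schedule_cast_message_();
    reports_are_scheduled_ = true;
  }

 private:
  std::function<void()> schedule_rtcp_report_;
  std::function<void()> schedule_cast_message_;
  bool reports_are_scheduled_ = false;
};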
| void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) { |
| @@ -264,64 +291,6 @@ void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) { |
| rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events); |
| } |
| -base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now, |
| - uint32 rtp_timestamp) { |
| - DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| - // Senders time in ms when this frame was recorded. |
| - // Note: the senders clock and our local clock might not be synced. |
| - base::TimeTicks rtp_timestamp_in_ticks; |
| - base::TimeTicks playout_time; |
| - if (time_offset_ == base::TimeDelta()) { |
| - if (rtcp_.RtpTimestampInSenderTime(frequency_, |
| - first_incoming_rtp_timestamp_, |
| - &rtp_timestamp_in_ticks)) { |
| - time_offset_ = time_first_incoming_packet_ - rtp_timestamp_in_ticks; |
| - // TODO(miu): As clocks drift w.r.t. each other, and other factors take |
| - // effect, |time_offset_| should be updated. Otherwise, we might as well |
| - // always compute the time offsets agnostic of RTCP's time data. |
| - } else { |
| - // We have not received any RTCP to sync the stream play it out as soon as |
| - // possible. |
| - |
| - // BUG: This means we're literally switching to a different timeline a |
| - // short time after a cast receiver has been running. Re-enable |
| - // End2EndTest.StartSenderBeforeReceiver once this is fixed. |
| - // http://crbug.com/356942 |
| - uint32 rtp_timestamp_diff = rtp_timestamp - first_incoming_rtp_timestamp_; |
| - |
| - int frequency_khz = frequency_ / 1000; |
| - base::TimeDelta rtp_time_diff_delta = |
| - base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz); |
| - base::TimeDelta time_diff_delta = now - time_first_incoming_packet_; |
| - |
| - playout_time = now + std::max(rtp_time_diff_delta - time_diff_delta, |
| - base::TimeDelta()); |
| - } |
| - } |
| - if (playout_time.is_null()) { |
| - // This can fail if we have not received any RTCP packets in a long time. |
| - if (rtcp_.RtpTimestampInSenderTime(frequency_, rtp_timestamp, |
| - &rtp_timestamp_in_ticks)) { |
| - playout_time = |
| - rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_; |
| - } else { |
| - playout_time = now; |
| - } |
| - } |
| - |
| - // TODO(miu): This is broken since we literally switch timelines once |rtcp_| |
| - // can provide us the |time_offset_|. Furthermore, this "getter" method may |
| - // be called on frames received out-of-order, which means the playout times |
| - // for earlier frames will be computed incorrectly. |
| -#if 0 |
| - // Don't allow the playout time to go backwards. |
| - if (last_playout_time_ > playout_time) playout_time = last_playout_time_; |
| - last_playout_time_ = playout_time; |
| -#endif |
| - |
| - return playout_time; |
| -} |
| - |
| void AudioReceiver::ScheduleNextRtcpReport() { |
| DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| base::TimeDelta time_to_send = rtcp_.TimeToSendNextRtcpReport() - |