| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/cast/audio_receiver/audio_receiver.h" | 5 #include "media/cast/receiver/frame_receiver.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/big_endian.h" |
| 9 #include "base/bind.h" | 10 #include "base/bind.h" |
| 10 #include "base/logging.h" | 11 #include "base/logging.h" |
| 11 #include "base/message_loop/message_loop.h" | 12 #include "base/message_loop/message_loop.h" |
| 12 #include "media/cast/audio_receiver/audio_decoder.h" | 13 #include "media/cast/cast_environment.h" |
| 13 #include "media/cast/transport/cast_transport_defines.h" | |
| 14 | 14 |
| 15 namespace { | 15 namespace { |
| 16 const int kMinSchedulingDelayMs = 1; | 16 const int kMinSchedulingDelayMs = 1; |
| 17 } // namespace | 17 } // namespace |
| 18 | 18 |
| 19 namespace media { | 19 namespace media { |
| 20 namespace cast { | 20 namespace cast { |
| 21 | 21 |
| 22 AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment, | 22 FrameReceiver::FrameReceiver( |
| 23 const FrameReceiverConfig& audio_config, | 23 const scoped_refptr<CastEnvironment>& cast_environment, |
| 24 transport::PacedPacketSender* const packet_sender) | 24 const FrameReceiverConfig& config, |
| 25 : RtpReceiver(cast_environment->Clock(), &audio_config, NULL), | 25 EventMediaType event_media_type, |
| 26 cast_environment_(cast_environment), | 26 transport::PacedPacketSender* const packet_sender) |
| 27 event_subscriber_(kReceiverRtcpEventHistorySize, AUDIO_EVENT), | 27 : cast_environment_(cast_environment), |
| 28 codec_(audio_config.codec.audio), | 28 packet_parser_(config.incoming_ssrc, config.rtp_payload_type), |
| 29 frequency_(audio_config.frequency), | 29 stats_(cast_environment->Clock()), |
| 30 event_media_type_(event_media_type), |
| 31 event_subscriber_(kReceiverRtcpEventHistorySize, event_media_type), |
| 32 rtp_timebase_(config.frequency), |
| 30 target_playout_delay_( | 33 target_playout_delay_( |
| 31 base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms)), | 34 base::TimeDelta::FromMilliseconds(config.rtp_max_delay_ms)), |
| 32 expected_frame_duration_( | 35 expected_frame_duration_( |
| 33 base::TimeDelta::FromSeconds(1) / audio_config.max_frame_rate), | 36 base::TimeDelta::FromSeconds(1) / config.max_frame_rate), |
| 34 reports_are_scheduled_(false), | 37 reports_are_scheduled_(false), |
| 35 framer_(cast_environment->Clock(), | 38 framer_(cast_environment->Clock(), |
| 36 this, | 39 this, |
| 37 audio_config.incoming_ssrc, | 40 config.incoming_ssrc, |
| 38 true, | 41 true, |
| 39 audio_config.rtp_max_delay_ms * audio_config.max_frame_rate / | 42 config.rtp_max_delay_ms * config.max_frame_rate / 1000), |
| 40 1000), | 43 rtcp_(cast_environment_, |
| 41 rtcp_(cast_environment, | |
| 42 NULL, | 44 NULL, |
| 43 NULL, | 45 NULL, |
| 44 packet_sender, | 46 packet_sender, |
| 45 GetStatistics(), | 47 &stats_, |
| 46 audio_config.rtcp_mode, | 48 config.rtcp_mode, |
| 47 base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval), | 49 base::TimeDelta::FromMilliseconds(config.rtcp_interval), |
| 48 audio_config.feedback_ssrc, | 50 config.feedback_ssrc, |
| 49 audio_config.incoming_ssrc, | 51 config.incoming_ssrc, |
| 50 audio_config.rtcp_c_name, | 52 config.rtcp_c_name, |
| 51 true), | 53 event_media_type), |
| 52 is_waiting_for_consecutive_frame_(false), | 54 is_waiting_for_consecutive_frame_(false), |
| 53 lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()), | 55 lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()), |
| 54 weak_factory_(this) { | 56 weak_factory_(this) { |
| 55 DCHECK_GT(audio_config.rtp_max_delay_ms, 0); | 57 DCHECK_GT(config.rtp_max_delay_ms, 0); |
| 56 DCHECK_GT(audio_config.max_frame_rate, 0); | 58 DCHECK_GT(config.max_frame_rate, 0); |
| 57 audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config)); | 59 decryptor_.Initialize(config.aes_key, config.aes_iv_mask); |
| 58 decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask); | |
| 59 rtcp_.SetTargetDelay(target_playout_delay_); | 60 rtcp_.SetTargetDelay(target_playout_delay_); |
| 60 cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_); | 61 cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_); |
| 61 memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_)); | 62 memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_)); |
| 62 } | 63 } |
| 63 | 64 |
| 64 AudioReceiver::~AudioReceiver() { | 65 FrameReceiver::~FrameReceiver() { |
| 65 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 66 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 66 cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_); | 67 cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_); |
| 67 } | 68 } |
| 68 | 69 |
| 69 void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data, | 70 void FrameReceiver::RequestEncodedFrame(const FrameEncodedCallback& callback) { |
| 70 size_t payload_size, | 71 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 71 const RtpCastHeader& rtp_header) { | 72 frame_request_queue_.push_back(callback); |
| 73 EmitAvailableEncodedFrames(); |
| 74 } |
| 75 |
| 76 bool FrameReceiver::ProcessPacket(scoped_ptr<Packet> packet) { |
| 77 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 78 |
| 79 if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) { |
| 80 rtcp_.IncomingRtcpPacket(&packet->front(), packet->size()); |
| 81 } else { |
| 82 RtpCastHeader rtp_header; |
| 83 const uint8* payload_data; |
| 84 size_t payload_size; |
| 85 if (!packet_parser_.ParsePacket(&packet->front(), |
| 86 packet->size(), |
| 87 &rtp_header, |
| 88 &payload_data, |
| 89 &payload_size)) { |
| 90 return false; |
| 91 } |
| 92 |
| 93 ProcessParsedPacket(rtp_header, payload_data, payload_size); |
| 94 stats_.UpdateStatistics(rtp_header); |
| 95 } |
| 96 |
| 97 if (!reports_are_scheduled_) { |
| 98 ScheduleNextRtcpReport(); |
| 99 ScheduleNextCastMessage(); |
| 100 reports_are_scheduled_ = true; |
| 101 } |
| 102 |
| 103 return true; |
| 104 } |
| 105 |
| 106 // static |
| 107 bool FrameReceiver::ParseSenderSsrc(const uint8* packet, |
| 108 size_t length, |
| 109 uint32* ssrc) { |
| 110 base::BigEndianReader big_endian_reader( |
| 111 reinterpret_cast<const char*>(packet), length); |
| 112 return big_endian_reader.Skip(8) && big_endian_reader.ReadU32(ssrc); |
| 113 } |
| 114 |
| 115 void FrameReceiver::ProcessParsedPacket(const RtpCastHeader& rtp_header, |
| 116 const uint8* payload_data, |
| 117 size_t payload_size) { |
| 72 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 118 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 73 | 119 |
| 74 const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 120 const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 75 | 121 |
| 76 frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] = | 122 frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] = |
| 77 rtp_header.rtp_timestamp; | 123 rtp_header.rtp_timestamp; |
| 78 cast_environment_->Logging()->InsertPacketEvent( | 124 cast_environment_->Logging()->InsertPacketEvent( |
| 79 now, PACKET_RECEIVED, AUDIO_EVENT, rtp_header.rtp_timestamp, | 125 now, PACKET_RECEIVED, event_media_type_, rtp_header.rtp_timestamp, |
| 80 rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id, | 126 rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id, |
| 81 payload_size); | 127 payload_size); |
| 82 | 128 |
| 83 bool duplicate = false; | 129 bool duplicate = false; |
| 84 const bool complete = | 130 const bool complete = |
| 85 framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate); | 131 framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate); |
| 86 | 132 |
| 87 // Duplicate packets are ignored. | 133 // Duplicate packets are ignored. |
| 88 if (duplicate) | 134 if (duplicate) |
| 89 return; | 135 return; |
| (...skipping 14 matching lines...) | (...skipping 14 matching lines...) |
| 104 } | 150 } |
| 105 // |lip_sync_reference_time_| is always incremented according to the time | 151 // |lip_sync_reference_time_| is always incremented according to the time |
| 106 // delta computed from the difference in RTP timestamps. Then, | 152 // delta computed from the difference in RTP timestamps. Then, |
| 107 // |lip_sync_drift_| accounts for clock drift and also smoothes-out any | 153 // |lip_sync_drift_| accounts for clock drift and also smoothes-out any |
| 108 // sudden/discontinuous shifts in the series of reference time values. | 154 // sudden/discontinuous shifts in the series of reference time values. |
| 109 if (lip_sync_reference_time_.is_null()) { | 155 if (lip_sync_reference_time_.is_null()) { |
| 110 lip_sync_reference_time_ = fresh_sync_reference; | 156 lip_sync_reference_time_ = fresh_sync_reference; |
| 111 } else { | 157 } else { |
| 112 lip_sync_reference_time_ += RtpDeltaToTimeDelta( | 158 lip_sync_reference_time_ += RtpDeltaToTimeDelta( |
| 113 static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_), | 159 static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_), |
| 114 frequency_); | 160 rtp_timebase_); |
| 115 } | 161 } |
| 116 lip_sync_rtp_timestamp_ = fresh_sync_rtp; | 162 lip_sync_rtp_timestamp_ = fresh_sync_rtp; |
| 117 lip_sync_drift_.Update( | 163 lip_sync_drift_.Update( |
| 118 now, fresh_sync_reference - lip_sync_reference_time_); | 164 now, fresh_sync_reference - lip_sync_reference_time_); |
| 119 } | 165 } |
| 120 | 166 |
| 121 // Frame not complete; wait for more packets. | 167 // Another frame is complete from a non-duplicate packet. Attempt to emit |
| 122 if (!complete) | 168 // more frames to satisfy enqueued requests. |
| 123 return; | 169 if (complete) |
| 124 | 170 EmitAvailableEncodedFrames(); |
| 125 EmitAvailableEncodedFrames(); | |
| 126 } | 171 } |
| 127 | 172 |
| 128 void AudioReceiver::GetRawAudioFrame( | 173 void FrameReceiver::CastFeedback(const RtcpCastMessage& cast_message) { |
| 129 const AudioFrameDecodedCallback& callback) { | |
| 130 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 174 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 131 DCHECK(!callback.is_null()); | 175 |
| 132 DCHECK(audio_decoder_.get()); | 176 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 133 GetEncodedAudioFrame(base::Bind( | 177 RtpTimestamp rtp_timestamp = |
| 134 &AudioReceiver::DecodeEncodedAudioFrame, | 178 frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff]; |
| 135 // Note: Use of Unretained is safe since this Closure is guaranteed to be | 179 cast_environment_->Logging()->InsertFrameEvent( |
| 136 // invoked before destruction of |this|. | 180 now, FRAME_ACK_SENT, event_media_type_, |
| 137 base::Unretained(this), | 181 rtp_timestamp, cast_message.ack_frame_id_); |
| 138 callback)); | 182 |
| 183 ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events; |
| 184 event_subscriber_.GetRtcpEventsAndReset(&rtcp_events); |
| 185 rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events); |
| 139 } | 186 } |
| 140 | 187 |
| 141 void AudioReceiver::DecodeEncodedAudioFrame( | 188 void FrameReceiver::EmitAvailableEncodedFrames() { |
| 142 const AudioFrameDecodedCallback& callback, | |
| 143 scoped_ptr<transport::EncodedFrame> encoded_frame) { | |
| 144 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
| 145 if (!encoded_frame) { | |
| 146 callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false); | |
| 147 return; | |
| 148 } | |
| 149 const uint32 frame_id = encoded_frame->frame_id; | |
| 150 const uint32 rtp_timestamp = encoded_frame->rtp_timestamp; | |
| 151 const base::TimeTicks playout_time = encoded_frame->reference_time; | |
| 152 audio_decoder_->DecodeFrame(encoded_frame.Pass(), | |
| 153 base::Bind(&AudioReceiver::EmitRawAudioFrame, | |
| 154 cast_environment_, | |
| 155 callback, | |
| 156 frame_id, | |
| 157 rtp_timestamp, | |
| 158 playout_time)); | |
| 159 } | |
| 160 | |
| 161 // static | |
| 162 void AudioReceiver::EmitRawAudioFrame( | |
| 163 const scoped_refptr<CastEnvironment>& cast_environment, | |
| 164 const AudioFrameDecodedCallback& callback, | |
| 165 uint32 frame_id, | |
| 166 uint32 rtp_timestamp, | |
| 167 const base::TimeTicks& playout_time, | |
| 168 scoped_ptr<AudioBus> audio_bus, | |
| 169 bool is_continuous) { | |
| 170 DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN)); | |
| 171 if (audio_bus.get()) { | |
| 172 const base::TimeTicks now = cast_environment->Clock()->NowTicks(); | |
| 173 cast_environment->Logging()->InsertFrameEvent( | |
| 174 now, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp, frame_id); | |
| 175 cast_environment->Logging()->InsertFrameEventWithDelay( | |
| 176 now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp, frame_id, | |
| 177 playout_time - now); | |
| 178 } | |
| 179 callback.Run(audio_bus.Pass(), playout_time, is_continuous); | |
| 180 } | |
| 181 | |
| 182 void AudioReceiver::GetEncodedAudioFrame(const FrameEncodedCallback& callback) { | |
| 183 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
| 184 frame_request_queue_.push_back(callback); | |
| 185 EmitAvailableEncodedFrames(); | |
| 186 } | |
| 187 | |
| 188 void AudioReceiver::EmitAvailableEncodedFrames() { | |
| 189 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 189 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 190 | 190 |
| 191 while (!frame_request_queue_.empty()) { | 191 while (!frame_request_queue_.empty()) { |
| 192 // Attempt to peek at the next completed frame from the |framer_|. | 192 // Attempt to peek at the next completed frame from the |framer_|. |
| 193 // TODO(miu): We should only be peeking at the metadata, and not copying the | 193 // TODO(miu): We should only be peeking at the metadata, and not copying the |
| 194 // payload yet! Or, at least, peek using a StringPiece instead of a copy. | 194 // payload yet! Or, at least, peek using a StringPiece instead of a copy. |
| 195 scoped_ptr<transport::EncodedFrame> encoded_frame( | 195 scoped_ptr<transport::EncodedFrame> encoded_frame( |
| 196 new transport::EncodedFrame()); | 196 new transport::EncodedFrame()); |
| 197 bool is_consecutively_next_frame = false; | 197 bool is_consecutively_next_frame = false; |
| 198 bool have_multiple_complete_frames = false; | 198 bool have_multiple_complete_frames = false; |
| 199 if (!framer_.GetEncodedFrame(encoded_frame.get(), | 199 if (!framer_.GetEncodedFrame(encoded_frame.get(), |
| 200 &is_consecutively_next_frame, | 200 &is_consecutively_next_frame, |
| 201 &have_multiple_complete_frames)) { | 201 &have_multiple_complete_frames)) { |
| 202 VLOG(1) << "Wait for more audio packets to produce a completed frame."; | 202 VLOG(1) << "Wait for more packets to produce a completed frame."; |
| 203 return; // OnReceivedPayloadData() will invoke this method in the future. | 203 return; // ProcessParsedPacket() will invoke this method in the future. |
| 204 } | 204 } |
| 205 | 205 |
| 206 const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | 206 const base::TimeTicks now = cast_environment_->Clock()->NowTicks(); |
| 207 const base::TimeTicks playout_time = | 207 const base::TimeTicks playout_time = |
| 208 GetPlayoutTime(encoded_frame->rtp_timestamp); | 208 GetPlayoutTime(encoded_frame->rtp_timestamp); |
| 209 | 209 |
| 210 // If we have multiple decodable frames, and the current frame is | 210 // If we have multiple decodable frames, and the current frame is |
| 211 // too old, then skip it and decode the next frame instead. | 211 // too old, then skip it and decode the next frame instead. |
| 212 if (have_multiple_complete_frames && now > playout_time) { | 212 if (have_multiple_complete_frames && now > playout_time) { |
| 213 framer_.ReleaseFrame(encoded_frame->frame_id); | 213 framer_.ReleaseFrame(encoded_frame->frame_id); |
| 214 continue; | 214 continue; |
| 215 } | 215 } |
| 216 | 216 |
| 217 // If |framer_| has a frame ready that is out of sequence, examine the | 217 // If |framer_| has a frame ready that is out of sequence, examine the |
| 218 // playout time to determine whether it's acceptable to continue, thereby | 218 // playout time to determine whether it's acceptable to continue, thereby |
| 219 // skipping one or more frames. Skip if the missing frame wouldn't complete | 219 // skipping one or more frames. Skip if the missing frame wouldn't complete |
| 220 // playing before the start of playback of the available frame. | 220 // playing before the start of playback of the available frame. |
| 221 if (!is_consecutively_next_frame) { | 221 if (!is_consecutively_next_frame) { |
| 222 // TODO(miu): Also account for expected decode time here? | 222 // TODO(miu): Also account for expected decode time here? |
| 223 const base::TimeTicks earliest_possible_end_time_of_missing_frame = | 223 const base::TimeTicks earliest_possible_end_time_of_missing_frame = |
| 224 now + expected_frame_duration_; | 224 now + expected_frame_duration_; |
| 225 if (earliest_possible_end_time_of_missing_frame < playout_time) { | 225 if (earliest_possible_end_time_of_missing_frame < playout_time) { |
| 226 VLOG(1) << "Wait for next consecutive frame instead of skipping."; | 226 VLOG(1) << "Wait for next consecutive frame instead of skipping."; |
| 227 if (!is_waiting_for_consecutive_frame_) { | 227 if (!is_waiting_for_consecutive_frame_) { |
| 228 is_waiting_for_consecutive_frame_ = true; | 228 is_waiting_for_consecutive_frame_ = true; |
| 229 cast_environment_->PostDelayedTask( | 229 cast_environment_->PostDelayedTask( |
| 230 CastEnvironment::MAIN, | 230 CastEnvironment::MAIN, |
| 231 FROM_HERE, | 231 FROM_HERE, |
| 232 base::Bind(&AudioReceiver::EmitAvailableEncodedFramesAfterWaiting, | 232 base::Bind(&FrameReceiver::EmitAvailableEncodedFramesAfterWaiting, |
| 233 weak_factory_.GetWeakPtr()), | 233 weak_factory_.GetWeakPtr()), |
| 234 playout_time - now); | 234 playout_time - now); |
| 235 } | 235 } |
| 236 return; | 236 return; |
| 237 } | 237 } |
| 238 } | 238 } |
| 239 | 239 |
| 240 // Decrypt the payload data in the frame, if crypto is being used. | 240 // Decrypt the payload data in the frame, if crypto is being used. |
| 241 if (decryptor_.initialized()) { | 241 if (decryptor_.initialized()) { |
| 242 std::string decrypted_audio_data; | 242 std::string decrypted_data; |
| 243 if (!decryptor_.Decrypt(encoded_frame->frame_id, | 243 if (!decryptor_.Decrypt(encoded_frame->frame_id, |
| 244 encoded_frame->data, | 244 encoded_frame->data, |
| 245 &decrypted_audio_data)) { | 245 &decrypted_data)) { |
| 246 // Decryption failed. Give up on this frame, releasing it from the | 246 // Decryption failed. Give up on this frame. |
| 247 // jitter buffer. | |
| 248 framer_.ReleaseFrame(encoded_frame->frame_id); | 247 framer_.ReleaseFrame(encoded_frame->frame_id); |
| 249 continue; | 248 continue; |
| 250 } | 249 } |
| 251 encoded_frame->data.swap(decrypted_audio_data); | 250 encoded_frame->data.swap(decrypted_data); |
| 252 } | 251 } |
| 253 | 252 |
| 254 // At this point, we have a decrypted EncodedFrame ready to be emitted. | 253 // At this point, we have a decrypted EncodedFrame ready to be emitted. |
| 255 encoded_frame->reference_time = playout_time; | 254 encoded_frame->reference_time = playout_time; |
| 256 framer_.ReleaseFrame(encoded_frame->frame_id); | 255 framer_.ReleaseFrame(encoded_frame->frame_id); |
| 257 cast_environment_->PostTask(CastEnvironment::MAIN, | 256 cast_environment_->PostTask(CastEnvironment::MAIN, |
| 258 FROM_HERE, | 257 FROM_HERE, |
| 259 base::Bind(frame_request_queue_.front(), | 258 base::Bind(frame_request_queue_.front(), |
| 260 base::Passed(&encoded_frame))); | 259 base::Passed(&encoded_frame))); |
| 261 frame_request_queue_.pop_front(); | 260 frame_request_queue_.pop_front(); |
| 262 } | 261 } |
| 263 } | 262 } |
| 264 | 263 |
| 265 void AudioReceiver::EmitAvailableEncodedFramesAfterWaiting() { | 264 void FrameReceiver::EmitAvailableEncodedFramesAfterWaiting() { |
| 266 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 265 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 267 DCHECK(is_waiting_for_consecutive_frame_); | 266 DCHECK(is_waiting_for_consecutive_frame_); |
| 268 is_waiting_for_consecutive_frame_ = false; | 267 is_waiting_for_consecutive_frame_ = false; |
| 269 EmitAvailableEncodedFrames(); | 268 EmitAvailableEncodedFrames(); |
| 270 } | 269 } |
| 271 | 270 |
| 272 base::TimeTicks AudioReceiver::GetPlayoutTime(uint32 rtp_timestamp) const { | 271 base::TimeTicks FrameReceiver::GetPlayoutTime(uint32 rtp_timestamp) const { |
| 273 return lip_sync_reference_time_ + | 272 return lip_sync_reference_time_ + |
| 274 lip_sync_drift_.Current() + | 273 lip_sync_drift_.Current() + |
| 275 RtpDeltaToTimeDelta( | 274 RtpDeltaToTimeDelta( |
| 276 static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_), | 275 static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_), |
| 277 frequency_) + | 276 rtp_timebase_) + |
| 278 target_playout_delay_; | 277 target_playout_delay_; |
| 279 } | 278 } |
| 280 | 279 |
| 281 void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) { | 280 void FrameReceiver::ScheduleNextCastMessage() { |
| 282 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
| 283 if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) { | |
| 284 rtcp_.IncomingRtcpPacket(&packet->front(), packet->size()); | |
| 285 } else { | |
| 286 ReceivedPacket(&packet->front(), packet->size()); | |
| 287 } | |
| 288 if (!reports_are_scheduled_) { | |
| 289 ScheduleNextRtcpReport(); | |
| 290 ScheduleNextCastMessage(); | |
| 291 reports_are_scheduled_ = true; | |
| 292 } | |
| 293 } | |
| 294 | |
| 295 void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) { | |
| 296 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
| 297 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | |
| 298 RtpTimestamp rtp_timestamp = | |
| 299 frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff]; | |
| 300 cast_environment_->Logging()->InsertFrameEvent( | |
| 301 now, FRAME_ACK_SENT, AUDIO_EVENT, rtp_timestamp, | |
| 302 cast_message.ack_frame_id_); | |
| 303 | |
| 304 ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events; | |
| 305 event_subscriber_.GetRtcpEventsAndReset(&rtcp_events); | |
| 306 rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events); | |
| 307 } | |
| 308 | |
| 309 void AudioReceiver::ScheduleNextRtcpReport() { | |
| 310 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
| 311 base::TimeDelta time_to_send = rtcp_.TimeToSendNextRtcpReport() - | |
| 312 cast_environment_->Clock()->NowTicks(); | |
| 313 | |
| 314 time_to_send = std::max( | |
| 315 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | |
| 316 | |
| 317 cast_environment_->PostDelayedTask( | |
| 318 CastEnvironment::MAIN, | |
| 319 FROM_HERE, | |
| 320 base::Bind(&AudioReceiver::SendNextRtcpReport, | |
| 321 weak_factory_.GetWeakPtr()), | |
| 322 time_to_send); | |
| 323 } | |
| 324 | |
| 325 void AudioReceiver::SendNextRtcpReport() { | |
| 326 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
| 327 // TODO(pwestin): add logging. | |
| 328 rtcp_.SendRtcpFromRtpReceiver(NULL, NULL); | |
| 329 ScheduleNextRtcpReport(); | |
| 330 } | |
| 331 | |
| 332 // Cast messages should be sent within a maximum interval. Schedule a call | |
| 333 // if not triggered elsewhere, e.g. by the cast message_builder. | |
| 334 void AudioReceiver::ScheduleNextCastMessage() { | |
| 335 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 281 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 336 base::TimeTicks send_time; | 282 base::TimeTicks send_time; |
| 337 framer_.TimeToSendNextCastMessage(&send_time); | 283 framer_.TimeToSendNextCastMessage(&send_time); |
| 338 base::TimeDelta time_to_send = | 284 base::TimeDelta time_to_send = |
| 339 send_time - cast_environment_->Clock()->NowTicks(); | 285 send_time - cast_environment_->Clock()->NowTicks(); |
| 340 time_to_send = std::max( | 286 time_to_send = std::max( |
| 341 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | 287 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); |
| 342 cast_environment_->PostDelayedTask( | 288 cast_environment_->PostDelayedTask( |
| 343 CastEnvironment::MAIN, | 289 CastEnvironment::MAIN, |
| 344 FROM_HERE, | 290 FROM_HERE, |
| 345 base::Bind(&AudioReceiver::SendNextCastMessage, | 291 base::Bind(&FrameReceiver::SendNextCastMessage, |
| 346 weak_factory_.GetWeakPtr()), | 292 weak_factory_.GetWeakPtr()), |
| 347 time_to_send); | 293 time_to_send); |
| 348 } | 294 } |
| 349 | 295 |
| 350 void AudioReceiver::SendNextCastMessage() { | 296 void FrameReceiver::SendNextCastMessage() { |
| 351 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | 297 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 352 framer_.SendCastMessage(); // Will only send a message if it is time. | 298 framer_.SendCastMessage(); // Will only send a message if it is time. |
| 353 ScheduleNextCastMessage(); | 299 ScheduleNextCastMessage(); |
| 354 } | 300 } |
| 355 | 301 |
| 302 void FrameReceiver::ScheduleNextRtcpReport() { |
| 303 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 304 base::TimeDelta time_to_next = rtcp_.TimeToSendNextRtcpReport() - |
| 305 cast_environment_->Clock()->NowTicks(); |
| 306 |
| 307 time_to_next = std::max( |
| 308 time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); |
| 309 |
| 310 cast_environment_->PostDelayedTask( |
| 311 CastEnvironment::MAIN, |
| 312 FROM_HERE, |
| 313 base::Bind(&FrameReceiver::SendNextRtcpReport, |
| 314 weak_factory_.GetWeakPtr()), |
| 315 time_to_next); |
| 316 } |
| 317 |
| 318 void FrameReceiver::SendNextRtcpReport() { |
| 319 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); |
| 320 rtcp_.SendRtcpFromRtpReceiver(NULL, NULL); |
| 321 ScheduleNextRtcpReport(); |
| 322 } |
| 323 |
| 356 } // namespace cast | 324 } // namespace cast |
| 357 } // namespace media | 325 } // namespace media |
| OLD | NEW |