1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "media/cast/audio_receiver/audio_receiver.h" | |
6 | |
7 #include <algorithm> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/logging.h" | |
11 #include "base/message_loop/message_loop.h" | |
12 #include "media/cast/audio_receiver/audio_decoder.h" | |
13 #include "media/cast/transport/cast_transport_defines.h" | |
14 | |
namespace {
// Lower bound for delays handed to PostDelayedTask(), so that overdue work is
// still posted with a small positive delay instead of zero/negative.
const int kMinSchedulingDelayMs = 1;
}  // namespace
18 | |
19 namespace media { | |
20 namespace cast { | |
21 | |
// Constructs the receiver: registers with the RTP parser, configures the
// jitter buffer (|framer_|) and the RTCP session from |audio_config|, and
// sets up decryption and raw-event logging.  |packet_sender| is used by RTCP
// to transmit receiver reports.
AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
                             const FrameReceiverConfig& audio_config,
                             transport::PacedPacketSender* const packet_sender)
    : RtpReceiver(cast_environment->Clock(), &audio_config, NULL),
      cast_environment_(cast_environment),
      event_subscriber_(kReceiverRtcpEventHistorySize, AUDIO_EVENT),
      codec_(audio_config.codec.audio),
      frequency_(audio_config.frequency),
      target_playout_delay_(
          base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms)),
      expected_frame_duration_(
          base::TimeDelta::FromSeconds(1) / audio_config.max_frame_rate),
      reports_are_scheduled_(false),
      // Jitter buffer capacity: the maximum number of frames that can be in
      // flight within the target playout delay window.
      framer_(cast_environment->Clock(),
              this,
              audio_config.incoming_ssrc,
              true,
              audio_config.rtp_max_delay_ms * audio_config.max_frame_rate /
                  1000),
      rtcp_(cast_environment,
            NULL,
            NULL,
            packet_sender,
            GetStatistics(),
            audio_config.rtcp_mode,
            base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
            audio_config.feedback_ssrc,
            audio_config.incoming_ssrc,
            audio_config.rtcp_c_name,
            true),
      is_waiting_for_consecutive_frame_(false),
      lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
      weak_factory_(this) {
  DCHECK_GT(audio_config.rtp_max_delay_ms, 0);
  DCHECK_GT(audio_config.max_frame_rate, 0);
  audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config));
  decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask);
  rtcp_.SetTargetDelay(target_playout_delay_);
  cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
  // frame-id -> RTP timestamp map is consulted when ACKs are logged (see
  // CastFeedback()); start from a known zeroed state.
  memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
63 | |
// Unregisters the raw-event subscriber added in the constructor.  Must run on
// the MAIN thread.
AudioReceiver::~AudioReceiver() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
68 | |
// RtpReceiver callback, invoked for each parsed RTP payload.  Logs the packet
// event, inserts the packet into the jitter buffer, refreshes lip-sync
// bookkeeping, and attempts to emit any newly-completed frames.
void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
                                          size_t payload_size,
                                          const RtpCastHeader& rtp_header) {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));

  const base::TimeTicks now = cast_environment_->Clock()->NowTicks();

  // Remember this frame's RTP timestamp so it can be looked up later when the
  // ACK for the frame is logged (see CastFeedback()).
  frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
      rtp_header.rtp_timestamp;
  cast_environment_->Logging()->InsertPacketEvent(
      now, PACKET_RECEIVED, AUDIO_EVENT, rtp_header.rtp_timestamp,
      rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id,
      payload_size);

  bool duplicate = false;
  const bool complete =
      framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);

  // Duplicate packets are ignored.
  if (duplicate)
    return;

  // Update lip-sync values upon receiving the first packet of each frame, or if
  // they have never been set yet.
  if (rtp_header.packet_id == 0 || lip_sync_reference_time_.is_null()) {
    RtpTimestamp fresh_sync_rtp;
    base::TimeTicks fresh_sync_reference;
    if (!rtcp_.GetLatestLipSyncTimes(&fresh_sync_rtp, &fresh_sync_reference)) {
      // HACK: The sender should have provided Sender Reports before the first
      // frame was sent.  However, the spec does not currently require this.
      // Therefore, when the data is missing, the local clock is used to
      // generate reference timestamps.
      VLOG(2) << "Lip sync info missing. Falling-back to local clock.";
      fresh_sync_rtp = rtp_header.rtp_timestamp;
      fresh_sync_reference = now;
    }
    // |lip_sync_reference_time_| is always incremented according to the time
    // delta computed from the difference in RTP timestamps.  Then,
    // |lip_sync_drift_| accounts for clock drift and also smoothes-out any
    // sudden/discontinuous shifts in the series of reference time values.
    if (lip_sync_reference_time_.is_null()) {
      lip_sync_reference_time_ = fresh_sync_reference;
    } else {
      lip_sync_reference_time_ += RtpDeltaToTimeDelta(
          static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_),
          frequency_);
    }
    lip_sync_rtp_timestamp_ = fresh_sync_rtp;
    lip_sync_drift_.Update(
        now, fresh_sync_reference - lip_sync_reference_time_);
  }

  // Frame not complete; wait for more packets.
  if (!complete)
    return;

  // A frame just completed: try to satisfy pending frame requests.
  EmitAvailableEncodedFrames();
}
127 | |
128 void AudioReceiver::GetRawAudioFrame( | |
129 const AudioFrameDecodedCallback& callback) { | |
130 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
131 DCHECK(!callback.is_null()); | |
132 DCHECK(audio_decoder_.get()); | |
133 GetEncodedAudioFrame(base::Bind( | |
134 &AudioReceiver::DecodeEncodedAudioFrame, | |
135 // Note: Use of Unretained is safe since this Closure is guaranteed to be | |
136 // invoked before destruction of |this|. | |
137 base::Unretained(this), | |
138 callback)); | |
139 } | |
140 | |
141 void AudioReceiver::DecodeEncodedAudioFrame( | |
142 const AudioFrameDecodedCallback& callback, | |
143 scoped_ptr<transport::EncodedFrame> encoded_frame) { | |
144 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
145 if (!encoded_frame) { | |
146 callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false); | |
147 return; | |
148 } | |
149 const uint32 frame_id = encoded_frame->frame_id; | |
150 const uint32 rtp_timestamp = encoded_frame->rtp_timestamp; | |
151 const base::TimeTicks playout_time = encoded_frame->reference_time; | |
152 audio_decoder_->DecodeFrame(encoded_frame.Pass(), | |
153 base::Bind(&AudioReceiver::EmitRawAudioFrame, | |
154 cast_environment_, | |
155 callback, | |
156 frame_id, | |
157 rtp_timestamp, | |
158 playout_time)); | |
159 } | |
160 | |
161 // static | |
162 void AudioReceiver::EmitRawAudioFrame( | |
163 const scoped_refptr<CastEnvironment>& cast_environment, | |
164 const AudioFrameDecodedCallback& callback, | |
165 uint32 frame_id, | |
166 uint32 rtp_timestamp, | |
167 const base::TimeTicks& playout_time, | |
168 scoped_ptr<AudioBus> audio_bus, | |
169 bool is_continuous) { | |
170 DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN)); | |
171 if (audio_bus.get()) { | |
172 const base::TimeTicks now = cast_environment->Clock()->NowTicks(); | |
173 cast_environment->Logging()->InsertFrameEvent( | |
174 now, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp, frame_id); | |
175 cast_environment->Logging()->InsertFrameEventWithDelay( | |
176 now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp, frame_id, | |
177 playout_time - now); | |
178 } | |
179 callback.Run(audio_bus.Pass(), playout_time, is_continuous); | |
180 } | |
181 | |
// Queues a request for the next complete encoded frame.  |callback| is run
// (possibly later, once enough packets arrive) with the frame.
void AudioReceiver::GetEncodedAudioFrame(const FrameEncodedCallback& callback) {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  frame_request_queue_.push_back(callback);
  EmitAvailableEncodedFrames();
}
187 | |
// Services pending frame requests: pops completed frames from |framer_| and
// posts them to the callbacks queued in |frame_request_queue_|.  Frames may
// be skipped (too old) or delayed (waiting for a missing predecessor) based
// on their computed playout time.
void AudioReceiver::EmitAvailableEncodedFrames() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));

  while (!frame_request_queue_.empty()) {
    // Attempt to peek at the next completed frame from the |framer_|.
    // TODO(miu): We should only be peeking at the metadata, and not copying the
    // payload yet!  Or, at least, peek using a StringPiece instead of a copy.
    scoped_ptr<transport::EncodedFrame> encoded_frame(
        new transport::EncodedFrame());
    bool is_consecutively_next_frame = false;
    bool have_multiple_complete_frames = false;
    if (!framer_.GetEncodedFrame(encoded_frame.get(),
                                 &is_consecutively_next_frame,
                                 &have_multiple_complete_frames)) {
      VLOG(1) << "Wait for more audio packets to produce a completed frame.";
      return;  // OnReceivedPayloadData() will invoke this method in the future.
    }

    const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
    const base::TimeTicks playout_time =
        GetPlayoutTime(encoded_frame->rtp_timestamp);

    // If we have multiple decodable frames, and the current frame is
    // too old, then skip it and decode the next frame instead.
    if (have_multiple_complete_frames && now > playout_time) {
      framer_.ReleaseFrame(encoded_frame->frame_id);
      continue;
    }

    // If |framer_| has a frame ready that is out of sequence, examine the
    // playout time to determine whether it's acceptable to continue, thereby
    // skipping one or more frames.  Skip if the missing frame wouldn't complete
    // playing before the start of playback of the available frame.
    if (!is_consecutively_next_frame) {
      // TODO(miu): Also account for expected decode time here?
      const base::TimeTicks earliest_possible_end_time_of_missing_frame =
          now + expected_frame_duration_;
      if (earliest_possible_end_time_of_missing_frame < playout_time) {
        VLOG(1) << "Wait for next consecutive frame instead of skipping.";
        // Post a one-shot retry at the available frame's playout time; the
        // flag prevents piling up multiple retries.
        if (!is_waiting_for_consecutive_frame_) {
          is_waiting_for_consecutive_frame_ = true;
          cast_environment_->PostDelayedTask(
              CastEnvironment::MAIN,
              FROM_HERE,
              base::Bind(&AudioReceiver::EmitAvailableEncodedFramesAfterWaiting,
                         weak_factory_.GetWeakPtr()),
              playout_time - now);
        }
        return;
      }
    }

    // Decrypt the payload data in the frame, if crypto is being used.
    if (decryptor_.initialized()) {
      std::string decrypted_audio_data;
      if (!decryptor_.Decrypt(encoded_frame->frame_id,
                              encoded_frame->data,
                              &decrypted_audio_data)) {
        // Decryption failed.  Give up on this frame, releasing it from the
        // jitter buffer.
        framer_.ReleaseFrame(encoded_frame->frame_id);
        continue;
      }
      encoded_frame->data.swap(decrypted_audio_data);
    }

    // At this point, we have a decrypted EncodedFrame ready to be emitted.
    encoded_frame->reference_time = playout_time;
    framer_.ReleaseFrame(encoded_frame->frame_id);
    cast_environment_->PostTask(CastEnvironment::MAIN,
                                FROM_HERE,
                                base::Bind(frame_request_queue_.front(),
                                           base::Passed(&encoded_frame)));
    frame_request_queue_.pop_front();
  }
}
264 | |
// Delayed-task callback posted by EmitAvailableEncodedFrames() when it chose
// to wait for a missing, consecutively-next frame.  Clears the waiting flag
// and retries emission.
void AudioReceiver::EmitAvailableEncodedFramesAfterWaiting() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  DCHECK(is_waiting_for_consecutive_frame_);
  is_waiting_for_consecutive_frame_ = false;
  EmitAvailableEncodedFrames();
}
271 | |
272 base::TimeTicks AudioReceiver::GetPlayoutTime(uint32 rtp_timestamp) const { | |
273 return lip_sync_reference_time_ + | |
274 lip_sync_drift_.Current() + | |
275 RtpDeltaToTimeDelta( | |
276 static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_), | |
277 frequency_) + | |
278 target_playout_delay_; | |
279 } | |
280 | |
281 void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) { | |
282 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
283 if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) { | |
284 rtcp_.IncomingRtcpPacket(&packet->front(), packet->size()); | |
285 } else { | |
286 ReceivedPacket(&packet->front(), packet->size()); | |
287 } | |
288 if (!reports_are_scheduled_) { | |
289 ScheduleNextRtcpReport(); | |
290 ScheduleNextCastMessage(); | |
291 reports_are_scheduled_ = true; | |
292 } | |
293 } | |
294 | |
// Sends a Cast feedback (ACK/NACK) message to the sender via RTCP, together
// with any receiver-side events accumulated since the last report.
void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  base::TimeTicks now = cast_environment_->Clock()->NowTicks();
  // Look up the RTP timestamp recorded when the ACKed frame's packets were
  // received, so the ACK event can be correlated in the logs.
  RtpTimestamp rtp_timestamp =
      frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
  cast_environment_->Logging()->InsertFrameEvent(
      now, FRAME_ACK_SENT, AUDIO_EVENT, rtp_timestamp,
      cast_message.ack_frame_id_);

  // Drain accumulated RTCP events and piggyback them on the feedback message.
  ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
  event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
  rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
}
308 | |
309 void AudioReceiver::ScheduleNextRtcpReport() { | |
310 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
311 base::TimeDelta time_to_send = rtcp_.TimeToSendNextRtcpReport() - | |
312 cast_environment_->Clock()->NowTicks(); | |
313 | |
314 time_to_send = std::max( | |
315 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | |
316 | |
317 cast_environment_->PostDelayedTask( | |
318 CastEnvironment::MAIN, | |
319 FROM_HERE, | |
320 base::Bind(&AudioReceiver::SendNextRtcpReport, | |
321 weak_factory_.GetWeakPtr()), | |
322 time_to_send); | |
323 } | |
324 | |
// Timer callback: emits a periodic RTCP receiver report (with no Cast
// feedback and no event log attached) and re-arms the timer.
void AudioReceiver::SendNextRtcpReport() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  // TODO(pwestin): add logging.
  rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
  ScheduleNextRtcpReport();
}
331 | |
332 // Cast messages should be sent within a maximum interval. Schedule a call | |
333 // if not triggered elsewhere, e.g. by the cast message_builder. | |
334 void AudioReceiver::ScheduleNextCastMessage() { | |
335 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
336 base::TimeTicks send_time; | |
337 framer_.TimeToSendNextCastMessage(&send_time); | |
338 base::TimeDelta time_to_send = | |
339 send_time - cast_environment_->Clock()->NowTicks(); | |
340 time_to_send = std::max( | |
341 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | |
342 cast_environment_->PostDelayedTask( | |
343 CastEnvironment::MAIN, | |
344 FROM_HERE, | |
345 base::Bind(&AudioReceiver::SendNextCastMessage, | |
346 weak_factory_.GetWeakPtr()), | |
347 time_to_send); | |
348 } | |
349 | |
// Timer callback: lets the framer send a Cast feedback message if one is due,
// then re-arms the timer.
void AudioReceiver::SendNextCastMessage() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  framer_.SendCastMessage();  // Will only send a message if it is time.
  ScheduleNextCastMessage();
}
355 | |
356 } // namespace cast | |
357 } // namespace media | |