OLD | NEW |
| (Empty) |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "media/cast/video_receiver/video_receiver.h" | |
6 | |
7 #include <algorithm> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/debug/trace_event.h" | |
11 #include "base/logging.h" | |
12 #include "base/message_loop/message_loop.h" | |
13 #include "media/base/video_frame.h" | |
14 #include "media/cast/logging/logging_defines.h" | |
15 #include "media/cast/transport/cast_transport_defines.h" | |
16 #include "media/cast/video_receiver/video_decoder.h" | |
17 | |
18 namespace { | |
19 const int kMinSchedulingDelayMs = 1; | |
20 } // namespace | |
21 | |
22 namespace media { | |
23 namespace cast { | |
24 | |
// Constructs the receive side of a Cast video stream: the RTP receiver (base
// class), the jitter buffer (|framer_|), the RTCP session, the payload
// decryptor, and a software video decoder.  |packet_sender| is handed to RTCP
// so receiver reports can be transmitted back to the sender.
VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
                             const FrameReceiverConfig& video_config,
                             transport::PacedPacketSender* const packet_sender)
    : RtpReceiver(cast_environment->Clock(), NULL, &video_config),
      cast_environment_(cast_environment),
      event_subscriber_(kReceiverRtcpEventHistorySize, VIDEO_EVENT),
      codec_(video_config.codec.video),
      // End-to-end latency budget: frames play out this long after capture.
      target_playout_delay_(
          base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
      expected_frame_duration_(
          base::TimeDelta::FromSeconds(1) / video_config.max_frame_rate),
      reports_are_scheduled_(false),
      // Jitter buffer capacity: the number of frames that can be in flight
      // within the max RTP delay at the max frame rate (delay_ms * fps / 1000).
      framer_(cast_environment->Clock(),
              this,
              video_config.incoming_ssrc,
              true,
              video_config.rtp_max_delay_ms * video_config.max_frame_rate /
                  1000),
      rtcp_(cast_environment_,
            NULL,
            NULL,
            packet_sender,
            GetStatistics(),
            video_config.rtcp_mode,
            base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
            video_config.feedback_ssrc,
            video_config.incoming_ssrc,
            video_config.rtcp_c_name,
            false),
      is_waiting_for_consecutive_frame_(false),
      lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
      weak_factory_(this) {
  // Both values are used as divisors/sizes above; zero would be fatal.
  DCHECK_GT(video_config.rtp_max_delay_ms, 0);
  DCHECK_GT(video_config.max_frame_rate, 0);
  video_decoder_.reset(new VideoDecoder(cast_environment, video_config));
  decryptor_.Initialize(video_config.aes_key, video_config.aes_iv_mask);
  rtcp_.SetTargetDelay(target_playout_delay_);
  // Mirror raw log events into |event_subscriber_| so they can be reported
  // via RTCP (see CastFeedback); removed again in the destructor.
  cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
  memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
65 | |
VideoReceiver::~VideoReceiver() {
  // Must tear down on the MAIN thread; the subscriber registered in the
  // constructor is unhooked here to avoid dangling-pointer callbacks.
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
70 | |
71 void VideoReceiver::GetRawVideoFrame( | |
72 const VideoFrameDecodedCallback& callback) { | |
73 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
74 DCHECK(!callback.is_null()); | |
75 DCHECK(video_decoder_.get()); | |
76 GetEncodedVideoFrame(base::Bind( | |
77 &VideoReceiver::DecodeEncodedVideoFrame, | |
78 // Note: Use of Unretained is safe since this Closure is guaranteed to be | |
79 // invoked before destruction of |this|. | |
80 base::Unretained(this), | |
81 callback)); | |
82 } | |
83 | |
84 void VideoReceiver::DecodeEncodedVideoFrame( | |
85 const VideoFrameDecodedCallback& callback, | |
86 scoped_ptr<transport::EncodedFrame> encoded_frame) { | |
87 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
88 if (!encoded_frame) { | |
89 callback.Run( | |
90 make_scoped_refptr<VideoFrame>(NULL), base::TimeTicks(), false); | |
91 return; | |
92 } | |
93 const uint32 frame_id = encoded_frame->frame_id; | |
94 const uint32 rtp_timestamp = encoded_frame->rtp_timestamp; | |
95 const base::TimeTicks playout_time = encoded_frame->reference_time; | |
96 video_decoder_->DecodeFrame(encoded_frame.Pass(), | |
97 base::Bind(&VideoReceiver::EmitRawVideoFrame, | |
98 cast_environment_, | |
99 callback, | |
100 frame_id, | |
101 rtp_timestamp, | |
102 playout_time)); | |
103 } | |
104 | |
105 // static | |
106 void VideoReceiver::EmitRawVideoFrame( | |
107 const scoped_refptr<CastEnvironment>& cast_environment, | |
108 const VideoFrameDecodedCallback& callback, | |
109 uint32 frame_id, | |
110 uint32 rtp_timestamp, | |
111 const base::TimeTicks& playout_time, | |
112 const scoped_refptr<VideoFrame>& video_frame, | |
113 bool is_continuous) { | |
114 DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN)); | |
115 if (video_frame) { | |
116 const base::TimeTicks now = cast_environment->Clock()->NowTicks(); | |
117 cast_environment->Logging()->InsertFrameEvent( | |
118 now, FRAME_DECODED, VIDEO_EVENT, rtp_timestamp, frame_id); | |
119 cast_environment->Logging()->InsertFrameEventWithDelay( | |
120 now, FRAME_PLAYOUT, VIDEO_EVENT, rtp_timestamp, frame_id, | |
121 playout_time - now); | |
122 // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc | |
123 TRACE_EVENT_INSTANT1( | |
124 "cast_perf_test", "FrameDecoded", | |
125 TRACE_EVENT_SCOPE_THREAD, | |
126 "rtp_timestamp", rtp_timestamp); | |
127 } | |
128 callback.Run(video_frame, playout_time, is_continuous); | |
129 } | |
130 | |
// Requests the next complete encoded frame.  Requests are queued and serviced
// in FIFO order by EmitAvailableEncodedFrames(), which may satisfy this
// request immediately or later when enough packets have arrived.
void VideoReceiver::GetEncodedVideoFrame(const FrameEncodedCallback& callback) {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  frame_request_queue_.push_back(callback);
  EmitAvailableEncodedFrames();
}
136 | |
// Services queued frame requests with completed frames from |framer_|.
// For each pending request this may: emit the next frame, skip frames that
// are already too late, or pause to wait for a missing (non-consecutive)
// frame whose playout deadline has not yet passed.  Returns early when no
// complete frame is available; OnReceivedPayloadData() re-invokes it as more
// packets arrive.
void VideoReceiver::EmitAvailableEncodedFrames() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));

  while (!frame_request_queue_.empty()) {
    // Attempt to peek at the next completed frame from the |framer_|.
    // TODO(miu): We should only be peeking at the metadata, and not copying the
    // payload yet! Or, at least, peek using a StringPiece instead of a copy.
    scoped_ptr<transport::EncodedFrame> encoded_frame(
        new transport::EncodedFrame());
    bool is_consecutively_next_frame = false;
    bool have_multiple_complete_frames = false;

    if (!framer_.GetEncodedFrame(encoded_frame.get(),
                                 &is_consecutively_next_frame,
                                 &have_multiple_complete_frames)) {
      VLOG(1) << "Wait for more video packets to produce a completed frame.";
      return;  // OnReceivedPayloadData() will invoke this method in the future.
    }

    const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
    const base::TimeTicks playout_time =
        GetPlayoutTime(encoded_frame->rtp_timestamp);

    // If we have multiple decodable frames, and the current frame is
    // too old, then skip it and decode the next frame instead.
    if (have_multiple_complete_frames && now > playout_time) {
      framer_.ReleaseFrame(encoded_frame->frame_id);
      continue;
    }

    // If |framer_| has a frame ready that is out of sequence, examine the
    // playout time to determine whether it's acceptable to continue, thereby
    // skipping one or more frames. Skip if the missing frame wouldn't complete
    // playing before the start of playback of the available frame.
    if (!is_consecutively_next_frame) {
      // TODO(miu): Also account for expected decode time here?
      const base::TimeTicks earliest_possible_end_time_of_missing_frame =
          now + expected_frame_duration_;
      if (earliest_possible_end_time_of_missing_frame < playout_time) {
        VLOG(1) << "Wait for next consecutive frame instead of skipping.";
        // Schedule a single wake-up at the available frame's playout time;
        // the flag prevents piling up multiple delayed tasks.
        if (!is_waiting_for_consecutive_frame_) {
          is_waiting_for_consecutive_frame_ = true;
          cast_environment_->PostDelayedTask(
              CastEnvironment::MAIN,
              FROM_HERE,
              base::Bind(&VideoReceiver::EmitAvailableEncodedFramesAfterWaiting,
                         weak_factory_.GetWeakPtr()),
              playout_time - now);
        }
        return;
      }
    }

    // Decrypt the payload data in the frame, if crypto is being used.
    if (decryptor_.initialized()) {
      std::string decrypted_video_data;
      if (!decryptor_.Decrypt(encoded_frame->frame_id,
                              encoded_frame->data,
                              &decrypted_video_data)) {
        // Decryption failed. Give up on this frame, releasing it from the
        // jitter buffer.
        framer_.ReleaseFrame(encoded_frame->frame_id);
        continue;
      }
      encoded_frame->data.swap(decrypted_video_data);
    }

    // At this point, we have a decrypted EncodedFrame ready to be emitted.
    encoded_frame->reference_time = playout_time;
    framer_.ReleaseFrame(encoded_frame->frame_id);
    // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
    TRACE_EVENT_INSTANT2(
        "cast_perf_test", "PullEncodedVideoFrame",
        TRACE_EVENT_SCOPE_THREAD,
        "rtp_timestamp", encoded_frame->rtp_timestamp,
        // TODO(miu): Need to find an alternative to using ToInternalValue():
        "render_time", playout_time.ToInternalValue());
    // Deliver asynchronously so the client callback never re-enters this
    // loop; pop the request only after the task is queued.
    cast_environment_->PostTask(CastEnvironment::MAIN,
                                FROM_HERE,
                                base::Bind(frame_request_queue_.front(),
                                           base::Passed(&encoded_frame)));
    frame_request_queue_.pop_front();
  }
}
221 | |
// Delayed-task target scheduled by EmitAvailableEncodedFrames() while waiting
// for a missing consecutive frame.  Clears the waiting flag (so a new wait
// can be scheduled if needed) and retries emission.
void VideoReceiver::EmitAvailableEncodedFramesAfterWaiting() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  DCHECK(is_waiting_for_consecutive_frame_);
  is_waiting_for_consecutive_frame_ = false;
  EmitAvailableEncodedFrames();
}
228 | |
229 base::TimeTicks VideoReceiver::GetPlayoutTime(uint32 rtp_timestamp) const { | |
230 return lip_sync_reference_time_ + | |
231 lip_sync_drift_.Current() + | |
232 RtpDeltaToTimeDelta( | |
233 static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_), | |
234 kVideoFrequency) + | |
235 target_playout_delay_; | |
236 } | |
237 | |
238 void VideoReceiver::IncomingPacket(scoped_ptr<Packet> packet) { | |
239 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
240 if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) { | |
241 rtcp_.IncomingRtcpPacket(&packet->front(), packet->size()); | |
242 } else { | |
243 ReceivedPacket(&packet->front(), packet->size()); | |
244 } | |
245 if (!reports_are_scheduled_) { | |
246 ScheduleNextRtcpReport(); | |
247 ScheduleNextCastMessage(); | |
248 reports_are_scheduled_ = true; | |
249 } | |
250 } | |
251 | |
// RtpReceiver callback: one RTP payload (one packet of a frame) has been
// parsed.  Logs the packet, inserts it into the jitter buffer, refreshes the
// lip-sync reference timestamps, and — once a frame completes — triggers
// frame emission.
void VideoReceiver::OnReceivedPayloadData(const uint8* payload_data,
                                          size_t payload_size,
                                          const RtpCastHeader& rtp_header) {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));

  const base::TimeTicks now = cast_environment_->Clock()->NowTicks();

  // Remember the RTP timestamp for this frame id (8-bit ring) so that later
  // ACK log events (see CastFeedback) can be tagged with it.
  frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
      rtp_header.rtp_timestamp;
  cast_environment_->Logging()->InsertPacketEvent(
      now,
      PACKET_RECEIVED,
      VIDEO_EVENT,
      rtp_header.rtp_timestamp,
      rtp_header.frame_id,
      rtp_header.packet_id,
      rtp_header.max_packet_id,
      payload_size);

  bool duplicate = false;
  const bool complete =
      framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);

  // Duplicate packets are ignored.
  if (duplicate)
    return;

  // Update lip-sync values upon receiving the first packet of each frame, or if
  // they have never been set yet.
  if (rtp_header.packet_id == 0 || lip_sync_reference_time_.is_null()) {
    RtpTimestamp fresh_sync_rtp;
    base::TimeTicks fresh_sync_reference;
    if (!rtcp_.GetLatestLipSyncTimes(&fresh_sync_rtp, &fresh_sync_reference)) {
      // HACK: The sender should have provided Sender Reports before the first
      // frame was sent. However, the spec does not currently require this.
      // Therefore, when the data is missing, the local clock is used to
      // generate reference timestamps.
      VLOG(2) << "Lip sync info missing. Falling-back to local clock.";
      fresh_sync_rtp = rtp_header.rtp_timestamp;
      fresh_sync_reference = now;
    }
    // |lip_sync_reference_time_| is always incremented according to the time
    // delta computed from the difference in RTP timestamps. Then,
    // |lip_sync_drift_| accounts for clock drift and also smoothes-out any
    // sudden/discontinuous shifts in the series of reference time values.
    if (lip_sync_reference_time_.is_null()) {
      lip_sync_reference_time_ = fresh_sync_reference;
    } else {
      lip_sync_reference_time_ += RtpDeltaToTimeDelta(
          static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_),
          kVideoFrequency);
    }
    lip_sync_rtp_timestamp_ = fresh_sync_rtp;
    lip_sync_drift_.Update(
        now, fresh_sync_reference - lip_sync_reference_time_);
  }

  // Video frame not complete; wait for more packets.
  if (!complete)
    return;

  EmitAvailableEncodedFrames();
}
315 | |
316 // Send a cast feedback message. Actual message created in the framer (cast | |
317 // message builder). | |
318 void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) { | |
319 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
320 | |
321 base::TimeTicks now = cast_environment_->Clock()->NowTicks(); | |
322 RtpTimestamp rtp_timestamp = | |
323 frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff]; | |
324 cast_environment_->Logging()->InsertFrameEvent( | |
325 now, FRAME_ACK_SENT, VIDEO_EVENT, | |
326 rtp_timestamp, cast_message.ack_frame_id_); | |
327 | |
328 ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events; | |
329 event_subscriber_.GetRtcpEventsAndReset(&rtcp_events); | |
330 rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events); | |
331 } | |
332 | |
333 // Cast messages should be sent within a maximum interval. Schedule a call | |
334 // if not triggered elsewhere, e.g. by the cast message_builder. | |
335 void VideoReceiver::ScheduleNextCastMessage() { | |
336 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
337 base::TimeTicks send_time; | |
338 framer_.TimeToSendNextCastMessage(&send_time); | |
339 base::TimeDelta time_to_send = | |
340 send_time - cast_environment_->Clock()->NowTicks(); | |
341 time_to_send = std::max( | |
342 time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | |
343 cast_environment_->PostDelayedTask( | |
344 CastEnvironment::MAIN, | |
345 FROM_HERE, | |
346 base::Bind(&VideoReceiver::SendNextCastMessage, | |
347 weak_factory_.GetWeakPtr()), | |
348 time_to_send); | |
349 } | |
350 | |
// Delayed-task target: asks the framer to send a cast feedback message (the
// framer itself decides whether it is actually due), then re-arms the timer.
void VideoReceiver::SendNextCastMessage() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  framer_.SendCastMessage();  // Will only send a message if it is time.
  ScheduleNextCastMessage();
}
356 | |
357 void VideoReceiver::ScheduleNextRtcpReport() { | |
358 DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); | |
359 base::TimeDelta time_to_next = rtcp_.TimeToSendNextRtcpReport() - | |
360 cast_environment_->Clock()->NowTicks(); | |
361 | |
362 time_to_next = std::max( | |
363 time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs)); | |
364 | |
365 cast_environment_->PostDelayedTask( | |
366 CastEnvironment::MAIN, | |
367 FROM_HERE, | |
368 base::Bind(&VideoReceiver::SendNextRtcpReport, | |
369 weak_factory_.GetWeakPtr()), | |
370 time_to_next); | |
371 } | |
372 | |
// Delayed-task target: sends a plain RTCP receiver report (no cast message,
// no event log attached), then re-arms the timer.
void VideoReceiver::SendNextRtcpReport() {
  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
  rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
  ScheduleNextRtcpReport();
}
378 | |
379 } // namespace cast | |
380 } // namespace media | |
OLD | NEW |